// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "bkey_buf.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "clock.h"
#include "debug.h"
#include "disk_accounting.h"
#include "ec.h"
#include "error.h"
#include "lru.h"
#include "recovery.h"
#include "trace.h"
#include "varint.h"

#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>

static void bch2_discard_one_bucket_fast(struct bch_dev *, u64);

/* Persistent alloc info: */

static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
#define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
	BCH_ALLOC_FIELDS_V1()
#undef x
};

struct bkey_alloc_unpacked {
	u64		journal_seq;
	u8		gen;
	u8		oldest_gen;
	u8		data_type;
	bool		need_discard:1;
	bool		need_inc_gen:1;
#define x(_name, _bits)	u##_bits _name;
	BCH_ALLOC_FIELDS_V2()
#undef x
};

static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
				     const void **p, unsigned field)
{
	unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
	u64 v;

	if (!(a->fields & (1 << field)))
		return 0;

	switch (bytes) {
	case 1:
		v = *((const u8 *) *p);
		break;
	case 2:
		v = le16_to_cpup(*p);
		break;
	case 4:
		v = le32_to_cpup(*p);
		break;
	case 8:
		v = le64_to_cpup(*p);
		break;
	default:
		BUG();
	}

	*p += bytes;
	return v;
}

static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
				 struct bkey_s_c k)
{
	const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
	const void *d = in->data;
	unsigned idx = 0;

	out->gen = in->gen;

#define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
	BCH_ALLOC_FIELDS_V1()
#undef x
}

static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;
	int ret;
	u64 v;

	out->gen	= a.v->gen;
	out->oldest_gen	= a.v->oldest_gen;
	out->data_type	= a.v->data_type;

#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode_fast(in, end, &v);		\
		if (ret < 0)						\
			return ret;					\
		in += ret;						\
	} else {							\
		v = 0;							\
	}								\
	out->_name = v;							\
	if (v != out->_name)						\
		return -1;						\
	fieldnr++;

	BCH_ALLOC_FIELDS_V2()
#undef x
	return 0;
}

static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;
	int ret;
	u64 v;

	out->gen	= a.v->gen;
	out->oldest_gen	= a.v->oldest_gen;
	out->data_type	= a.v->data_type;
	out->need_discard = BCH_ALLOC_V3_NEED_DISCARD(a.v);
	out->need_inc_gen = BCH_ALLOC_V3_NEED_INC_GEN(a.v);
	out->journal_seq = le64_to_cpu(a.v->journal_seq);

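	/*
	 * v3 uses the same field encoding as v2: varints, decoded into a u64
	 * and then assigned to the narrower unpacked field to catch
	 * truncation:
	 */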
#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode_fast(in, end, &v);		\
		if (ret < 0)						\
			return ret;					\
		in += ret;						\
	} else {							\
		v = 0;							\
	}								\
	out->_name = v;							\
	if (v != out->_name)						\
		return -1;						\
	fieldnr++;

	BCH_ALLOC_FIELDS_V2()
#undef x
	return 0;
}

static struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
{
	struct bkey_alloc_unpacked ret = { .gen = 0 };

	switch (k.k->type) {
	case KEY_TYPE_alloc:
		bch2_alloc_unpack_v1(&ret, k);
		break;
	case KEY_TYPE_alloc_v2:
		bch2_alloc_unpack_v2(&ret, k);
		break;
	case KEY_TYPE_alloc_v3:
		bch2_alloc_unpack_v3(&ret, k);
		break;
	}

	return ret;
}

static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
{
	unsigned i, bytes = offsetof(struct bch_alloc, data);

	for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
		if (a->fields & (1 << i))
			bytes += BCH_ALLOC_V1_FIELD_BYTES[i];

	return DIV_ROUND_UP(bytes, sizeof(u64));
}

int bch2_alloc_v1_validate(struct bch_fs *c, struct bkey_s_c k,
			   enum bch_validate_flags flags)
{
	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
	int ret = 0;

	/* allow for unknown fields */
	bkey_fsck_err_on(bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v),
			 c, alloc_v1_val_size_bad,
			 "incorrect value size (%zu < %u)",
			 bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));
fsck_err:
	return ret;
}

int bch2_alloc_v2_validate(struct bch_fs *c, struct bkey_s_c k,
			   enum bch_validate_flags flags)
{
	struct bkey_alloc_unpacked u;
	int ret = 0;

	bkey_fsck_err_on(bch2_alloc_unpack_v2(&u, k),
			 c, alloc_v2_unpack_error,
			 "unpack error");
fsck_err:
	return ret;
}

int bch2_alloc_v3_validate(struct bch_fs *c, struct bkey_s_c k,
			   enum bch_validate_flags flags)
{
	struct bkey_alloc_unpacked u;
	int ret = 0;

	bkey_fsck_err_on(bch2_alloc_unpack_v3(&u, k),
			 c, alloc_v2_unpack_error,
			 "unpack error");
fsck_err:
	return ret;
}

int bch2_alloc_v4_validate(struct bch_fs *c, struct bkey_s_c k,
			   enum bch_validate_flags flags)
{
	struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
	int ret = 0;

	bkey_fsck_err_on(alloc_v4_u64s_noerror(a.v) > bkey_val_u64s(k.k),
			 c, alloc_v4_val_size_bad,
			 "bad val size (%u > %zu)",
			 alloc_v4_u64s_noerror(a.v), bkey_val_u64s(k.k));

	bkey_fsck_err_on(!BCH_ALLOC_V4_BACKPOINTERS_START(a.v) &&
			 BCH_ALLOC_V4_NR_BACKPOINTERS(a.v),
			 c, alloc_v4_backpointers_start_bad,
			 "invalid backpointers_start");

	bkey_fsck_err_on(alloc_data_type(*a.v, a.v->data_type) != a.v->data_type,
			 c, alloc_key_data_type_bad,
			 "invalid data type (got %u should be %u)",
			 a.v->data_type, alloc_data_type(*a.v, a.v->data_type));

	for (unsigned i = 0; i < 2; i++)
		bkey_fsck_err_on(a.v->io_time[i] > LRU_TIME_MAX,
				 c, alloc_key_io_time_bad,
				 "invalid io_time[%s]: %llu, max %llu",
				 i == READ ? "read" : "write",
				 a.v->io_time[i], LRU_TIME_MAX);

	unsigned stripe_sectors = BCH_ALLOC_V4_BACKPOINTERS_START(a.v) * sizeof(u64) >
		offsetof(struct bch_alloc_v4, stripe_sectors)
		? a.v->stripe_sectors
		: 0;

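	/* Sector counts must be consistent with the bucket's data type: */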
	switch (a.v->data_type) {
	case BCH_DATA_free:
	case BCH_DATA_need_gc_gens:
	case BCH_DATA_need_discard:
		bkey_fsck_err_on(stripe_sectors ||
				 a.v->dirty_sectors ||
				 a.v->cached_sectors ||
				 a.v->stripe,
				 c, alloc_key_empty_but_have_data,
				 "empty data type free but have data %u.%u.%u %u",
				 stripe_sectors,
				 a.v->dirty_sectors,
				 a.v->cached_sectors,
				 a.v->stripe);
		break;
	case BCH_DATA_sb:
	case BCH_DATA_journal:
	case BCH_DATA_btree:
	case BCH_DATA_user:
	case BCH_DATA_parity:
		bkey_fsck_err_on(!a.v->dirty_sectors &&
				 !stripe_sectors,
				 c, alloc_key_dirty_sectors_0,
				 "data_type %s but dirty_sectors==0",
				 bch2_data_type_str(a.v->data_type));
		break;
	case BCH_DATA_cached:
		bkey_fsck_err_on(!a.v->cached_sectors ||
				 a.v->dirty_sectors ||
				 stripe_sectors ||
				 a.v->stripe,
				 c, alloc_key_cached_inconsistency,
				 "data type inconsistency");

		bkey_fsck_err_on(!a.v->io_time[READ] &&
				 c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs,
				 c, alloc_key_cached_but_read_time_zero,
				 "cached bucket with read_time == 0");
		break;
	case BCH_DATA_stripe:
		break;
	}
fsck_err:
	return ret;
}

void bch2_alloc_v4_swab(struct bkey_s k)
{
	struct bch_alloc_v4 *a = bkey_s_to_alloc_v4(k).v;
	struct bch_backpointer *bp, *bps;

	a->journal_seq		= swab64(a->journal_seq);
	a->flags		= swab32(a->flags);
	a->dirty_sectors	= swab32(a->dirty_sectors);
	a->cached_sectors	= swab32(a->cached_sectors);
	a->io_time[0]		= swab64(a->io_time[0]);
	a->io_time[1]		= swab64(a->io_time[1]);
	a->stripe		= swab32(a->stripe);
	a->nr_external_backpointers = swab32(a->nr_external_backpointers);
	a->fragmentation_lru	= swab64(a->fragmentation_lru);
	a->stripe_sectors	= swab32(a->stripe_sectors);

	bps = alloc_v4_backpointers(a);
	for (bp = bps; bp < bps + BCH_ALLOC_V4_NR_BACKPOINTERS(a); bp++) {
		bp->bucket_offset	= swab40(bp->bucket_offset);
		bp->bucket_len		= swab32(bp->bucket_len);
		bch2_bpos_swab(&bp->pos);
	}
}

void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bch_alloc_v4 _a;
	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);

	prt_newline(out);
	printbuf_indent_add(out, 2);

	prt_printf(out, "gen %u oldest_gen %u data_type ", a->gen, a->oldest_gen);
	bch2_prt_data_type(out, a->data_type);
	prt_newline(out);
	prt_printf(out, "journal_seq %llu\n",		a->journal_seq);
	prt_printf(out, "need_discard %llu\n",		BCH_ALLOC_V4_NEED_DISCARD(a));
	prt_printf(out, "need_inc_gen %llu\n",		BCH_ALLOC_V4_NEED_INC_GEN(a));
	prt_printf(out, "dirty_sectors %u\n",		a->dirty_sectors);
	prt_printf(out, "stripe_sectors %u\n",		a->stripe_sectors);
	prt_printf(out, "cached_sectors %u\n",		a->cached_sectors);
	prt_printf(out, "stripe %u\n",			a->stripe);
	prt_printf(out, "stripe_redundancy %u\n",	a->stripe_redundancy);
	prt_printf(out, "io_time[READ] %llu\n",		a->io_time[READ]);
	prt_printf(out, "io_time[WRITE] %llu\n",	a->io_time[WRITE]);
	prt_printf(out, "fragmentation %llu\n",		a->fragmentation_lru);
	prt_printf(out, "bp_start %llu\n",		BCH_ALLOC_V4_BACKPOINTERS_START(a));
	printbuf_indent_sub(out, 2);
}

void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
{
	if (k.k->type == KEY_TYPE_alloc_v4) {
		void *src, *dst;

		*out = *bkey_s_c_to_alloc_v4(k).v;

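		/*
		 * Move backpointers_start up to the current layout, zeroing
		 * the gap this opens before the backpointers array:
		 */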
		src = alloc_v4_backpointers(out);
		SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
		dst = alloc_v4_backpointers(out);

		if (src < dst)
			memset(src, 0, dst - src);

		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(out, 0);
	} else {
		struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);

		*out = (struct bch_alloc_v4) {
			.journal_seq		= u.journal_seq,
			.flags			= u.need_discard,
			.gen			= u.gen,
			.oldest_gen		= u.oldest_gen,
			.data_type		= u.data_type,
			.stripe_redundancy	= u.stripe_redundancy,
			.dirty_sectors		= u.dirty_sectors,
			.cached_sectors		= u.cached_sectors,
			.io_time[READ]		= u.read_time,
			.io_time[WRITE]		= u.write_time,
			.stripe			= u.stripe,
		};

		SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
	}
}

static noinline struct bkey_i_alloc_v4 *
__bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_i_alloc_v4 *ret;

	ret = bch2_trans_kmalloc(trans, max(bkey_bytes(k.k), sizeof(struct bkey_i_alloc_v4)));
	if (IS_ERR(ret))
		return ret;

	if (k.k->type == KEY_TYPE_alloc_v4) {
		void *src, *dst;

		bkey_reassemble(&ret->k_i, k);

		src = alloc_v4_backpointers(&ret->v);
		SET_BCH_ALLOC_V4_BACKPOINTERS_START(&ret->v, BCH_ALLOC_V4_U64s);
		dst = alloc_v4_backpointers(&ret->v);

		if (src < dst)
			memset(src, 0, dst - src);

		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&ret->v, 0);
		set_alloc_v4_u64s(ret);
	} else {
		bkey_alloc_v4_init(&ret->k_i);
		ret->k.p = k.k->p;
		bch2_alloc_to_v4(k, &ret->v);
	}
	return ret;
}

static inline struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut_inlined(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v4 a;

	if (likely(k.k->type == KEY_TYPE_alloc_v4) &&
	    ((a = bkey_s_c_to_alloc_v4(k), true) &&
	     BCH_ALLOC_V4_NR_BACKPOINTERS(a.v) == 0))
		return bch2_bkey_make_mut_noupdate_typed(trans, k, alloc_v4);

	return __bch2_alloc_to_v4_mut(trans, k);
}

struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
	return bch2_alloc_to_v4_mut_inlined(trans, k);
}

struct bkey_i_alloc_v4 *
bch2_trans_start_alloc_update_noupdate(struct btree_trans *trans, struct btree_iter *iter,
				       struct bpos pos)
{
	struct bkey_s_c k = bch2_bkey_get_iter(trans, iter, BTREE_ID_alloc, pos,
					       BTREE_ITER_with_updates|
					       BTREE_ITER_cached|
					       BTREE_ITER_intent);
	int ret = bkey_err(k);
	if (unlikely(ret))
		return ERR_PTR(ret);

	struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut_inlined(trans, k);
	ret = PTR_ERR_OR_ZERO(a);
	if (unlikely(ret))
		goto err;
	return a;
err:
	bch2_trans_iter_exit(trans, iter);
	return ERR_PTR(ret);
}

__flatten
struct bkey_i_alloc_v4 *bch2_trans_start_alloc_update(struct btree_trans *trans, struct bpos pos,
						      enum btree_iter_update_trigger_flags flags)
{
	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update_noupdate(trans, &iter, pos);
	int ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		return ERR_PTR(ret);

	ret = bch2_trans_update(trans, &iter, &a->k_i, flags);
	bch2_trans_iter_exit(trans, &iter);
	return unlikely(ret) ? ERR_PTR(ret) : a;
}

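/*
 * bucket_gens btree: each key packs the 8 bit gens of
 * 1 << KEY_TYPE_BUCKET_GENS_BITS consecutive buckets; the low bits of a
 * bucket's offset index within the key, the high bits select the key:
 */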
static struct bpos alloc_gens_pos(struct bpos pos, unsigned *offset)
{
	*offset = pos.offset & KEY_TYPE_BUCKET_GENS_MASK;

	pos.offset >>= KEY_TYPE_BUCKET_GENS_BITS;
	return pos;
}

static struct bpos bucket_gens_pos_to_alloc(struct bpos pos, unsigned offset)
{
	pos.offset <<= KEY_TYPE_BUCKET_GENS_BITS;
	pos.offset += offset;
	return pos;
}

static unsigned alloc_gen(struct bkey_s_c k, unsigned offset)
{
	return k.k->type == KEY_TYPE_bucket_gens
		? bkey_s_c_to_bucket_gens(k).v->gens[offset]
		: 0;
}

int bch2_bucket_gens_validate(struct bch_fs *c, struct bkey_s_c k,
			      enum bch_validate_flags flags)
{
	int ret = 0;

	bkey_fsck_err_on(bkey_val_bytes(k.k) != sizeof(struct bch_bucket_gens),
			 c, bucket_gens_val_size_bad,
			 "bad val size (%zu != %zu)",
			 bkey_val_bytes(k.k), sizeof(struct bch_bucket_gens));
fsck_err:
	return ret;
}

void bch2_bucket_gens_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_bucket_gens g = bkey_s_c_to_bucket_gens(k);
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(g.v->gens); i++) {
		if (i)
			prt_char(out, ' ');
		prt_printf(out, "%u", g.v->gens[i]);
	}
}

int bch2_bucket_gens_init(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct bkey_i_bucket_gens g;
	bool have_bucket_gens_key = false;
	int ret;

	ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
				 BTREE_ITER_prefetch, k, ({
		/*
		 * Not a fsck error because this is checked/repaired by
		 * bch2_check_alloc_key() which runs later:
		 */
		if (!bch2_dev_bucket_exists(c, k.k->p))
			continue;

		struct bch_alloc_v4 a;
		u8 gen = bch2_alloc_to_v4(k, &a)->gen;
		unsigned offset;
		struct bpos pos = alloc_gens_pos(iter.pos, &offset);
		int ret2 = 0;

		if (have_bucket_gens_key && bkey_cmp(iter.pos, pos)) {
			ret2 =  bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0) ?:
				bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
			if (ret2)
				goto iter_err;
			have_bucket_gens_key = false;
		}

		if (!have_bucket_gens_key) {
			bkey_bucket_gens_init(&g.k_i);
			g.k.p = pos;
			have_bucket_gens_key = true;
		}

		g.v.gens[offset] = gen;
iter_err:
		ret2;
	}));

	if (have_bucket_gens_key && !ret)
		ret = commit_do(trans, NULL, NULL,
				BCH_TRANS_COMMIT_no_enospc,
			bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0));

	bch2_trans_put(trans);

	bch_err_fn(c, ret);
	return ret;
}

int bch2_alloc_read(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct bch_dev *ca = NULL;
	int ret;

	if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_bucket_gens) {
		ret = for_each_btree_key(trans, iter, BTREE_ID_bucket_gens, POS_MIN,
					 BTREE_ITER_prefetch, k, ({
			u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
			u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;

			if (k.k->type != KEY_TYPE_bucket_gens)
				continue;

			ca = bch2_dev_iterate(c, ca, k.k->p.inode);
			/*
			 * Not a fsck error because this is checked/repaired by
			 * bch2_check_alloc_key() which runs later:
			 */
			if (!ca) {
				bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
				continue;
			}

			const struct bch_bucket_gens *g = bkey_s_c_to_bucket_gens(k).v;

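			/* Only copy gens for buckets that exist on this device: */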
			for (u64 b = max_t(u64, ca->mi.first_bucket, start);
			     b < min_t(u64, ca->mi.nbuckets, end);
			     b++)
				*bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK];
			0;
		}));
	} else {
		ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
					 BTREE_ITER_prefetch, k, ({
			ca = bch2_dev_iterate(c, ca, k.k->p.inode);
			/*
			 * Not a fsck error because this is checked/repaired by
			 * bch2_check_alloc_key() which runs later:
			 */
			if (!ca) {
				bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
				continue;
			}

			struct bch_alloc_v4 a;
			*bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
			0;
		}));
	}

	bch2_dev_put(ca);
	bch2_trans_put(trans);

	bch_err_fn(c, ret);
	return ret;
}

/* Free space/discard btree: */

static int bch2_bucket_do_index(struct btree_trans *trans,
				struct bch_dev *ca,
				struct bkey_s_c alloc_k,
				const struct bch_alloc_v4 *a,
				bool set)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c old;
	struct bkey_i *k;
	enum btree_id btree;
	enum bch_bkey_type old_type = !set ? KEY_TYPE_set : KEY_TYPE_deleted;
	enum bch_bkey_type new_type =  set ? KEY_TYPE_set : KEY_TYPE_deleted;
	struct printbuf buf = PRINTBUF;
	int ret;

	if (a->data_type != BCH_DATA_free &&
	    a->data_type != BCH_DATA_need_discard)
		return 0;

	k = bch2_trans_kmalloc_nomemzero(trans, sizeof(*k));
	if (IS_ERR(k))
		return PTR_ERR(k);

	bkey_init(&k->k);
	k->k.type = new_type;

	switch (a->data_type) {
	case BCH_DATA_free:
		btree = BTREE_ID_freespace;
		k->k.p = alloc_freespace_pos(alloc_k.k->p, *a);
		bch2_key_resize(&k->k, 1);
		break;
	case BCH_DATA_need_discard:
		btree = BTREE_ID_need_discard;
		k->k.p = alloc_k.k->p;
		break;
	default:
		return 0;
	}

	old = bch2_bkey_get_iter(trans, &iter, btree,
				 bkey_start_pos(&k->k),
				 BTREE_ITER_intent);
	ret = bkey_err(old);
	if (ret)
		return ret;

	if (ca->mi.freespace_initialized &&
	    c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info &&
	    bch2_trans_inconsistent_on(old.k->type != old_type, trans,
			"incorrect key when %s %s:%llu:%llu:0 (got %s should be %s)\n"
			" for %s",
			set ? "setting" : "clearing",
			bch2_btree_id_str(btree),
			iter.pos.inode,
			iter.pos.offset,
			bch2_bkey_types[old.k->type],
			bch2_bkey_types[old_type],
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		ret = -EIO;
		goto err;
	}

	ret = bch2_trans_update(trans, &iter, k, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ret;
}

static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
					   struct bpos bucket, u8 gen)
{
	struct btree_iter iter;
	unsigned offset;
	struct bpos pos = alloc_gens_pos(bucket, &offset);
	struct bkey_i_bucket_gens *g;
	struct bkey_s_c k;
	int ret;

	g = bch2_trans_kmalloc(trans, sizeof(*g));
	ret = PTR_ERR_OR_ZERO(g);
	if (ret)
		return ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_bucket_gens, pos,
			       BTREE_ITER_intent|
			       BTREE_ITER_with_updates);
	ret = bkey_err(k);
	if (ret)
		return ret;

	if (k.k->type != KEY_TYPE_bucket_gens) {
		bkey_bucket_gens_init(&g->k_i);
		g->k.p = iter.pos;
	} else {
		bkey_reassemble(&g->k_i, k);
	}

	g->v.gens[offset] = gen;

	ret = bch2_trans_update(trans, &iter, &g->k_i, 0);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static inline int bch2_dev_data_type_accounting_mod(struct btree_trans *trans, struct bch_dev *ca,
						    enum bch_data_type data_type,
						    s64 delta_buckets,
						    s64 delta_sectors,
						    s64 delta_fragmented, unsigned flags)
{
	struct disk_accounting_pos acc = {
		.type = BCH_DISK_ACCOUNTING_dev_data_type,
		.dev_data_type.dev = ca->dev_idx,
		.dev_data_type.data_type = data_type,
	};
	s64 d[3] = { delta_buckets, delta_sectors, delta_fragmented };

	return bch2_disk_accounting_mod(trans, &acc, d, 3, flags & BTREE_TRIGGER_gc);
}

int bch2_alloc_key_to_dev_counters(struct btree_trans *trans, struct bch_dev *ca,
				   const struct bch_alloc_v4 *old,
				   const struct bch_alloc_v4 *new,
				   unsigned flags)
{
	s64 old_sectors = bch2_bucket_sectors(*old);
	s64 new_sectors = bch2_bucket_sectors(*new);
	if (old->data_type != new->data_type) {
		int ret = bch2_dev_data_type_accounting_mod(trans, ca, new->data_type,
				 1, new_sectors, bch2_bucket_sectors_fragmented(ca, *new), flags) ?:
			  bch2_dev_data_type_accounting_mod(trans, ca, old->data_type,
				-1, -old_sectors, -bch2_bucket_sectors_fragmented(ca, *old), flags);
		if (ret)
			return ret;
	} else if (old_sectors != new_sectors) {
		int ret = bch2_dev_data_type_accounting_mod(trans, ca, new->data_type,
					 0,
					 new_sectors - old_sectors,
					 bch2_bucket_sectors_fragmented(ca, *new) -
					 bch2_bucket_sectors_fragmented(ca, *old), flags);
		if (ret)
			return ret;
	}

	s64 old_unstriped = bch2_bucket_sectors_unstriped(*old);
	s64 new_unstriped = bch2_bucket_sectors_unstriped(*new);
	if (old_unstriped != new_unstriped) {
		int ret = bch2_dev_data_type_accounting_mod(trans, ca, BCH_DATA_unstriped,
					 !!new_unstriped - !!old_unstriped,
					 new_unstriped - old_unstriped,
					 0,
					 flags);
		if (ret)
			return ret;
	}

	return 0;
}

int bch2_trigger_alloc(struct btree_trans *trans,
		       enum btree_id btree, unsigned level,
		       struct bkey_s_c old, struct bkey_s new,
		       enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bch_dev *ca = bch2_dev_bucket_tryget(c, new.k->p);
	if (!ca)
		return -EIO;

	struct bch_alloc_v4 old_a_convert;
	const struct bch_alloc_v4 *old_a = bch2_alloc_to_v4(old, &old_a_convert);

	struct bch_alloc_v4 *new_a;
	if (likely(new.k->type == KEY_TYPE_alloc_v4)) {
		new_a = bkey_s_to_alloc_v4(new).v;
	} else {
		BUG_ON(!(flags & BTREE_TRIGGER_gc));

		struct bkey_i_alloc_v4 *new_ka = bch2_alloc_to_v4_mut_inlined(trans, new.s_c);
		ret = PTR_ERR_OR_ZERO(new_ka);
		if (unlikely(ret))
			goto err;
		new_a = &new_ka->v;
	}

	if (flags & BTREE_TRIGGER_transactional) {
		alloc_data_type_set(new_a, new_a->data_type);

		if (bch2_bucket_sectors_total(*new_a) > bch2_bucket_sectors_total(*old_a)) {
			new_a->io_time[READ] = bch2_current_io_time(c, READ);
			new_a->io_time[WRITE] = bch2_current_io_time(c, WRITE);
			SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
			SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);
		}

		if (data_type_is_empty(new_a->data_type) &&
		    BCH_ALLOC_V4_NEED_INC_GEN(new_a) &&
		    !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset)) {
			new_a->gen++;
			SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
			alloc_data_type_set(new_a, new_a->data_type);
		}

		if (old_a->data_type != new_a->data_type ||
		    (new_a->data_type == BCH_DATA_free &&
		     alloc_freespace_genbits(*old_a) != alloc_freespace_genbits(*new_a))) {
			ret =   bch2_bucket_do_index(trans, ca, old, old_a, false) ?:
				bch2_bucket_do_index(trans, ca, new.s_c, new_a, true);
			if (ret)
				goto err;
		}

		if (new_a->data_type == BCH_DATA_cached &&
		    !new_a->io_time[READ])
			new_a->io_time[READ] = bch2_current_io_time(c, READ);

		u64 old_lru = alloc_lru_idx_read(*old_a);
		u64 new_lru = alloc_lru_idx_read(*new_a);
		if (old_lru != new_lru) {
			ret = bch2_lru_change(trans, new.k->p.inode,
					      bucket_to_u64(new.k->p),
					      old_lru, new_lru);
			if (ret)
				goto err;
		}

		new_a->fragmentation_lru = alloc_lru_idx_fragmentation(*new_a, ca);
		if (old_a->fragmentation_lru != new_a->fragmentation_lru) {
			ret = bch2_lru_change(trans,
					      BCH_LRU_FRAGMENTATION_START,
					      bucket_to_u64(new.k->p),
					      old_a->fragmentation_lru, new_a->fragmentation_lru);
			if (ret)
				goto err;
		}

		if (old_a->gen != new_a->gen) {
			ret = bch2_bucket_gen_update(trans, new.k->p, new_a->gen);
			if (ret)
				goto err;
		}

		if ((flags & BTREE_TRIGGER_bucket_invalidate) &&
		    old_a->cached_sectors) {
			ret = bch2_mod_dev_cached_sectors(trans, ca->dev_idx,
							  -((s64) old_a->cached_sectors),
							  flags & BTREE_TRIGGER_gc);
			if (ret)
				goto err;
		}

		ret = bch2_alloc_key_to_dev_counters(trans, ca, old_a, new_a, flags);
		if (ret)
			goto err;
	}

	if ((flags & BTREE_TRIGGER_atomic) && (flags & BTREE_TRIGGER_insert)) {
		u64 journal_seq = trans->journal_res.seq;
		u64 bucket_journal_seq = new_a->journal_seq;

		if ((flags & BTREE_TRIGGER_insert) &&
		    data_type_is_empty(old_a->data_type) !=
		    data_type_is_empty(new_a->data_type) &&
		    new.k->type == KEY_TYPE_alloc_v4) {
			struct bch_alloc_v4 *v = bkey_s_to_alloc_v4(new).v;

			/*
			 * If the btree updates referring to a bucket weren't flushed
			 * before the bucket became empty again, then we don't have
			 * to wait on a journal flush before we can reuse the bucket:
			 */
			v->journal_seq = bucket_journal_seq =
				data_type_is_empty(new_a->data_type) &&
				(journal_seq == v->journal_seq ||
				 bch2_journal_noflush_seq(&c->journal, v->journal_seq))
				? 0 : journal_seq;
		}

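		/*
		 * The bucket just became empty: it can't be reused until the
		 * journal entry that emptied it is flushed:
		 */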
		if (!data_type_is_empty(old_a->data_type) &&
		    data_type_is_empty(new_a->data_type) &&
		    bucket_journal_seq) {
			ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
					c->journal.flushed_seq_ondisk,
					new.k->p.inode, new.k->p.offset,
					bucket_journal_seq);
			if (bch2_fs_fatal_err_on(ret, c,
					"setting bucket_needs_journal_commit: %s", bch2_err_str(ret)))
				goto err;
		}

		if (new_a->gen != old_a->gen) {
			rcu_read_lock();
			u8 *gen = bucket_gen(ca, new.k->p.offset);
			if (unlikely(!gen)) {
				rcu_read_unlock();
				goto invalid_bucket;
			}
			*gen = new_a->gen;
			rcu_read_unlock();
		}

#define eval_state(_a, expr)	({ const struct bch_alloc_v4 *a = _a; expr; })
#define statechange(expr)	!eval_state(old_a, expr) && eval_state(new_a, expr)
#define bucket_flushed(a)	(!a->journal_seq || a->journal_seq <= c->journal.flushed_seq_ondisk)

		if (statechange(a->data_type == BCH_DATA_free) &&
		    bucket_flushed(new_a))
			closure_wake_up(&c->freelist_wait);

		if (statechange(a->data_type == BCH_DATA_need_discard) &&
		    !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset) &&
		    bucket_flushed(new_a))
			bch2_discard_one_bucket_fast(ca, new.k->p.offset);

		if (statechange(a->data_type == BCH_DATA_cached) &&
		    !bch2_bucket_is_open(c, new.k->p.inode, new.k->p.offset) &&
		    should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
			bch2_dev_do_invalidates(ca);

		if (statechange(a->data_type == BCH_DATA_need_gc_gens))
			bch2_gc_gens_async(c);
	}

	if ((flags & BTREE_TRIGGER_gc) && (flags & BTREE_TRIGGER_insert)) {
		rcu_read_lock();
		struct bucket *g = gc_bucket(ca, new.k->p.offset);
		if (unlikely(!g)) {
			rcu_read_unlock();
			goto invalid_bucket;
		}
		g->gen_valid = 1;
		g->gen = new_a->gen;
		rcu_read_unlock();
	}
err:
	printbuf_exit(&buf);
	bch2_dev_put(ca);
	return ret;
invalid_bucket:
	bch2_fs_inconsistent(c, "reference to invalid bucket\n %s",
			     (bch2_bkey_val_to_text(&buf, c, new.s_c), buf.buf));
	ret = -EIO;
	goto err;
}

/*
 * This synthesizes deleted extents for holes, similar to BTREE_ITER_slots for
 * extents style btrees, but works on non-extents btrees:
 */
static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
{
	struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);

	if (bkey_err(k))
		return k;

	if (k.k->type) {
		return k;
	} else {
		struct btree_iter iter2;
		struct bpos next;

		bch2_trans_copy_iter(&iter2, iter);

		struct btree_path *path = btree_iter_path(iter->trans, iter);
		if (!bpos_eq(path->l[0].b->key.k.p, SPOS_MAX))
			end = bkey_min(end, bpos_nosnap_successor(path->l[0].b->key.k.p));

		end = bkey_min(end, POS(iter->pos.inode, iter->pos.offset + U32_MAX - 1));

		/*
		 * btree node min/max is a closed interval, upto takes a half
		 * open interval:
		 */
		k = bch2_btree_iter_peek_upto(&iter2, end);
		next = iter2.pos;
		bch2_trans_iter_exit(iter->trans, &iter2);

		BUG_ON(next.offset >= iter->pos.offset + U32_MAX);

		if (bkey_err(k))
			return k;

		bkey_init(hole);
		hole->p = iter->pos;

		bch2_key_resize(hole, next.offset - iter->pos.offset);
		return (struct bkey_s_c) { hole, NULL };
	}
}

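/* Step to the first valid bucket on the next member device, taking a ref: */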
static bool next_bucket(struct bch_fs *c, struct bch_dev **ca, struct bpos *bucket)
{
	if (*ca) {
		if (bucket->offset < (*ca)->mi.first_bucket)
			bucket->offset = (*ca)->mi.first_bucket;

		if (bucket->offset < (*ca)->mi.nbuckets)
			return true;

		bch2_dev_put(*ca);
		*ca = NULL;
		bucket->inode++;
		bucket->offset = 0;
	}

	rcu_read_lock();
	*ca = __bch2_next_dev_idx(c, bucket->inode, NULL);
	if (*ca) {
		*bucket = POS((*ca)->dev_idx, (*ca)->mi.first_bucket);
		bch2_dev_get(*ca);
	}
	rcu_read_unlock();

	return *ca != NULL;
}

static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter,
							struct bch_dev **ca, struct bkey *hole)
{
	struct bch_fs *c = iter->trans->c;
	struct bkey_s_c k;
again:
	k = bch2_get_key_or_hole(iter, POS_MAX, hole);
	if (bkey_err(k))
		return k;

	*ca = bch2_dev_iterate_noerror(c, *ca, k.k->p.inode);

	if (!k.k->type) {
		struct bpos hole_start = bkey_start_pos(k.k);

		if (!*ca || !bucket_valid(*ca, hole_start.offset)) {
			if (!next_bucket(c, ca, &hole_start))
				return bkey_s_c_null;

			bch2_btree_iter_set_pos(iter, hole_start);
			goto again;
		}

		if (k.k->p.offset > (*ca)->mi.nbuckets)
			bch2_key_resize(hole, (*ca)->mi.nbuckets - hole_start.offset);
	}

	return k;
}

static noinline_for_stack
int bch2_check_alloc_key(struct btree_trans *trans,
			 struct bkey_s_c alloc_k,
			 struct btree_iter *alloc_iter,
			 struct btree_iter *discard_iter,
			 struct btree_iter *freespace_iter,
			 struct btree_iter *bucket_gens_iter)
{
	struct bch_fs *c = trans->c;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	unsigned discard_key_type, freespace_key_type;
	unsigned gens_offset;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, alloc_k.k->p);
	if (fsck_err_on(!ca,
			trans, alloc_key_to_missing_dev_bucket,
			"alloc key for invalid device:bucket %llu:%llu",
			alloc_k.k->p.inode, alloc_k.k->p.offset))
		ret = bch2_btree_delete_at(trans, alloc_iter, 0);
	if (!ca)
		return ret;

	if (!ca->mi.freespace_initialized)
		goto out;

	a = bch2_alloc_to_v4(alloc_k, &a_convert);

	discard_key_type = a->data_type == BCH_DATA_need_discard ? KEY_TYPE_set : 0;
	bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p);
	k = bch2_btree_iter_peek_slot(discard_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (fsck_err_on(k.k->type != discard_key_type,
			trans, need_discard_key_wrong,
			"incorrect key in need_discard btree (got %s should be %s)\n"
			" %s",
			bch2_bkey_types[k.k->type],
			bch2_bkey_types[discard_key_type],
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type	= discard_key_type;
		update->k.p	= discard_iter->pos;

		ret = bch2_trans_update(trans, discard_iter, update, 0);
		if (ret)
			goto err;
	}

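	/*
	 * Freespace keys live at a position derived from both the bucket and
	 * the alloc key's generation bits, via alloc_freespace_pos():
	 */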
	freespace_key_type = a->data_type == BCH_DATA_free ? KEY_TYPE_set : 0;
	bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
	k = bch2_btree_iter_peek_slot(freespace_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (fsck_err_on(k.k->type != freespace_key_type,
			trans, freespace_key_wrong,
			"incorrect key in freespace btree (got %s should be %s)\n"
			" %s",
			bch2_bkey_types[k.k->type],
			bch2_bkey_types[freespace_key_type],
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type	= freespace_key_type;
		update->k.p	= freespace_iter->pos;
		bch2_key_resize(&update->k, 1);

		ret = bch2_trans_update(trans, freespace_iter, update, 0);
		if (ret)
			goto err;
	}

	bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
	k = bch2_btree_iter_peek_slot(bucket_gens_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (fsck_err_on(a->gen != alloc_gen(k, gens_offset),
			trans, bucket_gens_key_wrong,
			"incorrect gen in bucket_gens btree (got %u should be %u)\n"
			" %s",
			alloc_gen(k, gens_offset), a->gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i_bucket_gens *g =
			bch2_trans_kmalloc(trans, sizeof(*g));

		ret = PTR_ERR_OR_ZERO(g);
		if (ret)
			goto err;

		if (k.k->type == KEY_TYPE_bucket_gens) {
			bkey_reassemble(&g->k_i, k);
		} else {
			bkey_bucket_gens_init(&g->k_i);
			g->k.p = alloc_gens_pos(alloc_k.k->p, &gens_offset);
		}

		g->v.gens[gens_offset] = a->gen;

		ret = bch2_trans_update(trans, bucket_gens_iter, &g->k_i, 0);
		if (ret)
			goto err;
	}
out:
err:
fsck_err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}

static noinline_for_stack
int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
				    struct bch_dev *ca,
				    struct bpos start,
				    struct bpos *end,
				    struct btree_iter *freespace_iter)
{
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	int ret;

	if (!ca->mi.freespace_initialized)
		return 0;

	bch2_btree_iter_set_pos(freespace_iter, start);

	k = bch2_btree_iter_peek_slot(freespace_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	*end = bkey_min(k.k->p, *end);

	if (fsck_err_on(k.k->type != KEY_TYPE_set,
			trans, freespace_hole_missing,
			"hole in alloc btree missing in freespace btree\n"
			" device %llu buckets %llu-%llu",
			freespace_iter->pos.inode,
			freespace_iter->pos.offset,
			end->offset)) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type	= KEY_TYPE_set;
		update->k.p	= freespace_iter->pos;
		bch2_key_resize(&update->k,
				min_t(u64, U32_MAX, end->offset -
				      freespace_iter->pos.offset));

		ret = bch2_trans_update(trans, freespace_iter, update, 0);
		if (ret)
			goto err;
	}
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

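/* A hole in the alloc btree must have only zero gens in bucket_gens: */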
static noinline_for_stack
int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
				      struct bpos start,
				      struct bpos *end,
				      struct btree_iter *bucket_gens_iter)
{
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	unsigned i, gens_offset, gens_end_offset;
	int ret;

	bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(start, &gens_offset));

	k = bch2_btree_iter_peek_slot(bucket_gens_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (bkey_cmp(alloc_gens_pos(start, &gens_offset),
		     alloc_gens_pos(*end, &gens_end_offset)))
		gens_end_offset = KEY_TYPE_BUCKET_GENS_NR;

	if (k.k->type == KEY_TYPE_bucket_gens) {
		struct bkey_i_bucket_gens g;
		bool need_update = false;

		bkey_reassemble(&g.k_i, k);

		for (i = gens_offset; i < gens_end_offset; i++) {
			if (fsck_err_on(g.v.gens[i], trans,
					bucket_gens_hole_wrong,
					"hole in alloc btree at %llu:%llu with nonzero gen in bucket_gens btree (%u)",
					bucket_gens_pos_to_alloc(k.k->p, i).inode,
					bucket_gens_pos_to_alloc(k.k->p, i).offset,
					g.v.gens[i])) {
				g.v.gens[i] = 0;
				need_update = true;
			}
		}

		if (need_update) {
			struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));

			ret = PTR_ERR_OR_ZERO(u);
			if (ret)
				goto err;

			memcpy(u, &g, sizeof(g));

			ret = bch2_trans_update(trans, bucket_gens_iter, u, 0);
			if (ret)
				goto err;
		}
	}

	*end = bkey_min(*end, bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0));
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

static noinline_for_stack int bch2_check_discard_freespace_key(struct btree_trans *trans,
							       struct btree_iter *iter)
{
	struct bch_fs *c = trans->c;
	struct btree_iter alloc_iter;
	struct bkey_s_c alloc_k;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	u64 genbits;
	struct bpos pos;
	enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard
		? BCH_DATA_need_discard
		: BCH_DATA_free;
	struct printbuf buf = PRINTBUF;
	int ret;

	pos = iter->pos;
	pos.offset &= ~(~0ULL << 56);
	genbits = iter->pos.offset & (~0ULL << 56);

	alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc, pos, 0);
	ret = bkey_err(alloc_k);
	if (ret)
		return ret;

	if (fsck_err_on(!bch2_dev_bucket_exists(c, pos),
			trans, need_discard_freespace_key_to_invalid_dev_bucket,
			"entry in %s btree for nonexistent dev:bucket %llu:%llu",
			bch2_btree_id_str(iter->btree_id), pos.inode, pos.offset))
		goto delete;

	a = bch2_alloc_to_v4(alloc_k, &a_convert);

	if (fsck_err_on(a->data_type != state ||
			(state == BCH_DATA_free &&
			 genbits != alloc_freespace_genbits(*a)),
			trans, need_discard_freespace_key_bad,
			"%s\n incorrectly set at %s:%llu:%llu:0 (free %u, genbits %llu should be %llu)",
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
			bch2_btree_id_str(iter->btree_id),
			iter->pos.inode,
			iter->pos.offset,
			a->data_type == state,
			genbits >> 56, alloc_freespace_genbits(*a) >> 56))
		goto delete;
out:
fsck_err:
	bch2_set_btree_iter_dontneed(&alloc_iter);
	bch2_trans_iter_exit(trans, &alloc_iter);
	printbuf_exit(&buf);
	return ret;
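	/* Freespace keys have nonzero size, so deleting one takes a length: */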
delete:
	ret =   bch2_btree_delete_extent_at(trans, iter,
			iter->btree_id == BTREE_ID_freespace ? 1 : 0, 0) ?:
		bch2_trans_commit(trans, NULL, NULL,
			BCH_TRANS_COMMIT_no_enospc);
	goto out;
}

/*
 * We've already checked that generation numbers in the bucket_gens btree are
 * valid for buckets that exist; this just checks for keys for nonexistent
 * buckets.
 */
static noinline_for_stack
int bch2_check_bucket_gens_key(struct btree_trans *trans,
			       struct btree_iter *iter,
			       struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_i_bucket_gens g;
	u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
	u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
	u64 b;
	bool need_update = false;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	BUG_ON(k.k->type != KEY_TYPE_bucket_gens);
	bkey_reassemble(&g.k_i, k);

	struct bch_dev *ca = bch2_dev_tryget_noerror(c, k.k->p.inode);
	if (!ca) {
		if (fsck_err(trans, bucket_gens_to_invalid_dev,
			     "bucket_gens key for invalid device:\n %s",
			     (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			ret = bch2_btree_delete_at(trans, iter, 0);
		goto out;
	}

	if (fsck_err_on(end <= ca->mi.first_bucket ||
			start >= ca->mi.nbuckets,
			trans, bucket_gens_to_invalid_buckets,
			"bucket_gens key for invalid buckets:\n %s",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		ret = bch2_btree_delete_at(trans, iter, 0);
		goto out;
	}

	for (b = start; b < ca->mi.first_bucket; b++)
		if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK],
				trans, bucket_gens_nonzero_for_invalid_buckets,
				"bucket_gens key has nonzero gen for invalid bucket")) {
			g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
			need_update = true;
		}

	for (b = ca->mi.nbuckets; b < end; b++)
		if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK],
				trans, bucket_gens_nonzero_for_invalid_buckets,
				"bucket_gens key has nonzero gen for invalid bucket")) {
			g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
			need_update = true;
		}

	if (need_update) {
		struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));

		ret = PTR_ERR_OR_ZERO(u);
		if (ret)
			goto out;

		memcpy(u, &g, sizeof(g));
		ret = bch2_trans_update(trans, iter, u, 0);
	}
out:
fsck_err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}

int bch2_check_alloc_info(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter, discard_iter, freespace_iter, bucket_gens_iter;
	struct bch_dev *ca = NULL;
	struct bkey hole;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS_MIN,
			     BTREE_ITER_prefetch);
	bch2_trans_iter_init(trans, &discard_iter, BTREE_ID_need_discard, POS_MIN,
			     BTREE_ITER_prefetch);
	bch2_trans_iter_init(trans, &freespace_iter, BTREE_ID_freespace, POS_MIN,
			     BTREE_ITER_prefetch);
	bch2_trans_iter_init(trans, &bucket_gens_iter, BTREE_ID_bucket_gens, POS_MIN,
			     BTREE_ITER_prefetch);

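	/*
	 * Walk the alloc btree: live keys are checked against the
	 * need_discard/freespace/bucket_gens btrees, and holes are checked
	 * for stale entries in those btrees:
	 */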
	while (1) {
		struct bpos next;

		bch2_trans_begin(trans);

		k = bch2_get_key_or_real_bucket_hole(&iter, &ca, &hole);
		ret = bkey_err(k);
		if (ret)
			goto bkey_err;

		if (!k.k)
			break;

		if (k.k->type) {
			next = bpos_nosnap_successor(k.k->p);

			ret = bch2_check_alloc_key(trans,
						   k, &iter,
						   &discard_iter,
						   &freespace_iter,
						   &bucket_gens_iter);
			if (ret)
				goto bkey_err;
		} else {
			next = k.k->p;

			ret = bch2_check_alloc_hole_freespace(trans, ca,
						    bkey_start_pos(k.k),
						    &next,
						    &freespace_iter) ?:
			      bch2_check_alloc_hole_bucket_gens(trans,
						    bkey_start_pos(k.k),
						    &next,
						    &bucket_gens_iter);
			if (ret)
				goto bkey_err;
		}

		ret = bch2_trans_commit(trans, NULL, NULL,
					BCH_TRANS_COMMIT_no_enospc);
		if (ret)
			goto bkey_err;

		bch2_btree_iter_set_pos(&iter, next);
bkey_err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;
	}
	bch2_trans_iter_exit(trans, &bucket_gens_iter);
	bch2_trans_iter_exit(trans, &freespace_iter);
	bch2_trans_iter_exit(trans, &discard_iter);
	bch2_trans_iter_exit(trans, &iter);
	bch2_dev_put(ca);
	ca = NULL;

	if (ret < 0)
		goto err;

	ret = for_each_btree_key(trans, iter,
				 BTREE_ID_need_discard, POS_MIN,
				 BTREE_ITER_prefetch, k,
		bch2_check_discard_freespace_key(trans, &iter));
	if (ret)
		goto err;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_freespace, POS_MIN,
			     BTREE_ITER_prefetch);
	while (1) {
		bch2_trans_begin(trans);
		k = bch2_btree_iter_peek(&iter);
		if (!k.k)
			break;

		ret = bkey_err(k) ?:
			bch2_check_discard_freespace_key(trans, &iter);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
			ret = 0;
			continue;
		}
		if (ret) {
			struct printbuf buf = PRINTBUF;
			bch2_bkey_val_to_text(&buf, c, k);

			bch_err(c, "while checking %s", buf.buf);
			printbuf_exit(&buf);
			break;
		}

		bch2_btree_iter_set_pos(&iter, bpos_nosnap_successor(iter.pos));
	}
	bch2_trans_iter_exit(trans, &iter);
	if (ret)
		goto err;

	ret = for_each_btree_key_commit(trans, iter,
			BTREE_ID_bucket_gens, POS_MIN,
			BTREE_ITER_prefetch, k,
			NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
		bch2_check_bucket_gens_key(trans, &iter, k));
err:
	bch2_trans_put(trans);
	bch_err_fn(c, ret);
	return ret;
}

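/*
 * An alloc key with a nonzero fragmentation_lru needs a matching entry in the
 * fragmentation lru, and a cached bucket needs one in its device's lru, keyed
 * by read time:
 */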
static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
				       struct btree_iter *alloc_iter,
				       struct bkey_buf *last_flushed)
{
	struct bch_fs *c = trans->c;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	struct bkey_s_c alloc_k;
	struct printbuf buf = PRINTBUF;
	int ret;

	alloc_k = bch2_btree_iter_peek(alloc_iter);
	if (!alloc_k.k)
		return 0;

	ret = bkey_err(alloc_k);
	if (ret)
		return ret;

	a = bch2_alloc_to_v4(alloc_k, &a_convert);

	if (a->fragmentation_lru) {
		ret = bch2_lru_check_set(trans, BCH_LRU_FRAGMENTATION_START,
					 a->fragmentation_lru,
					 alloc_k, last_flushed);
		if (ret)
			return ret;
	}

	if (a->data_type != BCH_DATA_cached)
		return 0;

	if (fsck_err_on(!a->io_time[READ],
			trans, alloc_key_cached_but_read_time_zero,
			"cached bucket with read_time 0\n"
			" %s",
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i_alloc_v4 *a_mut =
			bch2_alloc_to_v4_mut(trans, alloc_k);
		ret = PTR_ERR_OR_ZERO(a_mut);
		if (ret)
			goto err;

		a_mut->v.io_time[READ] = bch2_current_io_time(c, READ);
		ret = bch2_trans_update(trans, alloc_iter,
					&a_mut->k_i, BTREE_TRIGGER_norun);
		if (ret)
			goto err;

		a = &a_mut->v;
	}

	ret = bch2_lru_check_set(trans, alloc_k.k->p.inode, a->io_time[READ],
				 alloc_k, last_flushed);
	if (ret)
		goto err;
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
{
	struct bkey_buf last_flushed;

	bch2_bkey_buf_init(&last_flushed);
	bkey_init(&last_flushed.k->k);

	int ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
				POS_MIN, BTREE_ITER_prefetch, k,
				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			bch2_check_alloc_to_lru_ref(trans, &iter, &last_flushed)));

	bch2_bkey_buf_exit(&last_flushed, c);
	bch_err_fn(c, ret);
	return ret;
}

static int discard_in_flight_add(struct bch_dev *ca, u64 bucket, bool in_progress)
{
	int ret;

	mutex_lock(&ca->discard_buckets_in_flight_lock);
	darray_for_each(ca->discard_buckets_in_flight, i)
		if (i->bucket == bucket) {
			ret = -BCH_ERR_EEXIST_discard_in_flight_add;
			goto out;
		}

	ret = darray_push(&ca->discard_buckets_in_flight, ((struct discard_in_flight) {
			   .in_progress = in_progress,
			   .bucket	= bucket,
	}));
out:
	mutex_unlock(&ca->discard_buckets_in_flight_lock);
	return ret;
}

static void discard_in_flight_remove(struct bch_dev *ca, u64 bucket)
{
	mutex_lock(&ca->discard_buckets_in_flight_lock);
	darray_for_each(ca->discard_buckets_in_flight, i)
		if (i->bucket == bucket) {
			BUG_ON(!i->in_progress);
			darray_remove_item(&ca->discard_buckets_in_flight, i);
			goto found;
		}
	BUG();
found:
	mutex_unlock(&ca->discard_buckets_in_flight_lock);
}

struct discard_buckets_state {
	u64		seen;
	u64		open;
	u64		need_journal_commit;
	u64		discarded;
	u64		need_journal_commit_this_dev;
};

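/*
 * Take one bucket off the need_discard btree: issue the discard if allowed,
 * then clear need_discard and move the bucket to free:
 */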
static int bch2_discard_one_bucket(struct btree_trans *trans,
				   struct bch_dev *ca,
				   struct btree_iter *need_discard_iter,
				   struct bpos *discard_pos_done,
				   struct discard_buckets_state *s)
{
	struct bch_fs *c = trans->c;
	struct bpos pos = need_discard_iter->pos;
	struct btree_iter iter = { NULL };
	struct bkey_s_c k;
	struct bkey_i_alloc_v4 *a;
	struct printbuf buf = PRINTBUF;
	bool discard_locked = false;
	int ret = 0;

	if (bch2_bucket_is_open_safe(c, pos.inode, pos.offset)) {
		s->open++;
		goto out;
	}

	if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
			c->journal.flushed_seq_ondisk,
			pos.inode, pos.offset)) {
		s->need_journal_commit++;
		s->need_journal_commit_this_dev++;
		goto out;
	}

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
			       need_discard_iter->pos,
			       BTREE_ITER_cached);
	ret = bkey_err(k);
	if (ret)
		goto out;

	a = bch2_alloc_to_v4_mut(trans, k);
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		goto out;

	if (bch2_bucket_sectors_total(a->v)) {
		if (bch2_trans_inconsistent_on(c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info,
					       trans, "attempting to discard bucket with dirty data\n%s",
					       (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			ret = -EIO;
		goto out;
	}

	if (a->v.data_type != BCH_DATA_need_discard) {
		if (data_type_is_empty(a->v.data_type) &&
		    BCH_ALLOC_V4_NEED_INC_GEN(&a->v)) {
			a->v.gen++;
			SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
			goto write;
		}

		if (bch2_trans_inconsistent_on(c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info,
					       trans, "bucket incorrectly set in need_discard btree\n"
					       "%s",
					       (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			ret = -EIO;
		goto out;
	}

	if (a->v.journal_seq > c->journal.flushed_seq_ondisk) {
		if (bch2_trans_inconsistent_on(c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info,
					       trans, "clearing need_discard but journal_seq %llu > flushed_seq %llu\n%s",
					       a->v.journal_seq,
					       c->journal.flushed_seq_ondisk,
					       (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			ret = -EIO;
		goto out;
	}

	if (discard_in_flight_add(ca, iter.pos.offset, true))
		goto out;

	discard_locked = true;

	if (!bkey_eq(*discard_pos_done, iter.pos) &&
	    ca->mi.discard && !c->opts.nochanges) {
		/*
		 * This works without any other locks because this is the only
		 * thread that removes items from the need_discard tree
		 */
		bch2_trans_unlock_long(trans);
		blkdev_issue_discard(ca->disk_sb.bdev,
				     k.k->p.offset * ca->mi.bucket_size,
				     ca->mi.bucket_size,
				     GFP_KERNEL);
		*discard_pos_done = iter.pos;

		ret = bch2_trans_relock_notrace(trans);
		if (ret)
			goto out;
	}

	SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
write:
	alloc_data_type_set(&a->v, a->v.data_type);

	ret =   bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  BCH_WATERMARK_btree|
				  BCH_TRANS_COMMIT_no_enospc);
	if (ret)
		goto out;

	count_event(c, bucket_discard);
	s->discarded++;
out:
	if (discard_locked)
		discard_in_flight_remove(ca, iter.pos.offset);
	s->seen++;
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ret;
}

static void bch2_do_discards_work(struct work_struct *work)
{
	struct bch_dev *ca = container_of(work, struct bch_dev, discard_work);
	struct bch_fs *c = ca->fs;
	struct discard_buckets_state s = {};
	struct bpos discard_pos_done = POS_MAX;
	int ret;

	/*
	 * We're doing the commit in bch2_discard_one_bucket instead of using
	 * for_each_btree_key_commit() so that we can increment counters after
	 * successful commit:
	 */
	ret = bch2_trans_run(c,
		for_each_btree_key_upto(trans, iter,
					BTREE_ID_need_discard,
					POS(ca->dev_idx, 0),
					POS(ca->dev_idx, U64_MAX), 0, k,
			bch2_discard_one_bucket(trans, ca, &iter, &discard_pos_done, &s)));

	trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded,
			      bch2_err_str(ret));

	bch2_write_ref_put(c, BCH_WRITE_REF_discard);
	percpu_ref_put(&ca->io_ref);
}

void bch2_dev_do_discards(struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;

	if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
		return;

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard))
		goto put_ioref;

	if (queue_work(c->write_ref_wq, &ca->discard_work))
		return;

	bch2_write_ref_put(c, BCH_WRITE_REF_discard);
put_ioref:
	percpu_ref_put(&ca->io_ref);
}

void bch2_do_discards(struct bch_fs *c)
{
	for_each_member_device(c, ca)
		bch2_dev_do_discards(ca);
}

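/*
 * Fast path used by the discard_fast worker: clear need_discard on a single
 * bucket that has just been discarded:
 */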
static int bch2_clear_bucket_needs_discard(struct btree_trans *trans, struct bpos bucket)
{
	struct btree_iter iter;
	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, bucket, BTREE_ITER_intent);
	struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
	int ret = bkey_err(k);
	if (ret)
		goto err;

	struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut(trans, k);
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		goto err;

	BUG_ON(a->v.dirty_sectors);
	SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
	alloc_data_type_set(&a->v, a->v.data_type);

	ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

static void bch2_do_discards_fast_work(struct work_struct *work)
{
	struct bch_dev *ca = container_of(work, struct bch_dev, discard_fast_work);
	struct bch_fs *c = ca->fs;

	while (1) {
		bool got_bucket = false;
		u64 bucket;

		mutex_lock(&ca->discard_buckets_in_flight_lock);
		darray_for_each(ca->discard_buckets_in_flight, i) {
			if (i->in_progress)
				continue;

			got_bucket = true;
			bucket = i->bucket;
			i->in_progress = true;
			break;
		}
		mutex_unlock(&ca->discard_buckets_in_flight_lock);

		if (!got_bucket)
			break;

		if (ca->mi.discard && !c->opts.nochanges)
			blkdev_issue_discard(ca->disk_sb.bdev,
					     bucket_to_sector(ca, bucket),
					     ca->mi.bucket_size,
					     GFP_KERNEL);

		int ret = bch2_trans_do(c, NULL, NULL,
			BCH_WATERMARK_btree|
			BCH_TRANS_COMMIT_no_enospc,
			bch2_clear_bucket_needs_discard(trans, POS(ca->dev_idx, bucket)));
		bch_err_fn(c, ret);

		discard_in_flight_remove(ca, bucket);

		if (ret)
			break;
	}

	bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
	percpu_ref_put(&ca->io_ref);
}

static void bch2_discard_one_bucket_fast(struct bch_dev *ca, u64 bucket)
{
	struct bch_fs *c = ca->fs;

	if (discard_in_flight_add(ca, bucket, false))
		return;

	if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
		return;

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard_fast))
		goto put_ioref;

	if (queue_work(c->write_ref_wq, &ca->discard_fast_work))
		return;

	bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
put_ioref:
	percpu_ref_put(&ca->io_ref);
}

static int invalidate_one_bucket(struct btree_trans *trans,
				 struct btree_iter *lru_iter,
				 struct bkey_s_c lru_k,
				 s64 *nr_to_invalidate)
{
	struct bch_fs *c = trans->c;
	struct bkey_i_alloc_v4 *a = NULL;
	struct printbuf buf = PRINTBUF;
	struct bpos bucket = u64_to_bucket(lru_k.k->p.offset);
	unsigned cached_sectors;
	int ret = 0;

	if (*nr_to_invalidate <= 0)
		return 1;

	if (!bch2_dev_bucket_exists(c, bucket)) {
		prt_str(&buf, "lru entry points to invalid bucket");
		goto err;
	}

	if (bch2_bucket_is_open_safe(c, bucket.inode, bucket.offset))
		return 0;

	a = bch2_trans_start_alloc_update(trans, bucket, BTREE_TRIGGER_bucket_invalidate);
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		goto out;

	/* We expect harmless races here due to the btree write buffer: */
	if (lru_pos_time(lru_iter->pos) != alloc_lru_idx_read(a->v))
		goto out;

	BUG_ON(a->v.data_type != BCH_DATA_cached);
	BUG_ON(a->v.dirty_sectors);

	if (!a->v.cached_sectors)
		bch_err(c, "invalidating empty bucket, confused");

	cached_sectors = a->v.cached_sectors;

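	/*
	 * Empty the bucket: bumping the gen invalidates any remaining cached
	 * pointers into it:
	 */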
static int invalidate_one_bucket(struct btree_trans *trans,
				 struct btree_iter *lru_iter,
				 struct bkey_s_c lru_k,
				 s64 *nr_to_invalidate)
{
	struct bch_fs *c = trans->c;
	struct bkey_i_alloc_v4 *a = NULL;
	struct printbuf buf = PRINTBUF;
	struct bpos bucket = u64_to_bucket(lru_k.k->p.offset);
	unsigned cached_sectors;
	int ret = 0;

	if (*nr_to_invalidate <= 0)
		return 1;

	if (!bch2_dev_bucket_exists(c, bucket)) {
		prt_str(&buf, "lru entry points to invalid bucket");
		goto err;
	}

	if (bch2_bucket_is_open_safe(c, bucket.inode, bucket.offset))
		return 0;

	a = bch2_trans_start_alloc_update(trans, bucket, BTREE_TRIGGER_bucket_invalidate);
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		goto out;

	/* We expect harmless races here due to the btree write buffer: */
	if (lru_pos_time(lru_iter->pos) != alloc_lru_idx_read(a->v))
		goto out;

	BUG_ON(a->v.data_type != BCH_DATA_cached);
	BUG_ON(a->v.dirty_sectors);

	if (!a->v.cached_sectors)
		bch_err(c, "invalidating empty bucket, confused");

	cached_sectors = a->v.cached_sectors;

	SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
	a->v.gen++;
	a->v.data_type		= 0;
	a->v.dirty_sectors	= 0;
	a->v.stripe_sectors	= 0;
	a->v.cached_sectors	= 0;
	a->v.io_time[READ]	= bch2_current_io_time(c, READ);
	a->v.io_time[WRITE]	= bch2_current_io_time(c, WRITE);

	ret = bch2_trans_commit(trans, NULL, NULL,
				BCH_WATERMARK_btree|
				BCH_TRANS_COMMIT_no_enospc);
	if (ret)
		goto out;

	trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors);
	--*nr_to_invalidate;
out:
	printbuf_exit(&buf);
	return ret;
err:
	prt_str(&buf, "\n lru key: ");
	bch2_bkey_val_to_text(&buf, c, lru_k);

	prt_str(&buf, "\n lru entry: ");
	bch2_lru_pos_to_text(&buf, lru_iter->pos);

	prt_str(&buf, "\n alloc key: ");
	if (!a)
		bch2_bpos_to_text(&buf, bucket);
	else
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));

	bch_err(c, "%s", buf.buf);
	if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_lrus) {
		bch2_inconsistent_error(c);
		ret = -EINVAL;
	}

	goto out;
}

static struct bkey_s_c next_lru_key(struct btree_trans *trans, struct btree_iter *iter,
				    struct bch_dev *ca, bool *wrapped)
{
	struct bkey_s_c k;
again:
	k = bch2_btree_iter_peek_upto(iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX));
	if (!k.k && !*wrapped) {
		bch2_btree_iter_set_pos(iter, lru_pos(ca->dev_idx, 0, 0));
		*wrapped = true;
		goto again;
	}

	return k;
}

static void bch2_do_invalidates_work(struct work_struct *work)
{
	struct bch_dev *ca = container_of(work, struct bch_dev, invalidate_work);
	struct bch_fs *c = ca->fs;
	struct btree_trans *trans = bch2_trans_get(c);
	int ret = 0;

	ret = bch2_btree_write_buffer_tryflush(trans);
	if (ret)
		goto err;

	s64 nr_to_invalidate =
		should_invalidate_buckets(ca, bch2_dev_usage_read(ca));
	struct btree_iter iter;
	bool wrapped = false;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_lru,
			     lru_pos(ca->dev_idx, 0,
				     ((bch2_current_io_time(c, READ) + U32_MAX) &
				      LRU_TIME_MAX)), 0);

	while (true) {
		bch2_trans_begin(trans);

		struct bkey_s_c k = next_lru_key(trans, &iter, ca, &wrapped);
		ret = bkey_err(k);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;
		if (!k.k)
			break;

		ret = invalidate_one_bucket(trans, &iter, k, &nr_to_invalidate);
		if (ret)
			break;

		bch2_btree_iter_advance(&iter);
	}
	bch2_trans_iter_exit(trans, &iter);
err:
	bch2_trans_put(trans);
	bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
	percpu_ref_put(&ca->io_ref);
}

void bch2_dev_do_invalidates(struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;

	if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
		return;

	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_invalidate))
		goto put_ioref;

	if (queue_work(c->write_ref_wq, &ca->invalidate_work))
		return;

	bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
put_ioref:
	percpu_ref_put(&ca->io_ref);
}

void bch2_do_invalidates(struct bch_fs *c)
{
	for_each_member_device(c, ca)
		bch2_dev_do_invalidates(ca);
}
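
/*
 * Walk every bucket in [bucket_start, bucket_end) on @ca and build the
 * auxiliary indices from the alloc btree: live alloc keys are indexed via
 * bch2_bucket_do_index(), and holes (buckets with no alloc key, i.e. free
 * buckets) are inserted into the freespace btree as KEY_TYPE_set extents.
 * Runs at mount for members that don't have freespace_initialized set.
 */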
int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
			    u64 bucket_start, u64 bucket_end)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey hole;
	struct bpos end = POS(ca->dev_idx, bucket_end);
	struct bch_member *m;
	unsigned long last_updated = jiffies;
	int ret;

	BUG_ON(bucket_start > bucket_end);
	BUG_ON(bucket_end > ca->mi.nbuckets);

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
			     POS(ca->dev_idx, max_t(u64, ca->mi.first_bucket, bucket_start)),
			     BTREE_ITER_prefetch);
	/*
	 * Scan the alloc btree for every bucket on @ca, and add buckets to the
	 * freespace/need_discard/need_gc_gens btrees as needed:
	 */
	while (1) {
		/* time_after() so the progress message survives jiffies wraparound: */
		if (time_after(jiffies, last_updated + HZ * 10)) {
			bch_info(ca, "%s: currently at %llu/%llu",
				 __func__, iter.pos.offset, ca->mi.nbuckets);
			last_updated = jiffies;
		}

		bch2_trans_begin(trans);

		if (bkey_ge(iter.pos, end)) {
			ret = 0;
			break;
		}

		k = bch2_get_key_or_hole(&iter, end, &hole);
		ret = bkey_err(k);
		if (ret)
			goto bkey_err;

		if (k.k->type) {
			/*
			 * We process live keys in the alloc btree one at a
			 * time:
			 */
			struct bch_alloc_v4 a_convert;
			const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);

			ret = bch2_bucket_do_index(trans, ca, k, a, true) ?:
				bch2_trans_commit(trans, NULL, NULL,
						  BCH_TRANS_COMMIT_no_enospc);
			if (ret)
				goto bkey_err;

			bch2_btree_iter_advance(&iter);
		} else {
			struct bkey_i *freespace;

			freespace = bch2_trans_kmalloc(trans, sizeof(*freespace));
			ret = PTR_ERR_OR_ZERO(freespace);
			if (ret)
				goto bkey_err;

			bkey_init(&freespace->k);
			freespace->k.type	= KEY_TYPE_set;
			freespace->k.p		= k.k->p;
			freespace->k.size	= k.k->size;

			ret = bch2_btree_insert_trans(trans, BTREE_ID_freespace, freespace, 0) ?:
				bch2_trans_commit(trans, NULL, NULL,
						  BCH_TRANS_COMMIT_no_enospc);
			if (ret)
				goto bkey_err;

			bch2_btree_iter_set_pos(&iter, k.k->p);
		}
bkey_err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;
	}

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);

	if (ret < 0) {
		bch_err_msg(ca, ret, "initializing free space");
		return ret;
	}

	mutex_lock(&c->sb_lock);
	m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
	SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, true);
	mutex_unlock(&c->sb_lock);

	return 0;
}

int bch2_fs_freespace_init(struct bch_fs *c)
{
	int ret = 0;
	bool doing_init = false;

	/*
	 * We can crash during the device add path, so we need to check this on
	 * every mount:
	 */

	for_each_member_device(c, ca) {
		if (ca->mi.freespace_initialized)
			continue;

		if (!doing_init) {
			bch_info(c, "initializing freespace");
			doing_init = true;
		}

		ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
		if (ret) {
			bch2_dev_put(ca);
			bch_err_fn(c, ret);
			return ret;
		}
	}

	if (doing_init) {
		mutex_lock(&c->sb_lock);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
		bch_verbose(c, "done initializing freespace");
	}

	return 0;
}

/* Bucket IO clocks: */
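
/*
 * Bump a bucket's io_time[rw] to the current IO clock; io_time[READ] is what
 * orders cached buckets in the LRU for invalidation. These calls are
 * frequent, so if the clock already matches we skip the update and commit.
 */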
int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
			      size_t bucket_nr, int rw)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a;
	u64 now;
	int ret = 0;

	if (bch2_trans_relock(trans))
		bch2_trans_begin(trans);

	a = bch2_trans_start_alloc_update_noupdate(trans, &iter, POS(dev, bucket_nr));
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		return ret;

	now = bch2_current_io_time(c, rw);
	if (a->v.io_time[rw] == now)
		goto out;

	a->v.io_time[rw] = now;

	ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
		bch2_trans_commit(trans, NULL, NULL, 0);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/* Startup/shutdown (ro/rw): */
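
/*
 * Recompute c->capacity and c->reserved from the currently-rw members.
 * Illustrative arithmetic (numbers are hypothetical, not from any real
 * device): with nbuckets = 1 << 20, the copygc reserve is nbuckets >> 6 =
 * 16384 buckets; adding 2 * nr_btree_reserve buckets and one bucket each
 * for the btree, copygc and rebalance write points gives dev_reserve, which
 * is converted to sectors and doubled into reserved_sectors. The final
 * reservation is max(gc_reserve, reserved_sectors), capped at the total
 * capacity.
 */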
void bch2_recalc_capacity(struct bch_fs *c)
{
	u64 capacity = 0, reserved_sectors = 0, gc_reserve;
	unsigned bucket_size_max = 0;
	unsigned long ra_pages = 0;

	lockdep_assert_held(&c->state_lock);

	for_each_online_member(c, ca) {
		struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;

		ra_pages += bdi->ra_pages;
	}

	bch2_set_ra_pages(c, ra_pages);

	for_each_rw_member(c, ca) {
		u64 dev_reserve = 0;

		/*
		 * We need to reserve buckets (from the number
		 * of currently available buckets) against
		 * foreground writes so that mainly copygc can
		 * make forward progress.
		 *
		 * We need enough to refill the various reserves
		 * from scratch - copygc will use its entire
		 * reserve all at once, then run again when
		 * its reserve is refilled (from the formerly
		 * available buckets).
		 *
		 * This reserve is just used when considering if
		 * allocations for foreground writes must wait -
		 * not -ENOSPC calculations.
		 */

		dev_reserve += ca->nr_btree_reserve * 2;
		dev_reserve += ca->mi.nbuckets >> 6; /* copygc reserve */

		dev_reserve += 1;	/* btree write point */
		dev_reserve += 1;	/* copygc write point */
		dev_reserve += 1;	/* rebalance write point */

		dev_reserve *= ca->mi.bucket_size;

		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
					     ca->mi.first_bucket);

		reserved_sectors += dev_reserve * 2;

		bucket_size_max = max_t(unsigned, bucket_size_max,
					ca->mi.bucket_size);
	}

	gc_reserve = c->opts.gc_reserve_bytes
		? c->opts.gc_reserve_bytes >> 9
		: div64_u64(capacity * c->opts.gc_reserve_percent, 100);

	reserved_sectors = max(gc_reserve, reserved_sectors);

	reserved_sectors = min(reserved_sectors, capacity);

	c->reserved = reserved_sectors;
	c->capacity = capacity - reserved_sectors;

	c->bucket_size_max = bucket_size_max;

	/* Wake up in case someone was waiting for buckets */
	closure_wake_up(&c->freelist_wait);
}

u64 bch2_min_rw_member_capacity(struct bch_fs *c)
{
	u64 ret = U64_MAX;

	for_each_rw_member(c, ca)
		ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size);
	return ret;
}

static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
{
	struct open_bucket *ob;
	bool ret = false;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
		spin_lock(&ob->lock);
		if (ob->valid && !ob->on_partial_list &&
		    ob->dev == ca->dev_idx)
			ret = true;
		spin_unlock(&ob->lock);
	}

	return ret;
}

/* device goes ro: */
void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	/* First, remove device from allocation groups: */

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		clear_bit(ca->dev_idx, c->rw_devs[i].d);

	/*
	 * Capacity is calculated based off of devices in allocation groups:
	 */
	bch2_recalc_capacity(c);

	bch2_open_buckets_stop(c, ca, false);

	/*
	 * Wake up threads that were blocked on allocation, so they can notice
	 * the device can no longer be removed and the capacity has changed:
	 */
	closure_wake_up(&c->freelist_wait);

	/*
	 * journal_res_get() can block waiting for free space in the journal -
	 * it needs to notice there may not be devices to allocate from anymore:
	 */
	wake_up(&c->journal.wait);

	/* Now wait for any in flight writes: */

	closure_wait_event(&c->open_buckets_wait,
			   !bch2_dev_has_open_write_point(c, ca));
}

/* device goes rw: */
void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		if (ca->mi.data_allowed & (1 << i))
			set_bit(ca->dev_idx, c->rw_devs[i].d);
}

void bch2_dev_allocator_background_exit(struct bch_dev *ca)
{
	darray_exit(&ca->discard_buckets_in_flight);
}

void bch2_dev_allocator_background_init(struct bch_dev *ca)
{
	mutex_init(&ca->discard_buckets_in_flight_lock);
	INIT_WORK(&ca->discard_work, bch2_do_discards_work);
	INIT_WORK(&ca->discard_fast_work, bch2_do_discards_fast_work);
	INIT_WORK(&ca->invalidate_work, bch2_do_invalidates_work);
}

void bch2_fs_allocator_background_init(struct bch_fs *c)
{
	spin_lock_init(&c->freelist_lock);
}