fs/bcachefs/alloc_background.c - matched lines (non-contiguous excerpts, grouped by function)
// SPDX-License-Identifier: GPL-2.0

/* alloc_field_v1_get() */
static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
	if (!(a->fields & (1 << field)))

/* bch2_alloc_unpack_v1() */
	const void *d = in->data;
	out->gen = in->gen;
#define x(_name, _bits)	out->_name = alloc_field_v1_get(in, &d, idx++);
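/*
 * Aside: a minimal standalone sketch (not bcachefs code; all toy_* names
 * are invented for illustration) of the v1 decode pattern above - a
 * presence bitmask records which fields were written, and present fields
 * are consumed sequentially from a packed buffer via a moving cursor.
 */
#include <stdint.h>
#include <string.h>

struct toy_alloc_v1 {
	uint8_t fields;		/* bit i set => field i was encoded */
	uint8_t data[32];	/* packed fields, in field-number order */
};

static uint64_t toy_field_get(const struct toy_alloc_v1 *a,
			      const uint8_t **cursor, unsigned field)
{
	if (!(a->fields & (1U << field)))
		return 0;			/* absent fields decode as 0 */

	uint16_t v;
	memcpy(&v, *cursor, sizeof(v));		/* read one packed field */
	*cursor += sizeof(v);			/* advance past it */
	return v;
}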
/* bch2_alloc_unpack_v2() */
	struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	out->gen = a.v->gen;
	out->oldest_gen = a.v->oldest_gen;
	out->data_type = a.v->data_type;
	if (fieldnr < a.v->nr_fields) {		\
	out->_name = v;				\
	if (v != out->_name)			\
		return -1;			\
/* bch2_alloc_unpack_v3() */
	struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	out->gen = a.v->gen;
	out->oldest_gen = a.v->oldest_gen;
	out->data_type = a.v->data_type;
	out->need_discard = BCH_ALLOC_V3_NEED_DISCARD(a.v);
	out->need_inc_gen = BCH_ALLOC_V3_NEED_INC_GEN(a.v);
	out->journal_seq = le64_to_cpu(a.v->journal_seq);
	if (fieldnr < a.v->nr_fields) {		\
	out->_name = v;				\
	if (v != out->_name)			\
		return -1;			\

/* bch2_alloc_unpack() */
	switch (k.k->type) {
/* bch_alloc_v1_val_u64s() */
static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
	if (a->fields & (1 << i))

/* bch2_alloc_v1_validate() */
int bch2_alloc_v1_validate(struct bch_fs *c, struct bkey_s_c k,
	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
	bkey_fsck_err_on(bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v),
			 c, alloc_v1_val_size_bad,
			 bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));
/* bch2_alloc_v2_validate() */
int bch2_alloc_v2_validate(struct bch_fs *c, struct bkey_s_c k,
			 c, alloc_v2_unpack_error,

/* bch2_alloc_v3_validate() */
int bch2_alloc_v3_validate(struct bch_fs *c, struct bkey_s_c k,
			 c, alloc_v2_unpack_error,
/* bch2_alloc_v4_validate() */
int bch2_alloc_v4_validate(struct bch_fs *c, struct bkey_s_c k,
	struct bch_alloc_v4 a;
	bkey_val_copy(&a, bkey_s_c_to_alloc_v4(k));
	bkey_fsck_err_on(alloc_v4_u64s_noerror(&a) > bkey_val_u64s(k.k),
			 c, alloc_v4_val_size_bad,
			 alloc_v4_u64s_noerror(&a), bkey_val_u64s(k.k));
	bkey_fsck_err_on(!BCH_ALLOC_V4_BACKPOINTERS_START(&a) &&
			 BCH_ALLOC_V4_NR_BACKPOINTERS(&a),
			 c, alloc_v4_backpointers_start_bad,
	bkey_fsck_err_on(alloc_data_type(a, a.data_type) != a.data_type,
			 c, alloc_key_data_type_bad,
			 a.data_type, alloc_data_type(a, a.data_type));
	bkey_fsck_err_on(a.io_time[i] > LRU_TIME_MAX,
			 c, alloc_key_io_time_bad,
			 a.io_time[i], LRU_TIME_MAX);
	unsigned stripe_sectors = BCH_ALLOC_V4_BACKPOINTERS_START(&a) * sizeof(u64) >
		? a.stripe_sectors
	switch (a.data_type) {
			 a.dirty_sectors ||
			 a.cached_sectors ||
			 a.stripe,
			 c, alloc_key_empty_but_have_data,
			 a.dirty_sectors,
			 a.cached_sectors,
			 a.stripe);
	bkey_fsck_err_on(!a.dirty_sectors &&
			 c, alloc_key_dirty_sectors_0,
			 bch2_data_type_str(a.data_type));
	bkey_fsck_err_on(!a.cached_sectors ||
			 a.dirty_sectors ||
			 a.stripe,
			 c, alloc_key_cached_inconsistency,
	bkey_fsck_err_on(!a.io_time[READ] &&
			 c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs,
			 c, alloc_key_cached_but_read_time_zero,
/* bch2_alloc_v4_swab() */
	struct bch_alloc_v4 *a = bkey_s_to_alloc_v4(k).v;
	a->journal_seq_nonempty = swab64(a->journal_seq_nonempty);
	a->journal_seq_empty = swab64(a->journal_seq_empty);
	a->flags = swab32(a->flags);
	a->dirty_sectors = swab32(a->dirty_sectors);
	a->cached_sectors = swab32(a->cached_sectors);
	a->io_time[0] = swab64(a->io_time[0]);
	a->io_time[1] = swab64(a->io_time[1]);
	a->stripe = swab32(a->stripe);
	a->nr_external_backpointers = swab32(a->nr_external_backpointers);
	a->stripe_sectors = swab32(a->stripe_sectors);
/* bch2_alloc_to_text() */
void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);
	struct bch_dev *ca = c ? bch2_dev_bucket_tryget_noerror(c, k.k->p) : NULL;
	prt_printf(out, "gen %u oldest_gen %u data_type ", a->gen, a->oldest_gen);
	bch2_prt_data_type(out, a->data_type);
	prt_printf(out, "journal_seq_nonempty %llu\n", a->journal_seq_nonempty);
	prt_printf(out, "journal_seq_empty %llu\n", a->journal_seq_empty);
	prt_printf(out, "need_discard %llu\n", BCH_ALLOC_V4_NEED_DISCARD(a));
	prt_printf(out, "need_inc_gen %llu\n", BCH_ALLOC_V4_NEED_INC_GEN(a));
	prt_printf(out, "dirty_sectors %u\n", a->dirty_sectors);
	prt_printf(out, "stripe_sectors %u\n", a->stripe_sectors);
	prt_printf(out, "cached_sectors %u\n", a->cached_sectors);
	prt_printf(out, "stripe %u\n", a->stripe);
	prt_printf(out, "stripe_redundancy %u\n", a->stripe_redundancy);
	prt_printf(out, "io_time[READ] %llu\n", a->io_time[READ]);
	prt_printf(out, "io_time[WRITE] %llu\n", a->io_time[WRITE]);
	prt_printf(out, "fragmentation %llu\n", alloc_lru_idx_fragmentation(*a, ca));
	prt_printf(out, "bp_start %llu\n", BCH_ALLOC_V4_BACKPOINTERS_START(a));
/* __bch2_alloc_to_v4() */
	if (k.k->type == KEY_TYPE_alloc_v4) {
		memset(src, 0, dst - src);

/* __bch2_alloc_to_v4_mut() */
	if (k.k->type == KEY_TYPE_alloc_v4) {
		bkey_reassemble(&ret->k_i, k);
		src = alloc_v4_backpointers(&ret->v);
		SET_BCH_ALLOC_V4_BACKPOINTERS_START(&ret->v, BCH_ALLOC_V4_U64s);
		dst = alloc_v4_backpointers(&ret->v);
		memset(src, 0, dst - src);
		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&ret->v, 0);
	bkey_alloc_v4_init(&ret->k_i);
	ret->k.p = k.k->p;
	bch2_alloc_to_v4(k, &ret->v);
/* bch2_alloc_to_v4_mut_inlined() */
	struct bkey_s_c_alloc_v4 a;
	if (likely(k.k->type == KEY_TYPE_alloc_v4) &&
	    ((a = bkey_s_c_to_alloc_v4(k), true) &&
	     BCH_ALLOC_V4_NR_BACKPOINTERS(a.v) == 0))
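/*
 * Aside on the `(a = ..., true) && ...` idiom above (standalone toy, not
 * bcachefs code): the comma operator performs the assignment for its side
 * effect and then yields true, so the && chain can immediately test the
 * value that was just assigned, all within one condition.
 */
#include <stdbool.h>
#include <stdio.h>

static bool small_enough(int v)
{
	return v < 10;
}

int main(void)
{
	int v;

	/* assign v mid-expression, then keep evaluating the && chain */
	if ((v = 7, true) && small_enough(v))
		printf("v = %d passed the check\n", v);
	return 0;
}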
/* bch2_trans_start_alloc_update_noupdate() */
	struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut_inlined(trans, k);
	ret = PTR_ERR_OR_ZERO(a);
	return a;

/* bch2_trans_start_alloc_update() */
	struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update_noupdate(trans, &iter, pos);
	int ret = PTR_ERR_OR_ZERO(a);
	ret = bch2_trans_update(trans, &iter, &a->k_i, flags);
	return unlikely(ret) ? ERR_PTR(ret) : a;
/* alloc_gen() */
	return k.k->type == KEY_TYPE_bucket_gens
		? bkey_s_c_to_bucket_gens(k).v->gens[offset]
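/*
 * Aside: an illustrative model (toy_* names and the per-key count are
 * invented; the real lookup above masks with KEY_TYPE_BUCKET_GENS_MASK) of
 * how one bucket_gens key packs a one-byte generation for a power-of-two
 * run of buckets, so a bucket maps to (key, offset) by masking low bits.
 */
#include <stdint.h>

#define TOY_GENS_PER_KEY	256		/* assumed buckets per key */
#define TOY_GENS_MASK		(TOY_GENS_PER_KEY - 1)

struct toy_bucket_gens {
	uint64_t first_bucket;			/* aligned to TOY_GENS_PER_KEY */
	uint8_t	 gens[TOY_GENS_PER_KEY];	/* one generation byte per bucket */
};

static uint8_t toy_gen_lookup(const struct toy_bucket_gens *g, uint64_t bucket)
{
	/* high bits selected the key; low bits index within it */
	return g->gens[bucket & TOY_GENS_MASK];
}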
/* bch2_bucket_gens_validate() */
int bch2_bucket_gens_validate(struct bch_fs *c, struct bkey_s_c k,
			 c, bucket_gens_val_size_bad,

/* bch2_bucket_gens_to_text() */
void bch2_bucket_gens_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
	for (i = 0; i < ARRAY_SIZE(g.v->gens); i++) {
		prt_printf(out, "%u", g.v->gens[i]);
/* bch2_bucket_gens_init() */
int bch2_bucket_gens_init(struct bch_fs *c)
	struct btree_trans *trans = bch2_trans_get(c);
	/* Not a fsck error because this is checked/repaired by ... */
	if (!bch2_dev_bucket_exists(c, k.k->p))
	struct bch_alloc_v4 a;
	u8 gen = bch2_alloc_to_v4(k, &a)->gen;
	bch_err_fn(c, ret);
/* bch2_alloc_read() */
int bch2_alloc_read(struct bch_fs *c)
	struct btree_trans *trans = bch2_trans_get(c);
	if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_bucket_gens) {
		u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
		u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
		if (k.k->type != KEY_TYPE_bucket_gens)
		ca = bch2_dev_iterate(c, ca, k.k->p.inode);
		/* Not a fsck error because this is checked/repaired by ... */
		bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
		for (u64 b = max_t(u64, ca->mi.first_bucket, start);
		     b < min_t(u64, ca->mi.nbuckets, end);
			*bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK];
		ca = bch2_dev_iterate(c, ca, k.k->p.inode);
		/* Not a fsck error because this is checked/repaired by ... */
		bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
		if (k.k->p.offset < ca->mi.first_bucket) {
			bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode, ca->mi.first_bucket));
		if (k.k->p.offset >= ca->mi.nbuckets) {
			bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
		struct bch_alloc_v4 a;
		*bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
	bch_err_fn(c, ret);
/* __need_discard_or_freespace_err() */
	struct bch_fs *c = trans->c;
	bch2_bkey_val_to_text(&buf, c, alloc_k);
	if (ret == -BCH_ERR_fsck_ignore ||
	    ret == -BCH_ERR_fsck_errors_not_fixed)
/* bch2_bucket_do_index() */
			const struct bch_alloc_v4 *a,
	if (a->data_type != BCH_DATA_free &&
	    a->data_type != BCH_DATA_need_discard)
	switch (a->data_type) {
		pos = alloc_freespace_pos(alloc_k.k->p, *a);
		pos = alloc_k.k->p;
	need_discard_or_freespace_err_on(ca->mi.freespace_initialized &&
					 !old.k->type != set,
/* bch2_bucket_gen_update() */
	if (k.k->type != KEY_TYPE_bucket_gens) {
		bkey_bucket_gens_init(&g->k_i);
		g->k.p = iter.pos;
		bkey_reassemble(&g->k_i, k);
	g->v.gens[offset] = gen;
	ret = bch2_trans_update(trans, &iter, &g->k_i, 0);
/* bch2_dev_data_type_accounting_mod() */
	.dev_data_type.dev = ca->dev_idx,

/* bch2_alloc_key_to_dev_counters() */
	if (old->data_type != new->data_type) {
		int ret = bch2_dev_data_type_accounting_mod(trans, ca, new->data_type,
			  bch2_dev_data_type_accounting_mod(trans, ca, old->data_type,
				-1, -old_sectors, -bch2_bucket_sectors_fragmented(ca, *old), flags);
		int ret = bch2_dev_data_type_accounting_mod(trans, ca, new->data_type,
				new_sectors - old_sectors,
				bch2_bucket_sectors_fragmented(ca, *new) -
		!!new_unstriped - !!old_unstriped,
		new_unstriped - old_unstriped,
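/*
 * Aside: a self-contained sketch of the delta-style accounting used above
 * (toy_* names invented; not bcachefs code). Instead of rewriting absolute
 * per-device totals, a bucket's data-type transition posts signed
 * differences against both the old and new type, so summing all deltas
 * reproduces the totals.
 */
#include <stdint.h>

struct toy_dev_acct {
	int64_t buckets;
	int64_t sectors;
};

static void toy_acct_mod(struct toy_dev_acct *acct,
			 int64_t d_buckets, int64_t d_sectors)
{
	acct->buckets += d_buckets;
	acct->sectors += d_sectors;
}

static void toy_bucket_change_type(struct toy_dev_acct *old_type,
				   struct toy_dev_acct *new_type,
				   int64_t old_sectors, int64_t new_sectors)
{
	/* leaving the old data type: remove this bucket's old contribution */
	toy_acct_mod(old_type, -1, -old_sectors);
	/* entering the new data type: add the new contribution */
	toy_acct_mod(new_type, +1, new_sectors);
}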
/* bch2_trigger_alloc() */
	struct bch_fs *c = trans->c;
	struct bch_dev *ca = bch2_dev_bucket_tryget(c, new.k->p);
		return -EIO;
	if (likely(new.k->type == KEY_TYPE_alloc_v4)) {
		new_a = &new_ka->v;
	alloc_data_type_set(new_a, new_a->data_type);
	int is_empty_delta = (int) data_type_is_empty(new_a->data_type) -
			     (int) data_type_is_empty(old_a->data_type);
		new_a->io_time[READ] = bch2_current_io_time(c, READ);
		new_a->io_time[WRITE] = bch2_current_io_time(c, WRITE);
	if (data_type_is_empty(new_a->data_type) &&
	    !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset)) {
		new_a->gen++;
		alloc_data_type_set(new_a, new_a->data_type);
	if (old_a->data_type != new_a->data_type ||
	    (new_a->data_type == BCH_DATA_free &&
	if (new_a->data_type == BCH_DATA_cached &&
	    !new_a->io_time[READ])
		new_a->io_time[READ] = bch2_current_io_time(c, READ);
	ret = bch2_lru_change(trans, new.k->p.inode,
			      bucket_to_u64(new.k->p),
			      bucket_to_u64(new.k->p),
	if (old_a->gen != new_a->gen) {
		ret = bch2_bucket_gen_update(trans, new.k->p, new_a->gen);
	    old_a->cached_sectors) {
		ret = bch2_mod_dev_cached_sectors(trans, ca->dev_idx,
				-((s64) old_a->cached_sectors),
	u64 transaction_seq = trans->journal_res.seq;
	if (log_fsck_err_on(transaction_seq && new_a->journal_seq_nonempty > transaction_seq,
			    journal_cur_seq(&c->journal),
			    (bch2_bkey_val_to_text(&buf, c, new.s_c), buf.buf)))
		new_a->journal_seq_nonempty = transaction_seq;
	int is_empty_delta = (int) data_type_is_empty(new_a->data_type) -
			     (int) data_type_is_empty(old_a->data_type);
	/*
	 * Record journal sequence number of empty -> nonempty transition:
	 * Note that there may be multiple empty -> nonempty
	 * transitions, data in a bucket may be overwritten while we're
	 * still writing to it - so be careful to only record the first:
	 */
	    new_a->journal_seq_empty <= c->journal.flushed_seq_ondisk) {
		new_a->journal_seq_nonempty = transaction_seq;
		new_a->journal_seq_empty = 0;
	/*
	 * Bucket becomes empty: mark it as waiting for a journal flush,
	 * unless updates since empty -> nonempty transition were never
	 * flushed - we may need to ask the journal not to flush ...
	 */
	if (new_a->journal_seq_nonempty == transaction_seq ||
	    bch2_journal_noflush_seq(&c->journal,
				     new_a->journal_seq_nonempty,
		new_a->journal_seq_nonempty = new_a->journal_seq_empty = 0;
		new_a->journal_seq_empty = transaction_seq;
	ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
			c->journal.flushed_seq_ondisk,
			new.k->p.inode, new.k->p.offset,
	if (bch2_fs_fatal_err_on(ret, c,
	if (new_a->gen != old_a->gen) {
		u8 *gen = bucket_gen(ca, new.k->p.offset);
		*gen = new_a->gen;
#define eval_state(_a, expr)	({ const struct bch_alloc_v4 *a = _a; expr; })
#define bucket_flushed(a)	(a->journal_seq_empty <= c->journal.flushed_seq_ondisk)
	if (statechange(a->data_type == BCH_DATA_free) &&
		closure_wake_up(&c->freelist_wait);
	if (statechange(a->data_type == BCH_DATA_need_discard) &&
	    !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset) &&
		bch2_discard_one_bucket_fast(ca, new.k->p.offset);
	if (statechange(a->data_type == BCH_DATA_cached) &&
	    !bch2_bucket_is_open(c, new.k->p.inode, new.k->p.offset) &&
	if (statechange(a->data_type == BCH_DATA_need_gc_gens))
		bch2_gc_gens_async(c);
	struct bucket *g = gc_bucket(ca, new.k->p.offset);
	g->gen_valid = 1;
	g->gen = new_a->gen;
	bch2_fs_inconsistent(c, "reference to invalid bucket\n %s",
			     (bch2_bkey_val_to_text(&buf, c, new.s_c), buf.buf));
	ret = -EIO;
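/*
 * Aside: the two comments above describe a small state machine; here is a
 * standalone toy model of it (invented names, not bcachefs code). Only the
 * first empty -> nonempty transition since the last flush is recorded, and
 * a bucket going empty either clears its sequence numbers (nothing to
 * flush) or records the sequence it must wait on before reuse.
 */
#include <stdint.h>
#include <stdbool.h>

struct toy_bucket_seqs {
	uint64_t seq_nonempty;	/* first seq that made the bucket nonempty */
	uint64_t seq_empty;	/* seq at which it last became empty */
};

static void toy_became_nonempty(struct toy_bucket_seqs *b, uint64_t seq,
				uint64_t flushed_seq)
{
	/* data may be overwritten mid-write; record only the first transition */
	if (b->seq_empty <= flushed_seq) {
		b->seq_nonempty = seq;
		b->seq_empty = 0;
	}
}

static void toy_became_empty(struct toy_bucket_seqs *b, uint64_t seq,
			     bool writes_never_flushed)
{
	if (writes_never_flushed)
		b->seq_nonempty = b->seq_empty = 0;	/* nothing to wait for */
	else
		b->seq_empty = seq;	/* reuse must wait for this flush */
}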
/*
 * bch2_get_key_or_hole(): "... extents style btrees, but works on
 * non-extents btrees" (doc comment, truncated in this listing)
 */
	if (k.k->type) {
		struct btree_path *path = btree_iter_path(iter->trans, iter);
		if (!bpos_eq(path->l[0].b->key.k.p, SPOS_MAX))
			end = bkey_min(end, bpos_nosnap_successor(path->l[0].b->key.k.p));
		end = bkey_min(end, POS(iter->pos.inode, iter->pos.offset + U32_MAX - 1));
		/* btree node min/max is a closed interval, upto takes a half ... */
		bch2_trans_iter_exit(iter->trans, &iter2);
		BUG_ON(next.offset >= iter->pos.offset + U32_MAX);
		hole->p = iter->pos;
		bch2_key_resize(hole, next.offset - iter->pos.offset);
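/*
 * Aside: the interval comment above is the subtle part of synthesizing
 * holes - a btree node's max key is a *closed* bound, while the hole wants
 * a half-open end. A toy version of that conversion (invented names):
 */
#include <stdint.h>

static uint64_t toy_min_u64(uint64_t a, uint64_t b)
{
	return a < b ? a : b;
}

/* closed upper bound -> exclusive end, clamped to the iterator's limit */
static uint64_t toy_hole_end(uint64_t node_max_closed, uint64_t limit)
{
	uint64_t end = node_max_closed + 1;	/* successor: closed -> half-open */

	return toy_min_u64(end, limit);
}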
/* next_bucket() */
static bool next_bucket(struct bch_fs *c, struct bch_dev **ca, struct bpos *bucket)
	if (bucket->offset < (*ca)->mi.first_bucket)
		bucket->offset = (*ca)->mi.first_bucket;
	if (bucket->offset < (*ca)->mi.nbuckets)
	bucket->inode++;
	bucket->offset = 0;
	*ca = __bch2_next_dev_idx(c, bucket->inode, NULL);
	*bucket = POS((*ca)->dev_idx, (*ca)->mi.first_bucket);
/* bch2_get_key_or_real_bucket_hole() */
	struct bch_fs *c = iter->trans->c;
	*ca = bch2_dev_iterate_noerror(c, *ca, k.k->p.inode);
	if (!k.k->type) {
		if (!next_bucket(c, ca, &hole_start))
	if (k.k->p.offset > (*ca)->mi.nbuckets)
		bch2_key_resize(hole, (*ca)->mi.nbuckets - hole_start.offset);
/* bch2_check_alloc_key() */
	struct bch_fs *c = trans->c;
	const struct bch_alloc_v4 *a;
	struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, alloc_k.k->p);
			alloc_k.k->p.inode, alloc_k.k->p.offset))
	if (!ca->mi.freespace_initialized)
	a = bch2_alloc_to_v4(alloc_k, &a_convert);
	bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p);
	bool is_discarded = a->data_type == BCH_DATA_need_discard;
	if (need_discard_or_freespace_err_on(!!k.k->type != is_discarded,
	bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
	bool is_free = a->data_type == BCH_DATA_free;
	if (need_discard_or_freespace_err_on(!!k.k->type != is_free,
	bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
	if (fsck_err_on(a->gen != alloc_gen(k, gens_offset),
			alloc_gen(k, gens_offset), a->gen,
			bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		if (k.k->type == KEY_TYPE_bucket_gens) {
			bkey_reassemble(&g->k_i, k);
			bkey_bucket_gens_init(&g->k_i);
			g->k.p = alloc_gens_pos(alloc_k.k->p, &gens_offset);
		g->v.gens[gens_offset] = a->gen;
		ret = bch2_trans_update(trans, bucket_gens_iter, &g->k_i, 0);
/* bch2_check_alloc_hole_freespace() */
	if (!ca->mi.freespace_initialized)
	*end = bkey_min(k.k->p, *end);
	if (fsck_err_on(k.k->type != KEY_TYPE_set,
			" device %llu buckets %llu-%llu",
			freespace_iter->pos.inode,
			freespace_iter->pos.offset,
			end->offset)) {
		bkey_init(&update->k);
		update->k.type = KEY_TYPE_set;
		update->k.p = freespace_iter->pos;
		bch2_key_resize(&update->k,
				min_t(u64, U32_MAX, end->offset -
				      freespace_iter->pos.offset));
/* bch2_check_alloc_hole_bucket_gens() */
	if (k.k->type == KEY_TYPE_bucket_gens) {
			bucket_gens_pos_to_alloc(k.k->p, i).inode,
			bucket_gens_pos_to_alloc(k.k->p, i).offset,
	*end = bkey_min(*end, bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0));
/* context struct member used by check_discard_freespace_key_work() */
	struct bch_fs *c;

/* bch2_recheck_discard_freespace_key() */
	ret = k.k->type != KEY_TYPE_set

/* check_discard_freespace_key_work() */
	bch2_trans_do(w->c, bch2_recheck_discard_freespace_key(trans, w->pos));
	bch2_write_ref_put(w->c, BCH_WRITE_REF_check_discard_freespace_key);
/* bch2_check_discard_freespace_key() */
	struct bch_fs *c = trans->c;
	enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard
	struct bpos bucket = iter->pos;
	u64 genbits = iter->pos.offset & (~0ULL << 56);
	if (!bch2_dev_bucket_exists(c, bucket)) {
			bch2_btree_id_str(iter->btree_id), bucket.inode, bucket.offset))
	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(alloc_k, &a_convert);
	if (a->data_type != state ||
	    genbits != alloc_freespace_genbits(*a))) {
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
			bch2_btree_id_str(iter->btree_id),
			iter->pos.inode,
			iter->pos.offset,
			a->data_type == state,
			genbits >> 56, alloc_freespace_genbits(*a) >> 56))
	*gen = a->gen;
	-BCH_ERR_transaction_restart_commit;
	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_check_discard_freespace_key)) {
	INIT_WORK(&w->work, check_discard_freespace_key_work);
	w->c = c;
	w->pos = BBPOS(iter->btree_id, iter->pos);
	queue_work(c->write_ref_wq, &w->work);
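/*
 * Aside: the tail of the function above defers a recheck to a workqueue.
 * A minimal self-contained kernel-module sketch of that pattern (names and
 * the pr_info payload are invented; error paths trimmed): allocate a
 * context, INIT_WORK() it, queue it, and let the worker free it.
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/workqueue.h>

struct toy_deferred_check {
	struct work_struct work;
	u64 pos;				/* state the worker needs */
};

static void toy_check_worker(struct work_struct *work)
{
	struct toy_deferred_check *w =
		container_of(work, struct toy_deferred_check, work);

	pr_info("rechecking pos %llu\n", w->pos);
	kfree(w);				/* worker owns the context */
}

static int __init toy_init(void)
{
	struct toy_deferred_check *w = kzalloc(sizeof(*w), GFP_KERNEL);

	if (!w)
		return -ENOMEM;

	w->pos = 42;
	INIT_WORK(&w->work, toy_check_worker);
	schedule_work(&w->work);
	return 0;
}

static void __exit toy_exit(void)
{
	flush_scheduled_work();
}

module_init(toy_init);
module_exit(toy_exit);
MODULE_LICENSE("GPL");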
/* bch2_check_bucket_gens_key() */
	struct bch_fs *c = trans->c;
	u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
	u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
	BUG_ON(k.k->type != KEY_TYPE_bucket_gens);
	struct bch_dev *ca = bch2_dev_tryget_noerror(c, k.k->p.inode);
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
	if (fsck_err_on(end <= ca->mi.first_bucket ||
			start >= ca->mi.nbuckets,
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		for (b = start; b < ca->mi.first_bucket; b++)
		for (b = ca->mi.nbuckets; b < end; b++)
/* bch2_check_alloc_info() */
int bch2_check_alloc_info(struct bch_fs *c)
	struct btree_trans *trans = bch2_trans_get(c);
	if (k.k->type) {
		next = bpos_nosnap_successor(k.k->p);
		next = k.k->p;
	bch2_bkey_val_to_text(&buf, c, k);
	bch_err(c, "while checking %s", buf.buf);
	bch_err_fn(c, ret);
/* bch2_check_alloc_to_lru_ref() */
	struct bch_fs *c = trans->c;
	const struct bch_alloc_v4 *a;
	struct bch_dev *ca = bch2_dev_tryget_noerror(c, alloc_k.k->p.inode);
	a = bch2_alloc_to_v4(alloc_k, &a_convert);
	u64 lru_idx = alloc_lru_idx_fragmentation(*a, ca);
	if (a->data_type != BCH_DATA_cached)
	if (fsck_err_on(!a->io_time[READ],
			bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		a_mut->v.io_time[READ] = bch2_current_io_time(c, READ);
				       &a_mut->k_i, BTREE_TRIGGER_norun);
		a = &a_mut->v;
	ret = bch2_lru_check_set(trans, alloc_k.k->p.inode, a->io_time[READ],

/* bch2_check_alloc_to_lru_refs() */
int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
	bkey_init(&last_flushed.k->k);
	int ret = bch2_trans_run(c,
	bch2_bkey_buf_exit(&last_flushed, c);
	bch_err_fn(c, ret);
/* discard_in_flight_add() */
	mutex_lock(&ca->discard_buckets_in_flight_lock);
	darray_for_each(ca->discard_buckets_in_flight, i)
		if (i->bucket == bucket) {
			ret = -BCH_ERR_EEXIST_discard_in_flight_add;
	ret = darray_push(&ca->discard_buckets_in_flight, ((struct discard_in_flight) {
	mutex_unlock(&ca->discard_buckets_in_flight_lock);

/* discard_in_flight_remove() */
	mutex_lock(&ca->discard_buckets_in_flight_lock);
	darray_for_each(ca->discard_buckets_in_flight, i)
		if (i->bucket == bucket) {
			BUG_ON(!i->in_progress);
			darray_remove_item(&ca->discard_buckets_in_flight, i);
	mutex_unlock(&ca->discard_buckets_in_flight_lock);
/* bch2_discard_one_bucket() */
	struct bch_fs *c = trans->c;
	struct bpos pos = need_discard_iter->pos;
	struct bkey_i_alloc_v4 *a;
	if (bch2_bucket_is_open_safe(c, pos.inode, pos.offset)) {
		s->open++;
	u64 seq_ready = bch2_bucket_journal_seq_ready(&c->buckets_waiting_for_journal,
	if (seq_ready > c->journal.flushed_seq_ondisk) {
		if (seq_ready > c->journal.flushing_seq)
			s->need_journal_commit++;
			     need_discard_iter->pos,
	a = bch2_alloc_to_v4_mut(trans, k);
	ret = PTR_ERR_OR_ZERO(a);
	if (a->v.data_type != BCH_DATA_need_discard) {
	s->discarded++;
	if (ca->mi.discard && !c->opts.nochanges) {
		blkdev_issue_discard(ca->disk_sb.bdev,
				     k.k->p.offset * ca->mi.bucket_size,
				     ca->mi.bucket_size,
	SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
	alloc_data_type_set(&a->v, a->v.data_type);
	ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
	count_event(c, bucket_discard);
	s->seen++;
/* bch2_do_discards_work() */
	struct bch_fs *c = ca->fs;
	ret = bch2_trans_run(c,
			POS(ca->dev_idx, 0),
			POS(ca->dev_idx, U64_MAX), 0, k,
	bch2_journal_flush_async(&c->journal, NULL);
	trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded,
	percpu_ref_put(&ca->io_ref);
	bch2_write_ref_put(c, BCH_WRITE_REF_discard);

/* bch2_dev_do_discards() */
	struct bch_fs *c = ca->fs;
	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard))
	if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
	if (queue_work(c->write_ref_wq, &ca->discard_work))
	percpu_ref_put(&ca->io_ref);
	bch2_write_ref_put(c, BCH_WRITE_REF_discard);

/* bch2_do_discards() */
void bch2_do_discards(struct bch_fs *c)
	for_each_member_device(c, ca)
/* bch2_do_discards_fast_one() */
			BTREE_ID_need_discard, POS(ca->dev_idx, bucket), 0);
	if (log_fsck_err_on(discard_k.k->type != KEY_TYPE_set,
			    ca->dev_idx, bucket))

/* bch2_do_discards_fast_work() */
	struct bch_fs *c = ca->fs;
	struct btree_trans *trans = bch2_trans_get(c);
	mutex_lock(&ca->discard_buckets_in_flight_lock);
	darray_for_each(ca->discard_buckets_in_flight, i) {
		if (i->in_progress)
		bucket = i->bucket;
		i->in_progress = true;
	mutex_unlock(&ca->discard_buckets_in_flight_lock);
	bch_err_fn(c, ret);
	trace_discard_buckets_fast(c, s.seen, s.open, s.need_journal_commit, s.discarded, bch2_err_str(ret));
	percpu_ref_put(&ca->io_ref);
	bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);

/* bch2_discard_one_bucket_fast() */
	struct bch_fs *c = ca->fs;
	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard_fast))
	if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
	if (queue_work(c->write_ref_wq, &ca->discard_fast_work))
	percpu_ref_put(&ca->io_ref);
	bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
/* invalidate_one_bucket() */
	struct bch_fs *c = trans->c;
	struct bkey_i_alloc_v4 *a = NULL;
	struct bpos bucket = u64_to_bucket(lru_k.k->p.offset);
	if (!bch2_dev_bucket_exists(c, bucket)) {
		return bch2_btree_bit_mod_buffered(trans, BTREE_ID_lru, lru_iter->pos, false);
	if (bch2_bucket_is_open_safe(c, bucket.inode, bucket.offset))
	a = bch2_trans_start_alloc_update(trans, bucket, BTREE_TRIGGER_bucket_invalidate);
	ret = PTR_ERR_OR_ZERO(a);
	if (lru_pos_time(lru_iter->pos) != alloc_lru_idx_read(a->v))
	BUG_ON(a->v.data_type != BCH_DATA_cached);
	BUG_ON(a->v.dirty_sectors);
	if (!a->v.cached_sectors)
		bch_err(c, "invalidating empty bucket, confused");
	cached_sectors = a->v.cached_sectors;
	SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
	a->v.gen++;
	a->v.data_type = 0;
	a->v.dirty_sectors = 0;
	a->v.stripe_sectors = 0;
	a->v.cached_sectors = 0;
	a->v.io_time[READ] = bch2_current_io_time(c, READ);
	a->v.io_time[WRITE] = bch2_current_io_time(c, WRITE);
	trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors);
	--*nr_to_invalidate;
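/*
 * Aside: a toy model (invented names, not bcachefs code) of why the
 * invalidation above bumps the generation - cached pointers embed the
 * generation they were created under, so incrementing the bucket's gen
 * invalidates them all at once without touching the pointers themselves.
 */
#include <stdint.h>
#include <stdbool.h>

struct toy_bucket {
	uint8_t	 gen;
	uint32_t cached_sectors;
};

struct toy_ptr {
	uint64_t bucket;
	uint8_t	 gen;		/* bucket generation when allocated */
};

static void toy_invalidate(struct toy_bucket *b)
{
	b->gen++;		/* every existing pointer is now stale */
	b->cached_sectors = 0;
}

static bool toy_ptr_stale(const struct toy_bucket *b, const struct toy_ptr *p)
{
	return p->gen != b->gen;
}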
/* next_lru_key() */
	k = bch2_btree_iter_peek_max(iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX));
	bch2_btree_iter_set_pos(iter, lru_pos(ca->dev_idx, 0, 0));

/* bch2_do_invalidates_work() */
	struct bch_fs *c = ca->fs;
	struct btree_trans *trans = bch2_trans_get(c);
			lru_pos(ca->dev_idx, 0,
				((bch2_current_io_time(c, READ) + U32_MAX) &
	percpu_ref_put(&ca->io_ref);
	bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);

/* bch2_dev_do_invalidates() */
	struct bch_fs *c = ca->fs;
	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_invalidate))
	if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
	if (queue_work(c->write_ref_wq, &ca->invalidate_work))
	percpu_ref_put(&ca->io_ref);
	bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);

/* bch2_do_invalidates() */
void bch2_do_invalidates(struct bch_fs *c)
	for_each_member_device(c, ca)
/* bch2_dev_freespace_init() */
int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
	struct btree_trans *trans = bch2_trans_get(c);
	struct bpos end = POS(ca->dev_idx, bucket_end);
	BUG_ON(bucket_end > ca->mi.nbuckets);
			POS(ca->dev_idx, max_t(u64, ca->mi.first_bucket, bucket_start)),
			__func__, iter.pos.offset, ca->mi.nbuckets);
	if (k.k->type) {
		/* We process live keys in the alloc btree one at a time: */
		const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
		ret = bch2_bucket_do_index(trans, ca, k, a, true) ?:
		bkey_init(&freespace->k);
		freespace->k.type = KEY_TYPE_set;
		freespace->k.p = k.k->p;
		freespace->k.size = k.k->size;
		bch2_btree_iter_set_pos(&iter, k.k->p);
	mutex_lock(&c->sb_lock);
	m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
	mutex_unlock(&c->sb_lock);

/* bch2_fs_freespace_init() */
int bch2_fs_freespace_init(struct bch_fs *c)
	for_each_member_device(c, ca) {
		if (ca->mi.freespace_initialized)
		bch_info(c, "initializing freespace");
		ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
		bch_err_fn(c, ret);
	mutex_lock(&c->sb_lock);
	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);
	bch_verbose(c, "done initializing freespace");
/* bch2_dev_remove_alloc() */
int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
	struct bpos start = POS(ca->dev_idx, 0);
	struct bpos end = POS(ca->dev_idx, U64_MAX);
	ret = bch2_dev_remove_stripes(c, ca->dev_idx) ?:
		bch2_btree_delete_range(c, BTREE_ID_lru, start, end,
		bch2_btree_delete_range(c, BTREE_ID_need_discard, start, end,
		bch2_btree_delete_range(c, BTREE_ID_freespace, start, end,
		bch2_btree_delete_range(c, BTREE_ID_backpointers, start, end,
		bch2_btree_delete_range(c, BTREE_ID_bucket_gens, start, end,
		bch2_btree_delete_range(c, BTREE_ID_alloc, start, end,
		bch2_dev_usage_remove(c, ca->dev_idx);
/* __bch2_bucket_io_time_reset() */
	struct bch_fs *c = trans->c;
	struct bkey_i_alloc_v4 *a =
	int ret = PTR_ERR_OR_ZERO(a);
	u64 now = bch2_current_io_time(c, rw);
	if (a->v.io_time[rw] == now)
	a->v.io_time[rw] = now;
	ret = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
/* bch2_recalc_capacity() */
void bch2_recalc_capacity(struct bch_fs *c)
	lockdep_assert_held(&c->state_lock);
	for_each_online_member(c, ca) {
		struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;
		ra_pages += bdi->ra_pages;
	bch2_set_ra_pages(c, ra_pages);
	for_each_rw_member(c, ca) {
		/*
		 * ... from scratch - copygc will use its entire
		 * ... allocations for foreground writes must wait -
		 * not -ENOSPC calculations.
		 */
		dev_reserve += ca->nr_btree_reserve * 2;
		dev_reserve += ca->mi.nbuckets >> 6;	/* copygc reserve */
		dev_reserve *= ca->mi.bucket_size;
		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
					     ca->mi.first_bucket);
				       ca->mi.bucket_size);
	gc_reserve = c->opts.gc_reserve_bytes
		? c->opts.gc_reserve_bytes >> 9
		: div64_u64(capacity * c->opts.gc_reserve_percent, 100);
	c->reserved = reserved_sectors;
	c->capacity = capacity - reserved_sectors;
	c->bucket_size_max = bucket_size_max;
	closure_wake_up(&c->freelist_wait);
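/*
 * Aside: a worked, standalone version of the gc_reserve arithmetic above
 * (invented names): the reserve is either an explicit byte count converted
 * to 512-byte sectors with >> 9, or a percentage of total capacity.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t toy_gc_reserve(uint64_t capacity_sectors,
			       uint64_t reserve_bytes,
			       unsigned reserve_percent)
{
	return reserve_bytes
		? reserve_bytes >> 9			/* bytes -> sectors */
		: capacity_sectors * reserve_percent / 100;
}

int main(void)
{
	uint64_t cap = (1ULL << 40) >> 9;	/* 1 TiB in 512-byte sectors */

	/* 2^31 sectors * 8 / 100 = 171798691 sectors (~82 GiB) */
	printf("%llu\n", (unsigned long long)toy_gc_reserve(cap, 0, 8));
	return 0;
}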
/* bch2_min_rw_member_capacity() */
u64 bch2_min_rw_member_capacity(struct bch_fs *c)
	for_each_rw_member(c, ca)
		ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size);

/* bch2_dev_has_open_write_point() */
static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
		spin_lock(&ob->lock);
		if (ob->valid && !ob->on_partial_list &&
		    ob->dev == ca->dev_idx)
		spin_unlock(&ob->lock);
/* bch2_dev_allocator_remove() */
void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
	lockdep_assert_held(&c->state_lock);
	for (unsigned i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		clear_bit(ca->dev_idx, c->rw_devs[i].d);
	c->rw_devs_change_count++;
	bch2_recalc_capacity(c);
	bch2_open_buckets_stop(c, ca, false);
	closure_wake_up(&c->freelist_wait);
	/* journal_res_get() can block waiting for free space in the journal ... */
	wake_up(&c->journal.wait);
	closure_wait_event(&c->open_buckets_wait,
			   !bch2_dev_has_open_write_point(c, ca));

/* bch2_dev_allocator_add() */
void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
	lockdep_assert_held(&c->state_lock);
	for (unsigned i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		if (ca->mi.data_allowed & (1 << i))
			set_bit(ca->dev_idx, c->rw_devs[i].d);
	c->rw_devs_change_count++;
/* bch2_dev_allocator_background_exit() */
	darray_exit(&ca->discard_buckets_in_flight);

/* bch2_dev_allocator_background_init() */
	mutex_init(&ca->discard_buckets_in_flight_lock);
	INIT_WORK(&ca->discard_work, bch2_do_discards_work);
	INIT_WORK(&ca->discard_fast_work, bch2_do_discards_fast_work);
	INIT_WORK(&ca->invalidate_work, bch2_do_invalidates_work);
/* bch2_fs_allocator_background_init() */
void bch2_fs_allocator_background_init(struct bch_fs *c)
	spin_lock_init(&c->freelist_lock);