// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "buckets.h"
#include "debug.h"
#include "extents.h"
#include "extent_update.h"

/*
 * This counts the number of iterators to the alloc & ec btrees we'll need
 * when inserting/removing this extent:
 */
static unsigned bch2_bkey_nr_alloc_ptrs(struct bkey_s_c k)
{
        struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
        const union bch_extent_entry *entry;
        unsigned ret = 0, lru = 0;

        bkey_extent_entry_for_each(ptrs, entry) {
                switch (__extent_entry_type(entry)) {
                case BCH_EXTENT_ENTRY_ptr:
                        /* Might also be updating LRU btree */
                        if (entry->ptr.cached)
                                lru++;

                        fallthrough;
                case BCH_EXTENT_ENTRY_stripe_ptr:
                        ret++;
                }
        }

        /*
         * Updating keys in the alloc btree may also update keys in the
         * freespace or discard btrees:
         */
        return lru + ret * 2;
}

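/*
 * Cap on the number of btree iterators a single extent update may require;
 * when we hit it, we clamp the update so it can commit in one transaction:
 */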
#define EXTENT_ITERS_MAX        64

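/*
 * Count the btree iterators needed to insert @k; on hitting
 * EXTENT_ITERS_MAX, clamp @end to what we've counted so far and return
 * nonzero so the caller stops:
 */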
static int count_iters_for_insert(struct btree_trans *trans,
                                  struct bkey_s_c k,
                                  unsigned offset,
                                  struct bpos *end,
                                  unsigned *nr_iters)
{
        int ret = 0, ret2 = 0;

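        /* Already over budget? Clamp @end to this key and signal the caller: */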
        if (*nr_iters >= EXTENT_ITERS_MAX) {
                *end = bpos_min(*end, k.k->p);
                ret = 1;
        }

        switch (k.k->type) {
        case KEY_TYPE_extent:
        case KEY_TYPE_reflink_v:
                *nr_iters += bch2_bkey_nr_alloc_ptrs(k);

                if (*nr_iters >= EXTENT_ITERS_MAX) {
                        *end = bpos_min(*end, k.k->p);
                        ret = 1;
                }

                break;
        case KEY_TYPE_reflink_p: {
                struct bkey_s_c_reflink_p p = bkey_s_c_to_reflink_p(k);
                u64 idx = REFLINK_P_IDX(p.v);
                unsigned sectors = bpos_min(*end, p.k->p).offset -
                        bkey_start_offset(p.k);
                struct btree_iter iter;
                struct bkey_s_c r_k;

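                /*
                 * The indirect extents this reflink pointer references will
                 * also be updated; walk them and count their iterators too:
                 */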
                for_each_btree_key_norestart(trans, iter,
                                BTREE_ID_reflink, POS(0, idx + offset),
                                BTREE_ITER_slots, r_k, ret2) {
                        if (bkey_ge(bkey_start_pos(r_k.k), POS(0, idx + sectors)))
                                break;

                        /* extent_update_to_keys(), for the reflink_v update */
                        *nr_iters += 1;

                        *nr_iters += 1 + bch2_bkey_nr_alloc_ptrs(r_k);

                        if (*nr_iters >= EXTENT_ITERS_MAX) {
                                struct bpos pos = bkey_start_pos(k.k);
                                pos.offset += min_t(u64, k.k->size,
                                                    r_k.k->p.offset - idx);

                                *end = bpos_min(*end, pos);
                                ret = 1;
                                break;
                        }
                }
                bch2_trans_iter_exit(trans, &iter);

                break;
        }
        }

        return ret2 ?: ret;
}

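/*
 * Walk the existing keys an insert at @iter's position would overlap, and
 * clamp @end to the range we can update in a single atomic transaction:
 */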
int bch2_extent_atomic_end(struct btree_trans *trans,
                           struct btree_iter *iter,
                           struct bpos *end)
{
        unsigned nr_iters = 0;

        struct btree_iter copy;
        bch2_trans_copy_iter(trans, &copy, iter);

        int ret = bch2_btree_iter_traverse(trans, &copy);
        if (ret)
                goto err;

        struct bkey_s_c k;
        for_each_btree_key_max_continue_norestart(trans, copy, *end, 0, k, ret) {
                unsigned offset = 0;

                if (bkey_gt(iter->pos, bkey_start_pos(k.k)))
                        offset = iter->pos.offset - bkey_start_offset(k.k);

                ret = count_iters_for_insert(trans, k, offset, end, &nr_iters);
                if (ret)
                        break;
        }
err:
        bch2_trans_iter_exit(trans, &copy);
        return ret < 0 ? ret : 0;
}

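/*
 * Trim @k so that inserting it stays within the iterator budget of a single
 * transaction commit:
 */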
int bch2_extent_trim_atomic(struct btree_trans *trans,
                            struct btree_iter *iter,
                            struct bkey_i *k)
{
        struct bpos end = k->k.p;
        int ret = bch2_extent_atomic_end(trans, iter, &end);
        if (ret)
                return ret;

        /* tracepoint: log the trim if the extent had to be shortened */

        if (bpos_lt(end, k->k.p)) {
                if (trace_extent_trim_atomic_enabled()) {
                        CLASS(printbuf, buf)();
                        bch2_bpos_to_text(&buf, end);
                        prt_newline(&buf);
                        bch2_bkey_val_to_text(&buf, trans->c, bkey_i_to_s_c(k));
                        trace_extent_trim_atomic(trans->c, buf.buf);
                }
                bch2_cut_back(end, k);
        }
        return 0;
}