/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_DISK_ACCOUNTING_H
#define _BCACHEFS_DISK_ACCOUNTING_H

#include "eytzinger.h"
#include "sb-members.h"

static inline void bch2_u64s_neg(u64 *v, unsigned nr)
{
	for (unsigned i = 0; i < nr; i++)
		v[i] = -v[i];
}

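/*
 * Accounting keys carry a variable-length array of u64 counters (the d[]
 * member) after the fixed fields of struct bch_accounting; this returns how
 * many counters a given key has.
 */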
static inline unsigned bch2_accounting_counters(const struct bkey *k)
{
	return bkey_val_u64s(k) - offsetof(struct bch_accounting, d) / sizeof(u64);
}

static inline void bch2_accounting_neg(struct bkey_s_accounting a)
{
	bch2_u64s_neg(a.v->d, bch2_accounting_counters(a.k));
}

static inline bool bch2_accounting_key_is_zero(struct bkey_s_c_accounting a)
{
	for (unsigned i = 0; i < bch2_accounting_counters(a.k); i++)
		if (a.v->d[i])
			return false;
	return true;
}

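/*
 * Add src's counters into dst, keeping the newer of the two versions; the two
 * keys are expected to carry the same number of counters.
 */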
static inline void bch2_accounting_accumulate(struct bkey_i_accounting *dst,
					      struct bkey_s_c_accounting src)
{
	EBUG_ON(dst->k.u64s != src.k->u64s);

	for (unsigned i = 0; i < bch2_accounting_counters(&dst->k); i++)
		dst->v.d[i] += src.v->d[i];
	if (bversion_cmp(dst->k.version, src.k->version) < 0)
		dst->k.version = src.k->version;
}

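/*
 * Fold a sector delta for a single data type into the base filesystem usage
 * counters: btree and cached sectors are tracked separately, while user and
 * parity data both count towards "data".
 */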
static inline void fs_usage_data_type_to_base(struct bch_fs_usage_base *fs_usage,
					      enum bch_data_type data_type,
					      s64 sectors)
{
	switch (data_type) {
	case BCH_DATA_btree:
		fs_usage->btree		+= sectors;
		break;
	case BCH_DATA_user:
	case BCH_DATA_parity:
		fs_usage->data		+= sectors;
		break;
	case BCH_DATA_cached:
		fs_usage->cached	+= sectors;
		break;
	default:
		break;
	}
}

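/*
 * struct disk_accounting_pos is a tagged union overlaid on struct bpos (via
 * the _pad member), so accounting positions are packed directly into the key
 * space of the accounting btree; the byte swab on big-endian hosts keeps that
 * packed representation consistent with the little-endian layout.
 */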
static inline void bpos_to_disk_accounting_pos(struct disk_accounting_pos *acc, struct bpos p)
{
	acc->_pad = p;
#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	bch2_bpos_swab(&acc->_pad);
#endif
}

static inline struct bpos disk_accounting_pos_to_bpos(struct disk_accounting_pos *k)
{
	struct bpos ret = k->_pad;

#if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	bch2_bpos_swab(&ret);
#endif
	return ret;
}

int bch2_disk_accounting_mod(struct btree_trans *, struct disk_accounting_pos *,
			     s64 *, unsigned, bool);
int bch2_mod_dev_cached_sectors(struct btree_trans *, unsigned, s64, bool);
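
/*
 * Sketch of typical usage (illustrative only; buckets_delta, sectors_delta,
 * fragmented_delta and the surrounding transaction are assumed context, not
 * part of this interface): queue a delta against a per-device, per-data-type
 * counter from within a btree transaction:
 *
 *	struct disk_accounting_pos acc = {
 *		.type				= BCH_DISK_ACCOUNTING_dev_data_type,
 *		.dev_data_type.dev		= ca->dev_idx,
 *		.dev_data_type.data_type	= BCH_DATA_user,
 *	};
 *	s64 d[3] = { buckets_delta, sectors_delta, fragmented_delta };
 *
 *	ret = bch2_disk_accounting_mod(trans, &acc, d, ARRAY_SIZE(d), gc);
 */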

int bch2_accounting_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
void bch2_accounting_key_to_text(struct printbuf *, struct disk_accounting_pos *);
void bch2_accounting_to_text(struct printbuf *, struct bch_fs *, struct bkey_s_c);
void bch2_accounting_swab(struct bkey_s);

#define bch2_bkey_ops_accounting ((struct bkey_ops) {	\
	.key_validate	= bch2_accounting_validate,	\
	.val_to_text	= bch2_accounting_to_text,	\
	.swab		= bch2_accounting_swab,		\
	.min_val_size	= 8,				\
})

int bch2_accounting_update_sb(struct btree_trans *);

static inline int accounting_pos_cmp(const void *_l, const void *_r)
{
	const struct bpos *l = _l, *r = _r;

	return bpos_cmp(*l, *r);
}

int bch2_accounting_mem_insert(struct bch_fs *, struct bkey_s_c_accounting, bool);
void bch2_accounting_mem_gc(struct bch_fs *);

/*
 * Update in-memory counters so they match the btree update we're doing; called
 * from the transaction commit path.
 */
static inline int bch2_accounting_mem_mod_locked(struct btree_trans *trans, struct bkey_s_c_accounting a, bool gc, bool read)
{
	struct bch_fs *c = trans->c;
	struct disk_accounting_pos acc_k;
	bpos_to_disk_accounting_pos(&acc_k, a.k->p);

	if (acc_k.type == BCH_DISK_ACCOUNTING_inum)
		return 0;

	if (!gc && !read) {
		switch (acc_k.type) {
		case BCH_DISK_ACCOUNTING_persistent_reserved:
			trans->fs_usage_delta.reserved += acc_k.persistent_reserved.nr_replicas * a.v->d[0];
			break;
		case BCH_DISK_ACCOUNTING_replicas:
			fs_usage_data_type_to_base(&trans->fs_usage_delta, acc_k.replicas.data_type, a.v->d[0]);
			break;
		case BCH_DISK_ACCOUNTING_dev_data_type:
			rcu_read_lock();
			struct bch_dev *ca = bch2_dev_rcu(c, acc_k.dev_data_type.dev);
			if (ca) {
				this_cpu_add(ca->usage->d[acc_k.dev_data_type.data_type].buckets, a.v->d[0]);
				this_cpu_add(ca->usage->d[acc_k.dev_data_type.data_type].sectors, a.v->d[1]);
				this_cpu_add(ca->usage->d[acc_k.dev_data_type.data_type].fragmented, a.v->d[2]);
			}
			rcu_read_unlock();
			break;
		}
	}

	struct bch_accounting_mem *acc = &c->accounting;
	unsigned idx;

	EBUG_ON(gc && !acc->gc_running);

	while ((idx = eytzinger0_find(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]),
				      accounting_pos_cmp, &a.k->p)) >= acc->k.nr) {
		int ret = bch2_accounting_mem_insert(c, a, gc);
		if (ret)
			return ret;
	}

	struct accounting_mem_entry *e = &acc->k.data[idx];

	EBUG_ON(bch2_accounting_counters(a.k) != e->nr_counters);

	for (unsigned i = 0; i < bch2_accounting_counters(a.k); i++)
		this_cpu_add(e->v[gc][i], a.v->d[i]);
	return 0;
}

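/*
 * Wrapper around bch2_accounting_mem_mod_locked() that takes mark_lock for
 * read around the update.
 */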
static inline int bch2_accounting_mem_add(struct btree_trans *trans, struct bkey_s_c_accounting a, bool gc)
{
	percpu_down_read(&trans->c->mark_lock);
	int ret = bch2_accounting_mem_mod_locked(trans, a, gc, false);
	percpu_up_read(&trans->c->mark_lock);
	return ret;
}

static inline void bch2_accounting_mem_read_counters(struct bch_accounting_mem *acc,
						     unsigned idx, u64 *v, unsigned nr, bool gc)
{
	memset(v, 0, sizeof(*v) * nr);

	if (unlikely(idx >= acc->k.nr))
		return;

	struct accounting_mem_entry *e = &acc->k.data[idx];

	nr = min_t(unsigned, nr, e->nr_counters);

	for (unsigned i = 0; i < nr; i++)
		v[i] = percpu_u64_get(e->v[gc] + i);
}

static inline void bch2_accounting_mem_read(struct bch_fs *c, struct bpos p,
					    u64 *v, unsigned nr)
{
	struct bch_accounting_mem *acc = &c->accounting;
	unsigned idx = eytzinger0_find(acc->k.data, acc->k.nr, sizeof(acc->k.data[0]),
				       accounting_pos_cmp, &p);

	bch2_accounting_mem_read_counters(acc, idx, v, nr, false);
}

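/*
 * Sketch of typical usage of bch2_accounting_mem_read() (illustrative only;
 * dev_idx is assumed context): read the current in-memory value of a counter
 * by position, e.g. the (buckets, sectors, fragmented) triple tracked per
 * device and data type:
 *
 *	struct disk_accounting_pos acc = {
 *		.type				= BCH_DISK_ACCOUNTING_dev_data_type,
 *		.dev_data_type.dev		= dev_idx,
 *		.dev_data_type.data_type	= BCH_DATA_btree,
 *	};
 *	u64 v[3];
 *
 *	bch2_accounting_mem_read(c, disk_accounting_pos_to_bpos(&acc), v, ARRAY_SIZE(v));
 */
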
int bch2_fs_replicas_usage_read(struct bch_fs *, darray_char *);
int bch2_fs_accounting_read(struct bch_fs *, darray_char *, unsigned);
void bch2_fs_accounting_to_text(struct printbuf *, struct bch_fs *);

int bch2_gc_accounting_start(struct bch_fs *);
int bch2_gc_accounting_done(struct bch_fs *);

int bch2_accounting_read(struct bch_fs *);

int bch2_dev_usage_remove(struct bch_fs *, unsigned);
int bch2_dev_usage_init(struct bch_dev *, bool);

void bch2_verify_accounting_clean(struct bch_fs *c);

void bch2_accounting_gc_free(struct bch_fs *);
void bch2_fs_accounting_exit(struct bch_fs *);

#endif /* _BCACHEFS_DISK_ACCOUNTING_H */