/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_SB_MEMBERS_H
#define _BCACHEFS_SB_MEMBERS_H

int bch2_members_v2_init(struct bch_fs *c);
int bch_members_cpy_v2_v1(struct bch_sb_handle *disk_sb);
struct bch_member *bch2_members_v2_get_mut(struct bch_sb *sb, int i);
struct bch_member bch2_sb_member_get(struct bch_sb *sb, int i);

static inline bool bch2_dev_is_online(struct bch_dev *ca)
{
	return !percpu_ref_is_zero(&ca->io_ref);
}

static inline bool bch2_dev_is_readable(struct bch_dev *ca)
{
	return bch2_dev_is_online(ca) &&
		ca->mi.state != BCH_MEMBER_STATE_failed;
}

/*
 * Try to take an io ref on @ca: succeeds only if the io ref is still live
 * and the device's member state permits the requested access (@rw). On
 * success the caller must drop the ref with percpu_ref_put(&ca->io_ref).
 */
static inline bool bch2_dev_get_ioref(struct bch_dev *ca, int rw)
{
	if (!percpu_ref_tryget(&ca->io_ref))
		return false;

	if (ca->mi.state == BCH_MEMBER_STATE_rw ||
	    (ca->mi.state == BCH_MEMBER_STATE_ro && rw == READ))
		return true;

	percpu_ref_put(&ca->io_ref);
	return false;
}

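/*
 * Usage sketch (illustrative, not part of the real API): a read path takes
 * the io ref for the duration of the I/O and drops it afterwards. The I/O
 * submission step itself is elided.
 */
static inline bool example_read_with_ioref(struct bch_dev *ca)
{
	if (!bch2_dev_get_ioref(ca, READ))
		return false;		/* offline, or state forbids reads */

	/* ... submit I/O against @ca here ... */

	percpu_ref_put(&ca->io_ref);	/* balance bch2_dev_get_ioref() */
	return true;
}
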
static inline unsigned dev_mask_nr(const struct bch_devs_mask *devs)
{
	return bitmap_weight(devs->d, BCH_SB_MEMBERS_MAX);
}

static inline bool bch2_dev_list_has_dev(struct bch_devs_list devs,
					 unsigned dev)
{
	unsigned i;

	for (i = 0; i < devs.nr; i++)
		if (devs.devs[i] == dev)
			return true;

	return false;
}

static inline void bch2_dev_list_drop_dev(struct bch_devs_list *devs,
					  unsigned dev)
{
	unsigned i;

	for (i = 0; i < devs->nr; i++)
		if (devs->devs[i] == dev) {
			array_remove_item(devs->devs, devs->nr, i);
			return;
		}
}

static inline void bch2_dev_list_add_dev(struct bch_devs_list *devs,
					 unsigned dev)
{
	if (!bch2_dev_list_has_dev(*devs, dev)) {
		BUG_ON(devs->nr >= ARRAY_SIZE(devs->devs));
		devs->devs[devs->nr++] = dev;
	}
}

static inline struct bch_devs_list bch2_dev_list_single(unsigned dev)
{
	return (struct bch_devs_list) { .nr = 1, .devs[0] = dev };
}

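/*
 * Usage sketch (illustrative only): the dev_list helpers above compose as
 * follows; adds are deduplicated, drops shift the remaining entries down.
 */
static inline unsigned example_dev_list_usage(void)
{
	struct bch_devs_list devs = bch2_dev_list_single(0);

	bch2_dev_list_add_dev(&devs, 2);	/* { 0, 2 } */
	bch2_dev_list_add_dev(&devs, 2);	/* duplicate: no-op */
	bch2_dev_list_drop_dev(&devs, 0);	/* { 2 } */

	return devs.nr;				/* 1 */
}
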
/*
 * Returns the next device present at or after *iter, optionally restricted
 * to @mask; must be called under rcu_read_lock() or with state_lock held.
 */
static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, unsigned *iter,
					      const struct bch_devs_mask *mask)
{
	struct bch_dev *ca = NULL;

	while ((*iter = mask
		? find_next_bit(mask->d, c->sb.nr_devices, *iter)
		: *iter) < c->sb.nr_devices &&
	       !(ca = rcu_dereference_check(c->devs[*iter],
					    lockdep_is_held(&c->state_lock))))
		(*iter)++;

	return ca;
}

#define for_each_member_device_rcu(ca, c, iter, mask)			\
	for ((iter) = 0; ((ca) = __bch2_next_dev((c), &(iter), mask)); (iter)++)

static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, unsigned *iter)
{
	struct bch_dev *ca;

	rcu_read_lock();
	if ((ca = __bch2_next_dev(c, iter, NULL)))
		percpu_ref_get(&ca->ref);
	rcu_read_unlock();

	return ca;
}

/*
 * If you break early, you must drop your ref on the current device
 */
#define for_each_member_device(ca, c, iter)				\
	for ((iter) = 0;						\
	     (ca = bch2_get_next_dev(c, &(iter)));			\
	     percpu_ref_put(&ca->ref), (iter)++)

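/*
 * Sketch of the break-early rule above (illustrative only): leaving the
 * loop while @ca is still set means the loop's own percpu_ref_put() never
 * runs, so we drop the ref by hand.
 */
static inline bool example_any_dev_online(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i;

	for_each_member_device(ca, c, i)
		if (bch2_dev_is_online(ca)) {
			percpu_ref_put(&ca->ref);
			return true;
		}

	return false;
}
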
static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
						       unsigned *iter,
						       int state_mask)
{
	struct bch_dev *ca;

	rcu_read_lock();
	while ((ca = __bch2_next_dev(c, iter, NULL)) &&
	       (!((1 << ca->mi.state) & state_mask) ||
		!percpu_ref_tryget(&ca->io_ref)))
		(*iter)++;
	rcu_read_unlock();

	return ca;
}

#define __for_each_online_member(ca, c, iter, state_mask)		\
	for ((iter) = 0;						\
	     (ca = bch2_get_next_online_dev(c, &(iter), state_mask));	\
	     percpu_ref_put(&ca->io_ref), (iter)++)

#define for_each_online_member(ca, c, iter)				\
	__for_each_online_member(ca, c, iter, ~0)

#define for_each_rw_member(ca, c, iter)					\
	__for_each_online_member(ca, c, iter, 1 << BCH_MEMBER_STATE_rw)

#define for_each_readable_member(ca, c, iter)				\
	__for_each_online_member(ca, c, iter,				\
		(1 << BCH_MEMBER_STATE_rw)|(1 << BCH_MEMBER_STATE_ro))

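/*
 * Sketch (illustrative only): counting writable members. Each iteration
 * holds an io ref on @ca which the loop drops as it advances, so a loop
 * that runs to completion needs no cleanup.
 */
static inline unsigned example_nr_rw_members(struct bch_fs *c)
{
	struct bch_dev *ca;
	unsigned i, nr = 0;

	for_each_rw_member(ca, c, i)
		nr++;

	return nr;
}
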
/*
 * If a key exists that references a device, the device won't be going away and
 * we can omit rcu_read_lock():
 */
static inline struct bch_dev *bch_dev_bkey_exists(const struct bch_fs *c, unsigned idx)
{
	EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);

	return rcu_dereference_check(c->devs[idx], 1);
}

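/*
 * Sketch (illustrative): mapping a device index carried by an extent
 * pointer to its struct bch_dev. Assumes struct bch_extent_ptr's ->dev
 * field, which is defined elsewhere; the point is only that an index a
 * live key vouches for needs no rcu_read_lock().
 */
static inline struct bch_dev *example_ptr_to_dev(const struct bch_fs *c,
						 const struct bch_extent_ptr *ptr)
{
	return bch_dev_bkey_exists(c, ptr->dev);
}
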
static inline struct bch_dev *bch_dev_locked(struct bch_fs *c, unsigned idx)
{
	EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);

	return rcu_dereference_protected(c->devs[idx],
					 lockdep_is_held(&c->sb_lock) ||
					 lockdep_is_held(&c->state_lock));
}

/* XXX kill, move to struct bch_fs */
static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
{
	struct bch_devs_mask devs;
	struct bch_dev *ca;
	unsigned i;

	memset(&devs, 0, sizeof(devs));
	for_each_online_member(ca, c, i)
		__set_bit(ca->dev_idx, devs.d);
	return devs;
}

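/*
 * Sketch (illustrative only): bch2_online_devs() composes with
 * dev_mask_nr() above to count the currently online members.
 */
static inline unsigned example_nr_online_devs(struct bch_fs *c)
{
	struct bch_devs_mask online = bch2_online_devs(c);

	return dev_mask_nr(&online);
}
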
extern const struct bch_sb_field_ops bch_sb_field_ops_members_v1;
extern const struct bch_sb_field_ops bch_sb_field_ops_members_v2;

#endif /* _BCACHEFS_SB_MEMBERS_H */