/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_SB_MEMBERS_H
#define _BCACHEFS_SB_MEMBERS_H

#include "darray.h"
#include "bkey_types.h"

extern char * const bch2_member_error_strs[];

static inline struct bch_member *
__bch2_members_v2_get_mut(struct bch_sb_field_members_v2 *mi, unsigned i)
{
	return (void *) mi->_members + (i * le16_to_cpu(mi->member_bytes));
}

int bch2_sb_members_v2_init(struct bch_fs *c);
int bch2_sb_members_cpy_v2_v1(struct bch_sb_handle *disk_sb);
struct bch_member *bch2_members_v2_get_mut(struct bch_sb *sb, int i);
struct bch_member bch2_sb_member_get(struct bch_sb *sb, int i);

static inline bool bch2_dev_is_online(struct bch_dev *ca)
{
	return !percpu_ref_is_zero(&ca->io_ref);
}

static inline bool bch2_dev_is_readable(struct bch_dev *ca)
{
	return bch2_dev_is_online(ca) &&
		ca->mi.state != BCH_MEMBER_STATE_failed;
}

static inline unsigned dev_mask_nr(const struct bch_devs_mask *devs)
{
	return bitmap_weight(devs->d, BCH_SB_MEMBERS_MAX);
}

static inline bool bch2_dev_list_has_dev(struct bch_devs_list devs,
					 unsigned dev)
{
	darray_for_each(devs, i)
		if (*i == dev)
			return true;
	return false;
}

static inline void bch2_dev_list_drop_dev(struct bch_devs_list *devs,
					  unsigned dev)
{
	darray_for_each(*devs, i)
		if (*i == dev) {
			darray_remove_item(devs, i);
			return;
		}
}

static inline void bch2_dev_list_add_dev(struct bch_devs_list *devs,
					 unsigned dev)
{
	if (!bch2_dev_list_has_dev(*devs, dev)) {
		BUG_ON(devs->nr >= ARRAY_SIZE(devs->data));
		devs->data[devs->nr++] = dev;
	}
}

static inline struct bch_devs_list bch2_dev_list_single(unsigned dev)
{
	return (struct bch_devs_list) { .nr = 1, .data[0] = dev };
}

static inline struct bch_dev *__bch2_next_dev_idx(struct bch_fs *c, unsigned idx,
						  const struct bch_devs_mask *mask)
{
	struct bch_dev *ca = NULL;

	while ((idx = mask
		? find_next_bit(mask->d, c->sb.nr_devices, idx)
		: idx) < c->sb.nr_devices &&
	       !(ca = rcu_dereference_check(c->devs[idx],
					    lockdep_is_held(&c->state_lock))))
		idx++;

	return ca;
}

static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, struct bch_dev *ca,
					      const struct bch_devs_mask *mask)
{
	return __bch2_next_dev_idx(c, ca ? ca->dev_idx + 1 : 0, mask);
}

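/*
 * Illustrative usage sketch (not part of the original header): walk every
 * member device under RCU, optionally filtered by a device mask (NULL means
 * all devices). Per the rcu_dereference_check() above, the caller must hold
 * rcu_read_lock() or c->state_lock, and device pointers must not be used
 * after the critical section ends:
 *
 *	rcu_read_lock();
 *	for_each_member_device_rcu(c, ca, NULL)
 *		pr_info("dev %u online %u\n", ca->dev_idx,
 *			bch2_dev_is_online(ca));
 *	rcu_read_unlock();
 */
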
#define for_each_member_device_rcu(_c, _ca, _mask)			\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = __bch2_next_dev((_c), _ca, (_mask)));)

static inline void bch2_dev_get(struct bch_dev *ca)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	BUG_ON(atomic_long_inc_return(&ca->ref) <= 1L);
#else
	percpu_ref_get(&ca->ref);
#endif
}

static inline void __bch2_dev_put(struct bch_dev *ca)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	long r = atomic_long_dec_return(&ca->ref);
	if (r < (long) !ca->dying)
		panic("bch_dev->ref underflow, last put: %pS\n", (void *) ca->last_put);
	ca->last_put = _THIS_IP_;
	if (!r)
		complete(&ca->ref_completion);
#else
	percpu_ref_put(&ca->ref);
#endif
}

static inline void bch2_dev_put(struct bch_dev *ca)
{
	if (ca)
		__bch2_dev_put(ca);
}

static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, struct bch_dev *ca)
{
	rcu_read_lock();
	bch2_dev_put(ca);
	if ((ca = __bch2_next_dev(c, ca, NULL)))
		bch2_dev_get(ca);
	rcu_read_unlock();

	return ca;
}

/*
 * If you break early, you must drop your ref on the current device
 */
#define __for_each_member_device(_c, _ca)				\
	for (; (_ca = bch2_get_next_dev(_c, _ca));)

#define for_each_member_device(_c, _ca)					\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = bch2_get_next_dev(_c, _ca));)

static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
						       struct bch_dev *ca,
						       unsigned state_mask)
{
	rcu_read_lock();
	if (ca)
		percpu_ref_put(&ca->io_ref);

	while ((ca = __bch2_next_dev(c, ca, NULL)) &&
	       (!((1 << ca->mi.state) & state_mask) ||
		!percpu_ref_tryget(&ca->io_ref)))
		;
	rcu_read_unlock();

	return ca;
}

#define __for_each_online_member(_c, _ca, state_mask)			\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = bch2_get_next_online_dev(_c, _ca, state_mask));)

#define for_each_online_member(c, ca)					\
	__for_each_online_member(c, ca, ~0)

#define for_each_rw_member(c, ca)					\
	__for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw))

#define for_each_readable_member(c, ca)					\
	__for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw)|BIT(BCH_MEMBER_STATE_ro))

static inline bool bch2_dev_exists(const struct bch_fs *c, unsigned dev)
{
	return dev < c->sb.nr_devices && c->devs[dev];
}

static inline bool bucket_valid(const struct bch_dev *ca, u64 b)
{
	return b - ca->mi.first_bucket < ca->mi.nbuckets_minus_first;
}

static inline struct bch_dev *bch2_dev_have_ref(const struct bch_fs *c, unsigned dev)
{
	EBUG_ON(!bch2_dev_exists(c, dev));

	return rcu_dereference_check(c->devs[dev], 1);
}

static inline struct bch_dev *bch2_dev_locked(struct bch_fs *c, unsigned dev)
{
	EBUG_ON(!bch2_dev_exists(c, dev));

	return rcu_dereference_protected(c->devs[dev],
					 lockdep_is_held(&c->sb_lock) ||
					 lockdep_is_held(&c->state_lock));
}

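/*
 * Lookup sketch (illustrative, with a hypothetical use_dev() caller): the
 * tryget variants below take a long-lived reference on a device by index,
 * valid outside any RCU critical section and dropped with bch2_dev_put():
 *
 *	struct bch_dev *ca = bch2_dev_tryget_noerror(c, dev_idx);
 *	if (ca) {
 *		use_dev(ca);
 *		bch2_dev_put(ca);
 *	}
 */
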
static inline struct bch_dev *bch2_dev_rcu(struct bch_fs *c, unsigned dev)
{
	return c && dev < c->sb.nr_devices
		? rcu_dereference(c->devs[dev])
		: NULL;
}

static inline struct bch_dev *bch2_dev_tryget_noerror(struct bch_fs *c, unsigned dev)
{
	rcu_read_lock();
	struct bch_dev *ca = bch2_dev_rcu(c, dev);
	if (ca)
		bch2_dev_get(ca);
	rcu_read_unlock();
	return ca;
}

void bch2_dev_missing(struct bch_fs *, unsigned);

static inline struct bch_dev *bch2_dev_tryget(struct bch_fs *c, unsigned dev)
{
	struct bch_dev *ca = bch2_dev_tryget_noerror(c, dev);
	if (!ca)
		bch2_dev_missing(c, dev);
	return ca;
}

static inline struct bch_dev *bch2_dev_bucket_tryget_noerror(struct bch_fs *c, struct bpos bucket)
{
	struct bch_dev *ca = bch2_dev_tryget_noerror(c, bucket.inode);
	if (ca && !bucket_valid(ca, bucket.offset)) {
		bch2_dev_put(ca);
		ca = NULL;
	}
	return ca;
}

void bch2_dev_bucket_missing(struct bch_fs *, struct bpos);

static inline struct bch_dev *bch2_dev_bucket_tryget(struct bch_fs *c, struct bpos bucket)
{
	struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, bucket);
	if (!ca)
		bch2_dev_bucket_missing(c, bucket);
	return ca;
}

static inline struct bch_dev *bch2_dev_iterate_noerror(struct bch_fs *c, struct bch_dev *ca, unsigned dev_idx)
{
	if (ca && ca->dev_idx == dev_idx)
		return ca;
	bch2_dev_put(ca);
	return bch2_dev_tryget_noerror(c, dev_idx);
}

static inline struct bch_dev *bch2_dev_iterate(struct bch_fs *c, struct bch_dev *ca, unsigned dev_idx)
{
	if (ca && ca->dev_idx == dev_idx)
		return ca;
	bch2_dev_put(ca);
	return bch2_dev_tryget(c, dev_idx);
}

static inline struct bch_dev *bch2_dev_get_ioref(struct bch_fs *c, unsigned dev, int rw)
{
	rcu_read_lock();
	struct bch_dev *ca = bch2_dev_rcu(c, dev);
	if (ca && !percpu_ref_tryget(&ca->io_ref))
		ca = NULL;
	rcu_read_unlock();

	if (ca &&
	    (ca->mi.state == BCH_MEMBER_STATE_rw ||
	     (ca->mi.state == BCH_MEMBER_STATE_ro && rw == READ)))
		return ca;

	if (ca)
		percpu_ref_put(&ca->io_ref);
	return NULL;
}

/* XXX kill, move to struct bch_fs */
static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
{
	struct bch_devs_mask devs;

	memset(&devs, 0, sizeof(devs));
	for_each_online_member(c, ca)
		__set_bit(ca->dev_idx, devs.d);
	return devs;
}

extern const struct bch_sb_field_ops bch_sb_field_ops_members_v1;
extern const struct bch_sb_field_ops bch_sb_field_ops_members_v2;

static inline bool bch2_member_alive(struct bch_member *m)
{
	return !bch2_is_zero(&m->uuid, sizeof(m->uuid));
}

static inline bool bch2_member_exists(struct bch_sb *sb, unsigned dev)
{
	if (dev < sb->nr_devices) {
		struct bch_member m = bch2_sb_member_get(sb, dev);
		return bch2_member_alive(&m);
	}
	return false;
}

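/*
 * Note on bch2_mi_to_cpu() below (inferred from the decode, not stated in
 * the original header): BCH_MEMBER_DURABILITY() appears to be stored biased
 * by one so that a raw value of zero can mean "unset". Zero decodes to the
 * default durability of 1, and a raw value v > 0 decodes to v - 1.
 */
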
static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
{
	return (struct bch_member_cpu) {
		.nbuckets	= le64_to_cpu(mi->nbuckets),
		.nbuckets_minus_first = le64_to_cpu(mi->nbuckets) -
			le16_to_cpu(mi->first_bucket),
		.first_bucket	= le16_to_cpu(mi->first_bucket),
		.bucket_size	= le16_to_cpu(mi->bucket_size),
		.group		= BCH_MEMBER_GROUP(mi),
		.state		= BCH_MEMBER_STATE(mi),
		.discard	= BCH_MEMBER_DISCARD(mi),
		.data_allowed	= BCH_MEMBER_DATA_ALLOWED(mi),
		.durability	= BCH_MEMBER_DURABILITY(mi)
			? BCH_MEMBER_DURABILITY(mi) - 1
			: 1,
		.freespace_initialized = BCH_MEMBER_FREESPACE_INITIALIZED(mi),
		.valid		= bch2_member_alive(mi),
		.btree_bitmap_shift	= mi->btree_bitmap_shift,
		.btree_allocated_bitmap = le64_to_cpu(mi->btree_allocated_bitmap),
	};
}

void bch2_sb_members_from_cpu(struct bch_fs *);

void bch2_dev_io_errors_to_text(struct printbuf *, struct bch_dev *);
void bch2_dev_errors_reset(struct bch_dev *);

static inline bool bch2_dev_btree_bitmap_marked_sectors(struct bch_dev *ca, u64 start, unsigned sectors)
{
	u64 end = start + sectors;

	if (end > 64ULL << ca->mi.btree_bitmap_shift)
		return false;

	for (unsigned bit = start >> ca->mi.btree_bitmap_shift;
	     (u64) bit << ca->mi.btree_bitmap_shift < end;
	     bit++)
		if (!(ca->mi.btree_allocated_bitmap & BIT_ULL(bit)))
			return false;
	return true;
}

bool bch2_dev_btree_bitmap_marked(struct bch_fs *, struct bkey_s_c);
void bch2_dev_btree_bitmap_mark(struct bch_fs *, struct bkey_s_c);

#endif /* _BCACHEFS_SB_MEMBERS_H */