/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_SB_MEMBERS_H
#define _BCACHEFS_SB_MEMBERS_H

#include "darray.h"
#include "bkey_types.h"

extern char * const bch2_member_error_strs[];

static inline struct bch_member *
__bch2_members_v2_get_mut(struct bch_sb_field_members_v2 *mi, unsigned i)
{
	return (void *) mi->_members + (i * le16_to_cpu(mi->member_bytes));
}

int bch2_sb_members_v2_init(struct bch_fs *c);
int bch2_sb_members_cpy_v2_v1(struct bch_sb_handle *disk_sb);
struct bch_member *bch2_members_v2_get_mut(struct bch_sb *sb, int i);
struct bch_member bch2_sb_member_get(struct bch_sb *sb, int i);

static inline bool bch2_dev_is_online(struct bch_dev *ca)
{
	return !percpu_ref_is_zero(&ca->io_ref);
}

static inline bool bch2_dev_is_readable(struct bch_dev *ca)
{
	return bch2_dev_is_online(ca) &&
		ca->mi.state != BCH_MEMBER_STATE_failed;
}

static inline unsigned dev_mask_nr(const struct bch_devs_mask *devs)
{
	return bitmap_weight(devs->d, BCH_SB_MEMBERS_MAX);
}

static inline bool bch2_dev_list_has_dev(struct bch_devs_list devs,
					 unsigned dev)
{
	darray_for_each(devs, i)
		if (*i == dev)
			return true;
	return false;
}

static inline void bch2_dev_list_drop_dev(struct bch_devs_list *devs,
					  unsigned dev)
{
	darray_for_each(*devs, i)
		if (*i == dev) {
			darray_remove_item(devs, i);
			return;
		}
}

static inline void bch2_dev_list_add_dev(struct bch_devs_list *devs,
					 unsigned dev)
{
	if (!bch2_dev_list_has_dev(*devs, dev)) {
		BUG_ON(devs->nr >= ARRAY_SIZE(devs->data));
		devs->data[devs->nr++] = dev;
	}
}

static inline struct bch_devs_list bch2_dev_list_single(unsigned dev)
{
	return (struct bch_devs_list) { .nr = 1, .data[0] = dev };
}

static inline struct bch_dev *__bch2_next_dev_idx(struct bch_fs *c, unsigned idx,
						  const struct bch_devs_mask *mask)
{
	struct bch_dev *ca = NULL;

	while ((idx = mask
		? find_next_bit(mask->d, c->sb.nr_devices, idx)
		: idx) < c->sb.nr_devices &&
	       !(ca = rcu_dereference_check(c->devs[idx],
					    lockdep_is_held(&c->state_lock))))
		idx++;

	return ca;
}

static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, struct bch_dev *ca,
					      const struct bch_devs_mask *mask)
{
	return __bch2_next_dev_idx(c, ca ? ca->dev_idx + 1 : 0, mask);
}

#define for_each_member_device_rcu(_c, _ca, _mask)			\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = __bch2_next_dev((_c), _ca, (_mask)));)
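
/*
 * Usage sketch (illustrative, not part of this interface): walking every
 * member device under RCU, with a NULL mask to visit all slots.  No refs
 * are taken, so device pointers are only valid inside the read-side
 * critical section:
 *
 *	rcu_read_lock();
 *	for_each_member_device_rcu(c, ca, NULL)
 *		pr_info("member %u online=%u\n", ca->dev_idx,
 *			bch2_dev_is_online(ca));
 *	rcu_read_unlock();
 */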

static inline void bch2_dev_get(struct bch_dev *ca)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	BUG_ON(atomic_long_inc_return(&ca->ref) <= 1L);
#else
	percpu_ref_get(&ca->ref);
#endif
}

static inline void __bch2_dev_put(struct bch_dev *ca)
{
#ifdef CONFIG_BCACHEFS_DEBUG
	long r = atomic_long_dec_return(&ca->ref);
	if (r < (long) !ca->dying)
		panic("bch_dev->ref underflow, last put: %pS\n", (void *) ca->last_put);
	ca->last_put = _THIS_IP_;
	if (!r)
		complete(&ca->ref_completion);
#else
	percpu_ref_put(&ca->ref);
#endif
}

static inline void bch2_dev_put(struct bch_dev *ca)
{
	if (ca)
		__bch2_dev_put(ca);
}

static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, struct bch_dev *ca)
{
	rcu_read_lock();
	bch2_dev_put(ca);
	if ((ca = __bch2_next_dev(c, ca, NULL)))
		bch2_dev_get(ca);
	rcu_read_unlock();

	return ca;
}

/*
 * If you break early, you must drop your ref on the current device
 */
#define __for_each_member_device(_c, _ca)				\
	for (; (_ca = bch2_get_next_dev(_c, _ca));)

#define for_each_member_device(_c, _ca)					\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = bch2_get_next_dev(_c, _ca));)
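
/*
 * Sketch of the early-break rule above (example only; "target" is a
 * hypothetical device index): for_each_member_device() moves the ref from
 * one device to the next as it iterates, so leaving the loop early means
 * the caller still holds a ref on the current device and must drop it by
 * hand:
 *
 *	for_each_member_device(c, ca)
 *		if (ca->dev_idx == target) {
 *			bch2_dev_put(ca);
 *			break;
 *		}
 */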

static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
						       struct bch_dev *ca,
						       unsigned state_mask)
{
	rcu_read_lock();
	if (ca)
		percpu_ref_put(&ca->io_ref);

	while ((ca = __bch2_next_dev(c, ca, NULL)) &&
	       (!((1 << ca->mi.state) & state_mask) ||
		!percpu_ref_tryget(&ca->io_ref)))
		;
	rcu_read_unlock();

	return ca;
}

#define __for_each_online_member(_c, _ca, state_mask)			\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = bch2_get_next_online_dev(_c, _ca, state_mask));)

#define for_each_online_member(c, ca)					\
	__for_each_online_member(c, ca, ~0)

#define for_each_rw_member(c, ca)					\
	__for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw))

#define for_each_readable_member(c, ca)					\
	__for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw)|BIT(BCH_MEMBER_STATE_ro))

static inline bool bch2_dev_exists(const struct bch_fs *c, unsigned dev)
{
	return dev < c->sb.nr_devices && c->devs[dev];
}

static inline bool bucket_valid(const struct bch_dev *ca, u64 b)
{
	return b - ca->mi.first_bucket < ca->mi.nbuckets_minus_first;
}

static inline struct bch_dev *bch2_dev_have_ref(const struct bch_fs *c, unsigned dev)
{
	EBUG_ON(!bch2_dev_exists(c, dev));

	return rcu_dereference_check(c->devs[dev], 1);
}

static inline struct bch_dev *bch2_dev_locked(struct bch_fs *c, unsigned dev)
{
	EBUG_ON(!bch2_dev_exists(c, dev));

	return rcu_dereference_protected(c->devs[dev],
					 lockdep_is_held(&c->sb_lock) ||
					 lockdep_is_held(&c->state_lock));
}

static inline struct bch_dev *bch2_dev_rcu_noerror(struct bch_fs *c, unsigned dev)
{
	return c && dev < c->sb.nr_devices
		? rcu_dereference(c->devs[dev])
		: NULL;
}

void bch2_dev_missing(struct bch_fs *, unsigned);

static inline struct bch_dev *bch2_dev_rcu(struct bch_fs *c, unsigned dev)
{
	struct bch_dev *ca = bch2_dev_rcu_noerror(c, dev);
	if (unlikely(!ca))
		bch2_dev_missing(c, dev);
	return ca;
}

static inline struct bch_dev *bch2_dev_tryget_noerror(struct bch_fs *c, unsigned dev)
{
	rcu_read_lock();
	struct bch_dev *ca = bch2_dev_rcu_noerror(c, dev);
	if (ca)
		bch2_dev_get(ca);
	rcu_read_unlock();
	return ca;
}

static inline struct bch_dev *bch2_dev_tryget(struct bch_fs *c, unsigned dev)
{
	struct bch_dev *ca = bch2_dev_tryget_noerror(c, dev);
	if (unlikely(!ca))
		bch2_dev_missing(c, dev);
	return ca;
}

static inline struct bch_dev *bch2_dev_bucket_tryget_noerror(struct bch_fs *c, struct bpos bucket)
{
	struct bch_dev *ca = bch2_dev_tryget_noerror(c, bucket.inode);
	if (ca && !bucket_valid(ca, bucket.offset)) {
		bch2_dev_put(ca);
		ca = NULL;
	}
	return ca;
}

void bch2_dev_bucket_missing(struct bch_fs *, struct bpos);

static inline struct bch_dev *bch2_dev_bucket_tryget(struct bch_fs *c, struct bpos bucket)
{
	struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, bucket);
	if (!ca)
		bch2_dev_bucket_missing(c, bucket);
	return ca;
}

static inline struct bch_dev *bch2_dev_iterate_noerror(struct bch_fs *c, struct bch_dev *ca, unsigned dev_idx)
{
	if (ca && ca->dev_idx == dev_idx)
		return ca;
	bch2_dev_put(ca);
	return bch2_dev_tryget_noerror(c, dev_idx);
}

static inline struct bch_dev *bch2_dev_iterate(struct bch_fs *c, struct bch_dev *ca, unsigned dev_idx)
{
	if (ca && ca->dev_idx == dev_idx)
		return ca;
	bch2_dev_put(ca);
	return bch2_dev_tryget(c, dev_idx);
}

static inline struct bch_dev *bch2_dev_get_ioref(struct bch_fs *c, unsigned dev, int rw)
{
	rcu_read_lock();
	struct bch_dev *ca = bch2_dev_rcu(c, dev);
	if (ca && !percpu_ref_tryget(&ca->io_ref))
		ca = NULL;
	rcu_read_unlock();

	if (ca &&
	    (ca->mi.state == BCH_MEMBER_STATE_rw ||
	     (ca->mi.state == BCH_MEMBER_STATE_ro && rw == READ)))
		return ca;

	if (ca)
		percpu_ref_put(&ca->io_ref);
	return NULL;
}
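
/*
 * Illustrative sketch (assumes "dev" is a member index; do_write_io() is a
 * hypothetical helper): take an io ref for a write, do the IO, drop it.
 * bch2_dev_get_ioref() returns NULL if the device is missing, offline, or
 * not writeable:
 *
 *	struct bch_dev *ca = bch2_dev_get_ioref(c, dev, WRITE);
 *	if (ca) {
 *		do_write_io(ca);
 *		percpu_ref_put(&ca->io_ref);
 *	}
 */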

/* XXX kill, move to struct bch_fs */
static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
{
	struct bch_devs_mask devs;

	memset(&devs, 0, sizeof(devs));
	for_each_online_member(c, ca)
		__set_bit(ca->dev_idx, devs.d);
	return devs;
}

extern const struct bch_sb_field_ops bch_sb_field_ops_members_v1;
extern const struct bch_sb_field_ops bch_sb_field_ops_members_v2;

static inline bool bch2_member_alive(struct bch_member *m)
{
	return !bch2_is_zero(&m->uuid, sizeof(m->uuid));
}

static inline bool bch2_member_exists(struct bch_sb *sb, unsigned dev)
{
	if (dev < sb->nr_devices) {
		struct bch_member m = bch2_sb_member_get(sb, dev);
		return bch2_member_alive(&m);
	}
	return false;
}

unsigned bch2_sb_nr_devices(const struct bch_sb *);

static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
{
	return (struct bch_member_cpu) {
		.nbuckets		= le64_to_cpu(mi->nbuckets),
		.nbuckets_minus_first	= le64_to_cpu(mi->nbuckets) -
			le16_to_cpu(mi->first_bucket),
		.first_bucket		= le16_to_cpu(mi->first_bucket),
		.bucket_size		= le16_to_cpu(mi->bucket_size),
		.group			= BCH_MEMBER_GROUP(mi),
		.state			= BCH_MEMBER_STATE(mi),
		.discard		= BCH_MEMBER_DISCARD(mi),
		.data_allowed		= BCH_MEMBER_DATA_ALLOWED(mi),
		.durability		= BCH_MEMBER_DURABILITY(mi)
			? BCH_MEMBER_DURABILITY(mi) - 1
			: 1,
		.freespace_initialized	= BCH_MEMBER_FREESPACE_INITIALIZED(mi),
		.valid			= bch2_member_alive(mi),
		.btree_bitmap_shift	= mi->btree_bitmap_shift,
		.btree_allocated_bitmap	= le64_to_cpu(mi->btree_allocated_bitmap),
	};
}

void bch2_sb_members_from_cpu(struct bch_fs *);

void bch2_dev_io_errors_to_text(struct printbuf *, struct bch_dev *);
void bch2_dev_errors_reset(struct bch_dev *);

static inline bool bch2_dev_btree_bitmap_marked_sectors(struct bch_dev *ca, u64 start, unsigned sectors)
{
	u64 end = start + sectors;

	if (end > 64ULL << ca->mi.btree_bitmap_shift)
		return false;

	for (unsigned bit = start >> ca->mi.btree_bitmap_shift;
	     (u64) bit << ca->mi.btree_bitmap_shift < end;
	     bit++)
		if (!(ca->mi.btree_allocated_bitmap & BIT_ULL(bit)))
			return false;
	return true;
}

bool bch2_dev_btree_bitmap_marked(struct bch_fs *, struct bkey_s_c);
void bch2_dev_btree_bitmap_mark(struct bch_fs *, struct bkey_s_c);

int bch2_sb_member_alloc(struct bch_fs *);

#endif /* _BCACHEFS_SB_MEMBERS_H */