/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_SB_MEMBERS_H
#define _BCACHEFS_SB_MEMBERS_H

#include "darray.h"

extern char * const bch2_member_error_strs[];

static inline struct bch_member *
__bch2_members_v2_get_mut(struct bch_sb_field_members_v2 *mi, unsigned i)
{
	return (void *) mi->_members + (i * le16_to_cpu(mi->member_bytes));
}

int bch2_sb_members_v2_init(struct bch_fs *c);
int bch2_sb_members_cpy_v2_v1(struct bch_sb_handle *disk_sb);
struct bch_member *bch2_members_v2_get_mut(struct bch_sb *sb, int i);
struct bch_member bch2_sb_member_get(struct bch_sb *sb, int i);

static inline bool bch2_dev_is_online(struct bch_dev *ca)
{
	return !percpu_ref_is_zero(&ca->io_ref);
}

static inline bool bch2_dev_is_readable(struct bch_dev *ca)
{
	return bch2_dev_is_online(ca) &&
		ca->mi.state != BCH_MEMBER_STATE_failed;
}

static inline bool bch2_dev_get_ioref(struct bch_dev *ca, int rw)
{
	if (!percpu_ref_tryget(&ca->io_ref))
		return false;

	if (ca->mi.state == BCH_MEMBER_STATE_rw ||
	    (ca->mi.state == BCH_MEMBER_STATE_ro && rw == READ))
		return true;

	percpu_ref_put(&ca->io_ref);
	return false;
}
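/*
 * Illustrative usage sketch, not part of this header's API: take an io ref
 * before issuing I/O against a device that may be going offline, and drop it
 * when done. read_from_device() and buf are hypothetical stand-ins; only
 * bch2_dev_get_ioref() and percpu_ref_put() are real.
 *
 *	if (bch2_dev_get_ioref(ca, READ)) {
 *		ret = read_from_device(ca, buf);	(hypothetical I/O call)
 *		percpu_ref_put(&ca->io_ref);		(drop the ref we took)
 *	} else {
 *		ret = -EIO;				(device offline/failed)
 *	}
 */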

static inline unsigned dev_mask_nr(const struct bch_devs_mask *devs)
{
	return bitmap_weight(devs->d, BCH_SB_MEMBERS_MAX);
}

static inline bool bch2_dev_list_has_dev(struct bch_devs_list devs,
					 unsigned dev)
{
	darray_for_each(devs, i)
		if (*i == dev)
			return true;
	return false;
}

static inline void bch2_dev_list_drop_dev(struct bch_devs_list *devs,
					  unsigned dev)
{
	darray_for_each(*devs, i)
		if (*i == dev) {
			darray_remove_item(devs, i);
			return;
		}
}

static inline void bch2_dev_list_add_dev(struct bch_devs_list *devs,
					 unsigned dev)
{
	if (!bch2_dev_list_has_dev(*devs, dev)) {
		BUG_ON(devs->nr >= ARRAY_SIZE(devs->data));
		devs->data[devs->nr++] = dev;
	}
}

static inline struct bch_devs_list bch2_dev_list_single(unsigned dev)
{
	return (struct bch_devs_list) { .nr = 1, .data[0] = dev };
}

static inline struct bch_dev *__bch2_next_dev_idx(struct bch_fs *c, unsigned idx,
						  const struct bch_devs_mask *mask)
{
	struct bch_dev *ca = NULL;

	while ((idx = mask
		? find_next_bit(mask->d, c->sb.nr_devices, idx)
		: idx) < c->sb.nr_devices &&
	       !(ca = rcu_dereference_check(c->devs[idx],
					    lockdep_is_held(&c->state_lock))))
		idx++;

	return ca;
}

static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, struct bch_dev *ca,
					      const struct bch_devs_mask *mask)
{
	return __bch2_next_dev_idx(c, ca ? ca->dev_idx + 1 : 0, mask);
}

#define for_each_member_device_rcu(_c, _ca, _mask)			\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = __bch2_next_dev((_c), _ca, (_mask)));)

static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, struct bch_dev *ca)
{
	if (ca)
		percpu_ref_put(&ca->ref);

	rcu_read_lock();
	if ((ca = __bch2_next_dev(c, ca, NULL)))
		percpu_ref_get(&ca->ref);
	rcu_read_unlock();

	return ca;
}

/*
 * If you break early, you must drop your ref on the current device
 */
#define __for_each_member_device(_c, _ca)				\
	for (; (_ca = bch2_get_next_dev(_c, _ca));)

#define for_each_member_device(_c, _ca)					\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = bch2_get_next_dev(_c, _ca));)

static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
						       struct bch_dev *ca,
						       unsigned state_mask)
{
	if (ca)
		percpu_ref_put(&ca->io_ref);

	rcu_read_lock();
	while ((ca = __bch2_next_dev(c, ca, NULL)) &&
	       (!((1 << ca->mi.state) & state_mask) ||
		!percpu_ref_tryget(&ca->io_ref)))
		;
	rcu_read_unlock();

	return ca;
}

#define __for_each_online_member(_c, _ca, state_mask)			\
	for (struct bch_dev *_ca = NULL;				\
	     (_ca = bch2_get_next_online_dev(_c, _ca, state_mask));)

#define for_each_online_member(c, ca)					\
	__for_each_online_member(c, ca, ~0)

#define for_each_rw_member(c, ca)					\
	__for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw))

#define for_each_readable_member(c, ca)					\
	__for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw)|BIT(BCH_MEMBER_STATE_ro))
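
/*
 * Illustrative usage sketches (total_buckets and target_idx are hypothetical
 * locals, not part of this header). A full pass drops refs automatically;
 * breaking out early means the caller must drop the ref on the current
 * device, per the comment above __for_each_member_device():
 *
 *	u64 total_buckets = 0;
 *	for_each_member_device(c, ca)
 *		total_buckets += ca->mi.nbuckets;
 *
 *	for_each_online_member(c, ca)
 *		if (ca->dev_idx == target_idx) {
 *			percpu_ref_put(&ca->io_ref);
 *			break;
 *		}
 */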

/*
 * If a key exists that references a device, the device won't be going away and
 * we can omit rcu_read_lock():
 */
static inline struct bch_dev *bch_dev_bkey_exists(const struct bch_fs *c, unsigned idx)
{
	EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);

	return rcu_dereference_check(c->devs[idx], 1);
}

static inline struct bch_dev *bch_dev_locked(struct bch_fs *c, unsigned idx)
{
	EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);

	return rcu_dereference_protected(c->devs[idx],
					 lockdep_is_held(&c->sb_lock) ||
					 lockdep_is_held(&c->state_lock));
}

/* XXX kill, move to struct bch_fs */
static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
{
	struct bch_devs_mask devs;

	memset(&devs, 0, sizeof(devs));
	for_each_online_member(c, ca)
		__set_bit(ca->dev_idx, devs.d);
	return devs;
}

extern const struct bch_sb_field_ops bch_sb_field_ops_members_v1;
extern const struct bch_sb_field_ops bch_sb_field_ops_members_v2;

static inline bool bch2_member_exists(struct bch_member *m)
{
	return !bch2_is_zero(&m->uuid, sizeof(m->uuid));
}

static inline bool bch2_dev_exists(struct bch_sb *sb, unsigned dev)
{
	if (dev < sb->nr_devices) {
		struct bch_member m = bch2_sb_member_get(sb, dev);
		return bch2_member_exists(&m);
	}
	return false;
}

static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
{
	return (struct bch_member_cpu) {
		.nbuckets	= le64_to_cpu(mi->nbuckets),
		.first_bucket	= le16_to_cpu(mi->first_bucket),
		.bucket_size	= le16_to_cpu(mi->bucket_size),
		.group		= BCH_MEMBER_GROUP(mi),
		.state		= BCH_MEMBER_STATE(mi),
		.discard	= BCH_MEMBER_DISCARD(mi),
		.data_allowed	= BCH_MEMBER_DATA_ALLOWED(mi),
		/* on-disk durability is stored biased by one: 0 means unset, defaulting to 1 */
		.durability	= BCH_MEMBER_DURABILITY(mi)
			? BCH_MEMBER_DURABILITY(mi) - 1
			: 1,
		.freespace_initialized = BCH_MEMBER_FREESPACE_INITIALIZED(mi),
		.valid		= bch2_member_exists(mi),
	};
}

void bch2_sb_members_from_cpu(struct bch_fs *);

void bch2_dev_io_errors_to_text(struct printbuf *, struct bch_dev *);
void bch2_dev_errors_reset(struct bch_dev *);

#endif /* _BCACHEFS_SB_MEMBERS_H */