/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_SB_MEMBERS_H
#define _BCACHEFS_SB_MEMBERS_H

extern char * const bch2_member_error_strs[];

static inline struct bch_member *
__bch2_members_v2_get_mut(struct bch_sb_field_members_v2 *mi, unsigned i)
{
	return (void *) mi->_members + (i * le16_to_cpu(mi->member_bytes));
}

int bch2_sb_members_v2_init(struct bch_fs *c);
int bch2_sb_members_cpy_v2_v1(struct bch_sb_handle *disk_sb);
struct bch_member *bch2_members_v2_get_mut(struct bch_sb *sb, int i);
struct bch_member bch2_sb_member_get(struct bch_sb *sb, int i);

static inline bool bch2_dev_is_online(struct bch_dev *ca)
{
	return !percpu_ref_is_zero(&ca->io_ref);
}

static inline bool bch2_dev_is_readable(struct bch_dev *ca)
{
	return bch2_dev_is_online(ca) &&
		ca->mi.state != BCH_MEMBER_STATE_failed;
}

static inline bool bch2_dev_get_ioref(struct bch_dev *ca, int rw)
{
	if (!percpu_ref_tryget(&ca->io_ref))
		return false;

	if (ca->mi.state == BCH_MEMBER_STATE_rw ||
	    (ca->mi.state == BCH_MEMBER_STATE_ro && rw == READ))
		return true;

	percpu_ref_put(&ca->io_ref);
	return false;
}
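/*
 * Usage sketch (illustrative only, not part of this header): I/O paths take a
 * ref on the device's io_ref before submitting and drop it when the I/O
 * completes.  hypothetical_submit_read() below is a stand-in for the real
 * submission path; only bch2_dev_get_ioref() and percpu_ref_put() are real:
 *
 *	if (bch2_dev_get_ioref(ca, READ)) {
 *		ret = hypothetical_submit_read(ca, bio);
 *		percpu_ref_put(&ca->io_ref);
 *	} else {
 *		ret = -EIO;
 *	}
 */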
static inline unsigned dev_mask_nr(const struct bch_devs_mask *devs)
{
	return bitmap_weight(devs->d, BCH_SB_MEMBERS_MAX);
}

static inline bool bch2_dev_list_has_dev(struct bch_devs_list devs,
					 unsigned dev)
{
	unsigned i;

	for (i = 0; i < devs.nr; i++)
		if (devs.devs[i] == dev)
			return true;

	return false;
}

static inline void bch2_dev_list_drop_dev(struct bch_devs_list *devs,
					  unsigned dev)
{
	unsigned i;

	for (i = 0; i < devs->nr; i++)
		if (devs->devs[i] == dev) {
			array_remove_item(devs->devs, devs->nr, i);
			return;
		}
}

static inline void bch2_dev_list_add_dev(struct bch_devs_list *devs,
					 unsigned dev)
{
	if (!bch2_dev_list_has_dev(*devs, dev)) {
		BUG_ON(devs->nr >= ARRAY_SIZE(devs->devs));
		devs->devs[devs->nr++] = dev;
	}
}

static inline struct bch_devs_list bch2_dev_list_single(unsigned dev)
{
	return (struct bch_devs_list) { .nr = 1, .devs[0] = dev };
}

static inline struct bch_dev *__bch2_next_dev(struct bch_fs *c, unsigned *iter,
					      const struct bch_devs_mask *mask)
{
	struct bch_dev *ca = NULL;

	while ((*iter = mask
		? find_next_bit(mask->d, c->sb.nr_devices, *iter)
		: *iter) < c->sb.nr_devices &&
	       !(ca = rcu_dereference_check(c->devs[*iter],
					    lockdep_is_held(&c->state_lock))))
		(*iter)++;

	return ca;
}

#define for_each_member_device_rcu(ca, c, iter, mask)			\
	for ((iter) = 0; ((ca) = __bch2_next_dev((c), &(iter), mask)); (iter)++)

static inline struct bch_dev *bch2_get_next_dev(struct bch_fs *c, unsigned *iter)
{
	struct bch_dev *ca;

	rcu_read_lock();
	if ((ca = __bch2_next_dev(c, iter, NULL)))
		percpu_ref_get(&ca->ref);
	rcu_read_unlock();

	return ca;
}

/*
 * If you break early, you must drop your ref on the current device
 */
#define for_each_member_device(ca, c, iter)				\
	for ((iter) = 0;						\
	     (ca = bch2_get_next_dev(c, &(iter)));			\
	     percpu_ref_put(&ca->ref), (iter)++)

static inline struct bch_dev *bch2_get_next_online_dev(struct bch_fs *c,
						       unsigned *iter,
						       int state_mask)
{
	struct bch_dev *ca;

	rcu_read_lock();
	while ((ca = __bch2_next_dev(c, iter, NULL)) &&
	       (!((1 << ca->mi.state) & state_mask) ||
		!percpu_ref_tryget(&ca->io_ref)))
		(*iter)++;
	rcu_read_unlock();

	return ca;
}

#define __for_each_online_member(ca, c, iter, state_mask)		\
	for ((iter) = 0;						\
	     (ca = bch2_get_next_online_dev(c, &(iter), state_mask));	\
	     percpu_ref_put(&ca->io_ref), (iter)++)

#define for_each_online_member(ca, c, iter)				\
	__for_each_online_member(ca, c, iter, ~0)

#define for_each_rw_member(ca, c, iter)					\
	__for_each_online_member(ca, c, iter, 1 << BCH_MEMBER_STATE_rw)

#define for_each_readable_member(ca, c, iter)				\
	__for_each_online_member(ca, c, iter,				\
		(1 << BCH_MEMBER_STATE_rw)|(1 << BCH_MEMBER_STATE_ro))
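/*
 * Usage sketch (illustrative only, not part of this header): the iterators
 * above take and drop the per-device refs themselves, so a loop that runs to
 * completion needs no explicit get/put.  Breaking out early leaves the caller
 * holding the current device's ref, which must then be dropped by hand.
 * target_idx is a hypothetical variable used only for this example:
 *
 *	for_each_rw_member(ca, c, i)
 *		if (ca->dev_idx == target_idx) {
 *			percpu_ref_put(&ca->io_ref);
 *			break;
 *		}
 *
 * The percpu_ref_put() here matches the io_ref taken by
 * bch2_get_next_online_dev(); for_each_member_device() would need
 * percpu_ref_put(&ca->ref) instead.
 */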
/*
 * If a key exists that references a device, the device won't be going away and
 * we can omit rcu_read_lock():
 */
static inline struct bch_dev *bch_dev_bkey_exists(const struct bch_fs *c, unsigned idx)
{
	EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);

	return rcu_dereference_check(c->devs[idx], 1);
}

static inline struct bch_dev *bch_dev_locked(struct bch_fs *c, unsigned idx)
{
	EBUG_ON(idx >= c->sb.nr_devices || !c->devs[idx]);

	return rcu_dereference_protected(c->devs[idx],
					 lockdep_is_held(&c->sb_lock) ||
					 lockdep_is_held(&c->state_lock));
}

/* XXX kill, move to struct bch_fs */
static inline struct bch_devs_mask bch2_online_devs(struct bch_fs *c)
{
	struct bch_devs_mask devs;
	struct bch_dev *ca;
	unsigned i;

	memset(&devs, 0, sizeof(devs));
	for_each_online_member(ca, c, i)
		__set_bit(ca->dev_idx, devs.d);
	return devs;
}

extern const struct bch_sb_field_ops bch_sb_field_ops_members_v1;
extern const struct bch_sb_field_ops bch_sb_field_ops_members_v2;

static inline bool bch2_member_exists(struct bch_member *m)
{
	return !bch2_is_zero(&m->uuid, sizeof(m->uuid));
}

static inline bool bch2_dev_exists(struct bch_sb *sb, unsigned dev)
{
	if (dev < sb->nr_devices) {
		struct bch_member m = bch2_sb_member_get(sb, dev);
		return bch2_member_exists(&m);
	}
	return false;
}

static inline struct bch_member_cpu bch2_mi_to_cpu(struct bch_member *mi)
{
	return (struct bch_member_cpu) {
		.nbuckets	= le64_to_cpu(mi->nbuckets),
		.first_bucket	= le16_to_cpu(mi->first_bucket),
		.bucket_size	= le16_to_cpu(mi->bucket_size),
		.group		= BCH_MEMBER_GROUP(mi),
		.state		= BCH_MEMBER_STATE(mi),
		.discard	= BCH_MEMBER_DISCARD(mi),
		.data_allowed	= BCH_MEMBER_DATA_ALLOWED(mi),
		.durability	= BCH_MEMBER_DURABILITY(mi)
			? BCH_MEMBER_DURABILITY(mi) - 1
			: 1,
		.freespace_initialized = BCH_MEMBER_FREESPACE_INITIALIZED(mi),
		.valid		= bch2_member_exists(mi),
	};
}

void bch2_sb_members_from_cpu(struct bch_fs *);

void bch2_dev_io_errors_to_text(struct printbuf *, struct bch_dev *);
void bch2_dev_errors_reset(struct bch_dev *);

#endif /* _BCACHEFS_SB_MEMBERS_H */