// SPDX-License-Identifier: GPL-2.0

#include "bcachefs.h"
#include "disk_groups.h"
#include "opts.h"
#include "replicas.h"
#include "sb-members.h"
#include "super-io.h"

#define x(t, n, ...) [n] = #t,
static const char * const bch2_iops_measurements[] = {
	BCH_IOPS_MEASUREMENTS()
	NULL
};

char * const bch2_member_error_strs[] = {
	BCH_MEMBER_ERROR_TYPES()
	NULL
};
#undef x

/* Code for bch_sb_field_members_v1: */

struct bch_member *bch2_members_v2_get_mut(struct bch_sb *sb, int i)
{
	return __bch2_members_v2_get_mut(bch2_sb_field_get(sb, members_v2), i);
}

/*
 * Copy a member entry out to a zero-padded struct bch_member, so callers see
 * consistent contents regardless of the on-disk entry size:
 */
static struct bch_member members_v2_get(struct bch_sb_field_members_v2 *mi, int i)
{
	struct bch_member ret, *p = __bch2_members_v2_get_mut(mi, i);
	memset(&ret, 0, sizeof(ret));
	memcpy(&ret, p, min_t(size_t, le16_to_cpu(mi->member_bytes), sizeof(ret)));
	return ret;
}

static struct bch_member *members_v1_get_mut(struct bch_sb_field_members_v1 *mi, int i)
{
	return (void *) mi->_members + (i * BCH_MEMBER_V1_BYTES);
}

static struct bch_member members_v1_get(struct bch_sb_field_members_v1 *mi, int i)
{
	struct bch_member ret, *p = members_v1_get_mut(mi, i);
	memset(&ret, 0, sizeof(ret));
	memcpy(&ret, p, min_t(size_t, BCH_MEMBER_V1_BYTES, sizeof(ret)));
	return ret;
}

struct bch_member bch2_sb_member_get(struct bch_sb *sb, int i)
{
	struct bch_sb_field_members_v2 *mi2 = bch2_sb_field_get(sb, members_v2);
	if (mi2)
		return members_v2_get(mi2, i);
	struct bch_sb_field_members_v1 *mi1 = bch2_sb_field_get(sb, members_v1);
	return members_v1_get(mi1, i);
}

/*
 * Widen each on-disk member entry to the current sizeof(struct bch_member),
 * working from the last device backwards so entries can be moved in place:
 */
static int sb_members_v2_resize_entries(struct bch_fs *c)
{
	struct bch_sb_field_members_v2 *mi = bch2_sb_field_get(c->disk_sb.sb, members_v2);

	if (le16_to_cpu(mi->member_bytes) < sizeof(struct bch_member)) {
		unsigned u64s = DIV_ROUND_UP((sizeof(*mi) + sizeof(mi->_members[0]) *
					      c->disk_sb.sb->nr_devices), 8);

		mi = bch2_sb_field_resize(&c->disk_sb, members_v2, u64s);
		if (!mi)
			return -BCH_ERR_ENOSPC_sb_members_v2;

		for (int i = c->disk_sb.sb->nr_devices - 1; i >= 0; --i) {
			void *dst = (void *) mi->_members + (i * sizeof(struct bch_member));
			memmove(dst, __bch2_members_v2_get_mut(mi, i), le16_to_cpu(mi->member_bytes));
			memset(dst + le16_to_cpu(mi->member_bytes),
			       0, (sizeof(struct bch_member) - le16_to_cpu(mi->member_bytes)));
		}
		mi->member_bytes = cpu_to_le16(sizeof(struct bch_member));
	}
	return 0;
}

int bch2_sb_members_v2_init(struct bch_fs *c)
{
	struct bch_sb_field_members_v1 *mi1;
	struct bch_sb_field_members_v2 *mi2;

	if (!bch2_sb_field_get(c->disk_sb.sb, members_v2)) {
		mi2 = bch2_sb_field_resize(&c->disk_sb, members_v2,
				DIV_ROUND_UP(sizeof(*mi2) +
					     sizeof(struct bch_member) * c->sb.nr_devices,
					     sizeof(u64)));
		mi1 = bch2_sb_field_get(c->disk_sb.sb, members_v1);
		memcpy(&mi2->_members[0], &mi1->_members[0],
		       BCH_MEMBER_V1_BYTES * c->sb.nr_devices);
		memset(&mi2->pad[0], 0, sizeof(mi2->pad));
		mi2->member_bytes = cpu_to_le16(BCH_MEMBER_V1_BYTES);
	}

	return sb_members_v2_resize_entries(c);
}

int bch2_sb_members_cpy_v2_v1(struct bch_sb_handle *disk_sb)
{
	struct bch_sb_field_members_v1 *mi1;
	struct bch_sb_field_members_v2 *mi2;

	mi1 = bch2_sb_field_resize(disk_sb, members_v1,
				   DIV_ROUND_UP(sizeof(*mi1) + BCH_MEMBER_V1_BYTES *
						disk_sb->sb->nr_devices, sizeof(u64)));
	if (!mi1)
		return -BCH_ERR_ENOSPC_sb_members;

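	/*
	 * Each v1 entry is the first BCH_MEMBER_V1_BYTES of the corresponding
	 * v2 entry, so copying back to v1 just drops the newer fields:
	 */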
	mi2 = bch2_sb_field_get(disk_sb->sb, members_v2);

	for (unsigned i = 0; i < disk_sb->sb->nr_devices; i++)
		memcpy(members_v1_get_mut(mi1, i), __bch2_members_v2_get_mut(mi2, i), BCH_MEMBER_V1_BYTES);

	return 0;
}

static int validate_member(struct printbuf *err,
			   struct bch_member m,
			   struct bch_sb *sb,
			   int i)
{
	if (le64_to_cpu(m.nbuckets) > LONG_MAX) {
		prt_printf(err, "device %u: too many buckets (got %llu, max %lu)",
			   i, le64_to_cpu(m.nbuckets), LONG_MAX);
		return -BCH_ERR_invalid_sb_members;
	}

	if (le64_to_cpu(m.nbuckets) -
	    le16_to_cpu(m.first_bucket) < BCH_MIN_NR_NBUCKETS) {
		prt_printf(err, "device %u: not enough buckets (got %llu, min %u)",
			   i, le64_to_cpu(m.nbuckets), BCH_MIN_NR_NBUCKETS);
		return -BCH_ERR_invalid_sb_members;
	}

	if (le16_to_cpu(m.bucket_size) <
	    le16_to_cpu(sb->block_size)) {
		prt_printf(err, "device %u: bucket size %u smaller than block size %u",
			   i, le16_to_cpu(m.bucket_size), le16_to_cpu(sb->block_size));
		return -BCH_ERR_invalid_sb_members;
	}

	if (le16_to_cpu(m.bucket_size) <
	    BCH_SB_BTREE_NODE_SIZE(sb)) {
		prt_printf(err, "device %u: bucket size %u smaller than btree node size %llu",
			   i, le16_to_cpu(m.bucket_size), BCH_SB_BTREE_NODE_SIZE(sb));
		return -BCH_ERR_invalid_sb_members;
	}

	return 0;
}

static void member_to_text(struct printbuf *out,
			   struct bch_member m,
			   struct bch_sb_field_disk_groups *gi,
			   struct bch_sb *sb,
			   int i)
{
	unsigned data_have = bch2_sb_dev_has_data(sb, i);
	u64 bucket_size = le16_to_cpu(m.bucket_size);
	u64 device_size = le64_to_cpu(m.nbuckets) * bucket_size;

	if (!bch2_member_exists(&m))
		return;

	prt_printf(out, "Device:");
	prt_tab(out);
	prt_printf(out, "%u", i);
	prt_newline(out);

	printbuf_indent_add(out, 2);

	prt_printf(out, "Label:");
	prt_tab(out);
	if (BCH_MEMBER_GROUP(&m)) {
		unsigned idx = BCH_MEMBER_GROUP(&m) - 1;

		if (idx < disk_groups_nr(gi))
			prt_printf(out, "%s (%u)",
				   gi->entries[idx].label, idx);
		else
			prt_printf(out, "(bad disk labels section)");
	} else {
		prt_printf(out, "(none)");
	}
	prt_newline(out);

	prt_printf(out, "UUID:");
	prt_tab(out);
	pr_uuid(out, m.uuid.b);
	prt_newline(out);

	prt_printf(out, "Size:");
	prt_tab(out);
	prt_units_u64(out, device_size << 9);
	prt_newline(out);

	for (unsigned i = 0; i < BCH_MEMBER_ERROR_NR; i++) {
		prt_printf(out, "%s errors:", bch2_member_error_strs[i]);
		prt_tab(out);
		prt_u64(out, le64_to_cpu(m.errors[i]));
		prt_newline(out);
	}

	for (unsigned i = 0; i < BCH_IOPS_NR; i++) {
		prt_printf(out, "%s iops:", bch2_iops_measurements[i]);
		prt_tab(out);
		prt_printf(out, "%u", le32_to_cpu(m.iops[i]));
		prt_newline(out);
	}

	prt_printf(out, "Bucket size:");
	prt_tab(out);
	prt_units_u64(out, bucket_size << 9);
	prt_newline(out);

	prt_printf(out, "First bucket:");
	prt_tab(out);
	prt_printf(out, "%u", le16_to_cpu(m.first_bucket));
	prt_newline(out);

	prt_printf(out, "Buckets:");
	prt_tab(out);
	prt_printf(out, "%llu", le64_to_cpu(m.nbuckets));
	prt_newline(out);

	prt_printf(out, "Last mount:");
	prt_tab(out);
	if (m.last_mount)
		bch2_prt_datetime(out, le64_to_cpu(m.last_mount));
	else
		prt_printf(out, "(never)");
	prt_newline(out);

prt_printf(out, "State:"); 239 prt_tab(out); 240 prt_printf(out, "%s", 241 BCH_MEMBER_STATE(&m) < BCH_MEMBER_STATE_NR 242 ? bch2_member_states[BCH_MEMBER_STATE(&m)] 243 : "unknown"); 244 prt_newline(out); 245 246 prt_printf(out, "Data allowed:"); 247 prt_tab(out); 248 if (BCH_MEMBER_DATA_ALLOWED(&m)) 249 prt_bitflags(out, bch2_data_types, BCH_MEMBER_DATA_ALLOWED(&m)); 250 else 251 prt_printf(out, "(none)"); 252 prt_newline(out); 253 254 prt_printf(out, "Has data:"); 255 prt_tab(out); 256 if (data_have) 257 prt_bitflags(out, bch2_data_types, data_have); 258 else 259 prt_printf(out, "(none)"); 260 prt_newline(out); 261 262 prt_printf(out, "Discard:"); 263 prt_tab(out); 264 prt_printf(out, "%llu", BCH_MEMBER_DISCARD(&m)); 265 prt_newline(out); 266 267 prt_printf(out, "Freespace initialized:"); 268 prt_tab(out); 269 prt_printf(out, "%llu", BCH_MEMBER_FREESPACE_INITIALIZED(&m)); 270 prt_newline(out); 271 272 printbuf_indent_sub(out, 2); 273 } 274 275 static int bch2_sb_members_v1_validate(struct bch_sb *sb, 276 struct bch_sb_field *f, 277 struct printbuf *err) 278 { 279 struct bch_sb_field_members_v1 *mi = field_to_type(f, members_v1); 280 unsigned i; 281 282 if ((void *) members_v1_get_mut(mi, sb->nr_devices) > vstruct_end(&mi->field)) { 283 prt_printf(err, "too many devices for section size"); 284 return -BCH_ERR_invalid_sb_members; 285 } 286 287 for (i = 0; i < sb->nr_devices; i++) { 288 struct bch_member m = members_v1_get(mi, i); 289 290 int ret = validate_member(err, m, sb, i); 291 if (ret) 292 return ret; 293 } 294 295 return 0; 296 } 297 298 static void bch2_sb_members_v1_to_text(struct printbuf *out, struct bch_sb *sb, 299 struct bch_sb_field *f) 300 { 301 struct bch_sb_field_members_v1 *mi = field_to_type(f, members_v1); 302 struct bch_sb_field_disk_groups *gi = bch2_sb_field_get(sb, disk_groups); 303 unsigned i; 304 305 for (i = 0; i < sb->nr_devices; i++) 306 member_to_text(out, members_v1_get(mi, i), gi, sb, i); 307 } 308 309 const struct bch_sb_field_ops bch_sb_field_ops_members_v1 = { 310 .validate = bch2_sb_members_v1_validate, 311 .to_text = bch2_sb_members_v1_to_text, 312 }; 313 314 static void bch2_sb_members_v2_to_text(struct printbuf *out, struct bch_sb *sb, 315 struct bch_sb_field *f) 316 { 317 struct bch_sb_field_members_v2 *mi = field_to_type(f, members_v2); 318 struct bch_sb_field_disk_groups *gi = bch2_sb_field_get(sb, disk_groups); 319 unsigned i; 320 321 for (i = 0; i < sb->nr_devices; i++) 322 member_to_text(out, members_v2_get(mi, i), gi, sb, i); 323 } 324 325 static int bch2_sb_members_v2_validate(struct bch_sb *sb, 326 struct bch_sb_field *f, 327 struct printbuf *err) 328 { 329 struct bch_sb_field_members_v2 *mi = field_to_type(f, members_v2); 330 size_t mi_bytes = (void *) __bch2_members_v2_get_mut(mi, sb->nr_devices) - 331 (void *) mi; 332 333 if (mi_bytes > vstruct_bytes(&mi->field)) { 334 prt_printf(err, "section too small (%zu > %zu)", 335 mi_bytes, vstruct_bytes(&mi->field)); 336 return -BCH_ERR_invalid_sb_members; 337 } 338 339 for (unsigned i = 0; i < sb->nr_devices; i++) { 340 int ret = validate_member(err, members_v2_get(mi, i), sb, i); 341 if (ret) 342 return ret; 343 } 344 345 return 0; 346 } 347 348 const struct bch_sb_field_ops bch_sb_field_ops_members_v2 = { 349 .validate = bch2_sb_members_v2_validate, 350 .to_text = bch2_sb_members_v2_to_text, 351 }; 352 353 void bch2_sb_members_from_cpu(struct bch_fs *c) 354 { 355 struct bch_sb_field_members_v2 *mi = bch2_sb_field_get(c->disk_sb.sb, members_v2); 356 struct bch_dev *ca; 357 unsigned i, e; 358 359 
	rcu_read_lock();
	for_each_member_device_rcu(ca, c, i, NULL) {
		struct bch_member *m = __bch2_members_v2_get_mut(mi, i);

		for (e = 0; e < BCH_MEMBER_ERROR_NR; e++)
			m->errors[e] = cpu_to_le64(atomic64_read(&ca->errors[e]));
	}
	rcu_read_unlock();
}

void bch2_dev_io_errors_to_text(struct printbuf *out, struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;
	struct bch_member m;

	mutex_lock(&ca->fs->sb_lock);
	m = bch2_sb_member_get(c->disk_sb.sb, ca->dev_idx);
	mutex_unlock(&ca->fs->sb_lock);

	printbuf_tabstop_push(out, 12);

	prt_str(out, "IO errors since filesystem creation");
	prt_newline(out);

	printbuf_indent_add(out, 2);
	for (unsigned i = 0; i < BCH_MEMBER_ERROR_NR; i++) {
		prt_printf(out, "%s:", bch2_member_error_strs[i]);
		prt_tab(out);
		prt_u64(out, atomic64_read(&ca->errors[i]));
		prt_newline(out);
	}
	printbuf_indent_sub(out, 2);

	prt_str(out, "IO errors since ");
	bch2_pr_time_units(out, (ktime_get_real_seconds() - le64_to_cpu(m.errors_reset_time)) * NSEC_PER_SEC);
	prt_str(out, " ago");
	prt_newline(out);

	printbuf_indent_add(out, 2);
	for (unsigned i = 0; i < BCH_MEMBER_ERROR_NR; i++) {
		prt_printf(out, "%s:", bch2_member_error_strs[i]);
		prt_tab(out);
		prt_u64(out, atomic64_read(&ca->errors[i]) - le64_to_cpu(m.errors_at_reset[i]));
		prt_newline(out);
	}
	printbuf_indent_sub(out, 2);
}

void bch2_dev_errors_reset(struct bch_dev *ca)
{
	struct bch_fs *c = ca->fs;
	struct bch_member *m;

	mutex_lock(&c->sb_lock);
	m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
	for (unsigned i = 0; i < ARRAY_SIZE(m->errors_at_reset); i++)
		m->errors_at_reset[i] = cpu_to_le64(atomic64_read(&ca->errors[i]));
	m->errors_reset_time = cpu_to_le64(ktime_get_real_seconds());

	bch2_write_super(c);
	mutex_unlock(&c->sb_lock);
}