// SPDX-License-Identifier: GPL-2.0
#ifndef NO_BCACHEFS_CHARDEV

#include "bcachefs.h"
#include "bcachefs_ioctl.h"
#include "buckets.h"
#include "chardev.h"
#include "journal.h"
#include "move.h"
#include "recovery_passes.h"
#include "replicas.h"
#include "super.h"
#include "super-io.h"
#include "thread_with_file.h"

#include <linux/cdev.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/ioctl.h>
#include <linux/major.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

/* returns with ref on ca->ref */
static struct bch_dev *bch2_device_lookup(struct bch_fs *c, u64 dev,
					  unsigned flags)
{
	struct bch_dev *ca;

	if (flags & BCH_BY_INDEX) {
		if (dev >= c->sb.nr_devices)
			return ERR_PTR(-EINVAL);

		ca = bch2_dev_tryget_noerror(c, dev);
		if (!ca)
			return ERR_PTR(-EINVAL);
	} else {
		char *path;

		path = strndup_user((const char __user *)
				    (unsigned long) dev, PATH_MAX);
		if (IS_ERR(path))
			return ERR_CAST(path);

		ca = bch2_dev_lookup(c, path);
		kfree(path);
	}

	return ca;
}

#if 0
static long bch2_ioctl_assemble(struct bch_ioctl_assemble __user *user_arg)
{
	struct bch_ioctl_assemble arg;
	struct bch_fs *c;
	u64 *user_devs = NULL;
	char **devs = NULL;
	unsigned i;
	int ret = -EFAULT;

	if (copy_from_user(&arg, user_arg, sizeof(arg)))
		return -EFAULT;

	if (arg.flags || arg.pad)
		return -EINVAL;

	user_devs = kmalloc_array(arg.nr_devs, sizeof(u64), GFP_KERNEL);
	if (!user_devs)
		return -ENOMEM;

	devs = kcalloc(arg.nr_devs, sizeof(char *), GFP_KERNEL);

	if (copy_from_user(user_devs, user_arg->devs,
			   sizeof(u64) * arg.nr_devs))
		goto err;

	for (i = 0; i < arg.nr_devs; i++) {
		devs[i] = strndup_user((const char __user *)(unsigned long)
				       user_devs[i],
				       PATH_MAX);
		ret = PTR_ERR_OR_ZERO(devs[i]);
		if (ret)
			goto err;
	}

	c = bch2_fs_open(devs, arg.nr_devs, bch2_opts_empty());
	ret = PTR_ERR_OR_ZERO(c);
	if (!ret)
		closure_put(&c->cl);
err:
	if (devs)
		for (i = 0; i < arg.nr_devs; i++)
			kfree(devs[i]);
	kfree(devs);
	return ret;
}

static long bch2_ioctl_incremental(struct bch_ioctl_incremental __user *user_arg)
{
	struct bch_ioctl_incremental arg;
	const char *err;
	char *path;
	int ret;

	if (copy_from_user(&arg, user_arg, sizeof(arg)))
		return -EFAULT;

	if (arg.flags || arg.pad)
		return -EINVAL;

	path = strndup_user((const char __user *)(unsigned long) arg.dev, PATH_MAX);
	ret = PTR_ERR_OR_ZERO(path);
	if (ret)
		return ret;

	err = bch2_fs_open_incremental(path);
	kfree(path);

	if (err) {
		pr_err("Could not register bcachefs devices: %s", err);
		return -EINVAL;
	}

	return 0;
}
#endif

struct fsck_thread {
	struct thread_with_stdio thr;
	struct bch_fs		*c;
	struct bch_opts		opts;
};

static void bch2_fsck_thread_exit(struct thread_with_stdio *_thr)
{
	struct fsck_thread *thr = container_of(_thr, struct fsck_thread, thr);
	kfree(thr);
}

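/*
 * Offline fsck: the thread's return value is a bitmask rather than an errno
 * once the filesystem starts - bit 0 is set when errors were found and fixed,
 * bit 2 when errors remain.  This appears to mirror the fsck(8) exit-code
 * convention; treat the exact values as an observation from the code below,
 * not as documented ABI.
 */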
static int bch2_fsck_offline_thread_fn(struct thread_with_stdio *stdio)
{
	struct fsck_thread *thr = container_of(stdio, struct fsck_thread, thr);
	struct bch_fs *c = thr->c;

	int ret = PTR_ERR_OR_ZERO(c);
	if (ret)
		return ret;

	ret = bch2_fs_start(thr->c);
	if (ret)
		goto err;

	if (test_bit(BCH_FS_errors_fixed, &c->flags)) {
		bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: errors fixed\n", c->name);
		ret |= 1;
	}
	if (test_bit(BCH_FS_error, &c->flags)) {
		bch2_stdio_redirect_printf(&stdio->stdio, false, "%s: still has errors\n", c->name);
		ret |= 4;
	}
err:
	bch2_fs_stop(c);
	return ret;
}

static const struct thread_with_stdio_ops bch2_offline_fsck_ops = {
	.exit		= bch2_fsck_thread_exit,
	.fn		= bch2_fsck_offline_thread_fn,
};

static long bch2_ioctl_fsck_offline(struct bch_ioctl_fsck_offline __user *user_arg)
{
	struct bch_ioctl_fsck_offline arg;
	struct fsck_thread *thr = NULL;
	darray_str(devs) = {};
	long ret = 0;

	if (copy_from_user(&arg, user_arg, sizeof(arg)))
		return -EFAULT;

	if (arg.flags)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	for (size_t i = 0; i < arg.nr_devs; i++) {
		u64 dev_u64;
		ret = copy_from_user_errcode(&dev_u64, &user_arg->devs[i], sizeof(u64));
		if (ret)
			goto err;

		char *dev_str = strndup_user((char __user *)(unsigned long) dev_u64, PATH_MAX);
		ret = PTR_ERR_OR_ZERO(dev_str);
		if (ret)
			goto err;

		ret = darray_push(&devs, dev_str);
		if (ret) {
			kfree(dev_str);
			goto err;
		}
	}

	thr = kzalloc(sizeof(*thr), GFP_KERNEL);
	if (!thr) {
		ret = -ENOMEM;
		goto err;
	}

	thr->opts = bch2_opts_empty();

	if (arg.opts) {
		char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16);

		ret =   PTR_ERR_OR_ZERO(optstr) ?:
			bch2_parse_mount_opts(NULL, &thr->opts, optstr);
		kfree(optstr);

		if (ret)
			goto err;
	}

	opt_set(thr->opts, stdio, (u64)(unsigned long)&thr->thr.stdio);

	/* We need request_key() to be called before we punt to kthread: */
	opt_set(thr->opts, nostart, true);

	bch2_thread_with_stdio_init(&thr->thr, &bch2_offline_fsck_ops);

	thr->c = bch2_fs_open(devs.data, arg.nr_devs, thr->opts);

	if (!IS_ERR(thr->c) &&
	    thr->c->opts.errors == BCH_ON_ERROR_panic)
		thr->c->opts.errors = BCH_ON_ERROR_ro;

	ret = __bch2_run_thread_with_stdio(&thr->thr);
out:
	darray_for_each(devs, i)
		kfree(*i);
	darray_exit(&devs);
	return ret;
err:
	if (thr)
		bch2_fsck_thread_exit(&thr->thr);
	pr_err("ret %s", bch2_err_str(ret));
	goto out;
}

static long bch2_global_ioctl(unsigned cmd, void __user *arg)
{
	long ret;

	switch (cmd) {
#if 0
	case BCH_IOCTL_ASSEMBLE:
		return bch2_ioctl_assemble(arg);
	case BCH_IOCTL_INCREMENTAL:
		return bch2_ioctl_incremental(arg);
#endif
	case BCH_IOCTL_FSCK_OFFLINE: {
		ret = bch2_ioctl_fsck_offline(arg);
		break;
	}
	default:
		ret = -ENOTTY;
		break;
	}

	if (ret < 0)
		ret = bch2_err_class(ret);
	return ret;
}

static long bch2_ioctl_query_uuid(struct bch_fs *c,
				  struct bch_ioctl_query_uuid __user *user_arg)
{
	return copy_to_user_errcode(&user_arg->uuid, &c->sb.user_uuid,
				    sizeof(c->sb.user_uuid));
}

#if 0
static long bch2_ioctl_start(struct bch_fs *c, struct bch_ioctl_start arg)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (arg.flags || arg.pad)
		return -EINVAL;

	return bch2_fs_start(c);
}

static long bch2_ioctl_stop(struct bch_fs *c)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	bch2_fs_stop(c);
	return 0;
}
#endif

static long bch2_ioctl_disk_add(struct bch_fs *c, struct bch_ioctl_disk arg)
{
	char *path;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (arg.flags || arg.pad)
		return -EINVAL;

	path = strndup_user((const char __user *)(unsigned long) arg.dev, PATH_MAX);
	ret = PTR_ERR_OR_ZERO(path);
	if (ret)
		return ret;

	ret = bch2_dev_add(c, path);
	kfree(path);

	return ret;
}

static long bch2_ioctl_disk_remove(struct bch_fs *c, struct bch_ioctl_disk arg)
{
	struct bch_dev *ca;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if ((arg.flags & ~(BCH_FORCE_IF_DATA_LOST|
			   BCH_FORCE_IF_METADATA_LOST|
			   BCH_FORCE_IF_DEGRADED|
			   BCH_BY_INDEX)) ||
	    arg.pad)
		return -EINVAL;

	ca = bch2_device_lookup(c, arg.dev, arg.flags);
	if (IS_ERR(ca))
		return PTR_ERR(ca);

	return bch2_dev_remove(c, ca, arg.flags);
}

static long bch2_ioctl_disk_online(struct bch_fs *c, struct bch_ioctl_disk arg)
{
	char *path;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (arg.flags || arg.pad)
		return -EINVAL;

	path = strndup_user((const char __user *)(unsigned long) arg.dev, PATH_MAX);
	ret = PTR_ERR_OR_ZERO(path);
	if (ret)
		return ret;

	ret = bch2_dev_online(c, path);
	kfree(path);
	return ret;
}

static long bch2_ioctl_disk_offline(struct bch_fs *c, struct bch_ioctl_disk arg)
{
	struct bch_dev *ca;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if ((arg.flags & ~(BCH_FORCE_IF_DATA_LOST|
			   BCH_FORCE_IF_METADATA_LOST|
			   BCH_FORCE_IF_DEGRADED|
			   BCH_BY_INDEX)) ||
	    arg.pad)
		return -EINVAL;

	ca = bch2_device_lookup(c, arg.dev, arg.flags);
	if (IS_ERR(ca))
		return PTR_ERR(ca);

	ret = bch2_dev_offline(c, ca, arg.flags);
	bch2_dev_put(ca);
	return ret;
}

static long bch2_ioctl_disk_set_state(struct bch_fs *c,
				      struct bch_ioctl_disk_set_state arg)
{
	struct bch_dev *ca;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if ((arg.flags & ~(BCH_FORCE_IF_DATA_LOST|
			   BCH_FORCE_IF_METADATA_LOST|
			   BCH_FORCE_IF_DEGRADED|
			   BCH_BY_INDEX)) ||
	    arg.pad[0] || arg.pad[1] || arg.pad[2] ||
	    arg.new_state >= BCH_MEMBER_STATE_NR)
		return -EINVAL;

	ca = bch2_device_lookup(c, arg.dev, arg.flags);
	if (IS_ERR(ca))
		return PTR_ERR(ca);

	ret = bch2_dev_set_state(c, ca, arg.new_state, arg.flags);
	if (ret)
		bch_err(c, "Error setting device state: %s", bch2_err_str(ret));

	bch2_dev_put(ca);
	return ret;
}

struct bch_data_ctx {
	struct thread_with_file		thr;

	struct bch_fs			*c;
	struct bch_ioctl_data		arg;
	struct bch_move_stats		stats;
};

static int bch2_data_thread(void *arg)
{
	struct bch_data_ctx *ctx = container_of(arg, struct bch_data_ctx, thr);

	ctx->thr.ret = bch2_data_job(ctx->c, &ctx->stats, ctx->arg);
	ctx->stats.data_type = U8_MAX;
	return 0;
}

static int bch2_data_job_release(struct inode *inode, struct file *file)
{
	struct bch_data_ctx *ctx = container_of(file->private_data, struct bch_data_ctx, thr);

	bch2_thread_with_file_exit(&ctx->thr);
	kfree(ctx);
	return 0;
}

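/*
 * BCH_IOCTL_DATA hands userspace a file descriptor for the background job
 * (see bch2_ioctl_data() below); each read() on it fills in a
 * bch_ioctl_data_event progress record, with sectors_done/sectors_total
 * expressed in sectors and data_type set to U8_MAX once the job finishes.
 */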
static ssize_t bch2_data_job_read(struct file *file, char __user *buf,
				  size_t len, loff_t *ppos)
{
	struct bch_data_ctx *ctx = container_of(file->private_data, struct bch_data_ctx, thr);
	struct bch_fs *c = ctx->c;
	struct bch_ioctl_data_event e = {
		.type			= BCH_DATA_EVENT_PROGRESS,
		.p.data_type		= ctx->stats.data_type,
		.p.btree_id		= ctx->stats.pos.btree,
		.p.pos			= ctx->stats.pos.pos,
		.p.sectors_done		= atomic64_read(&ctx->stats.sectors_seen),
		.p.sectors_total	= bch2_fs_usage_read_short(c).used,
	};

	if (len < sizeof(e))
		return -EINVAL;

	return copy_to_user_errcode(buf, &e, sizeof(e)) ?: sizeof(e);
}

static const struct file_operations bcachefs_data_ops = {
	.release	= bch2_data_job_release,
	.read		= bch2_data_job_read,
	.llseek		= no_llseek,
};

static long bch2_ioctl_data(struct bch_fs *c,
			    struct bch_ioctl_data arg)
{
	struct bch_data_ctx *ctx;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (arg.op >= BCH_DATA_OP_NR || arg.flags)
		return -EINVAL;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->c = c;
	ctx->arg = arg;

	ret = bch2_run_thread_with_file(&ctx->thr,
					&bcachefs_data_ops,
					bch2_data_thread);
	if (ret < 0)
		kfree(ctx);
	return ret;
}

static long bch2_ioctl_fs_usage(struct bch_fs *c,
				struct bch_ioctl_fs_usage __user *user_arg)
{
	struct bch_ioctl_fs_usage *arg = NULL;
	struct bch_replicas_usage *dst_e, *dst_end;
	struct bch_fs_usage_online *src;
	u32 replica_entries_bytes;
	unsigned i;
	int ret = 0;

	if (!test_bit(BCH_FS_started, &c->flags))
		return -EINVAL;

	if (get_user(replica_entries_bytes, &user_arg->replica_entries_bytes))
		return -EFAULT;

	arg = kzalloc(size_add(sizeof(*arg), replica_entries_bytes), GFP_KERNEL);
	if (!arg)
		return -ENOMEM;

	src = bch2_fs_usage_read(c);
	if (!src) {
		ret = -ENOMEM;
		goto err;
	}

	arg->capacity		= c->capacity;
	arg->used		= bch2_fs_sectors_used(c, src);
	arg->online_reserved	= src->online_reserved;

	for (i = 0; i < BCH_REPLICAS_MAX; i++)
		arg->persistent_reserved[i] = src->u.persistent_reserved[i];

	dst_e	= arg->replicas;
	dst_end = (void *) arg->replicas + replica_entries_bytes;

	for (i = 0; i < c->replicas.nr; i++) {
		struct bch_replicas_entry_v1 *src_e =
			cpu_replicas_entry(&c->replicas, i);

		/* check that we have enough space for one replicas entry */
		if (dst_e + 1 > dst_end) {
			ret = -ERANGE;
			break;
		}

		dst_e->sectors		= src->u.replicas[i];
		dst_e->r		= *src_e;

		/* recheck after setting nr_devs: */
		if (replicas_usage_next(dst_e) > dst_end) {
			ret = -ERANGE;
			break;
		}

		memcpy(dst_e->r.devs, src_e->devs, src_e->nr_devs);

		dst_e = replicas_usage_next(dst_e);
	}

	arg->replica_entries_bytes = (void *) dst_e - (void *) arg->replicas;

	percpu_up_read(&c->mark_lock);
	kfree(src);

	if (ret)
		goto err;

	ret = copy_to_user_errcode(user_arg, arg,
			sizeof(*arg) + arg->replica_entries_bytes);
err:
	kfree(arg);
	return ret;
}

/* obsolete, didn't allow for new data types: */
static long bch2_ioctl_dev_usage(struct bch_fs *c,
				 struct bch_ioctl_dev_usage __user *user_arg)
{
	struct bch_ioctl_dev_usage arg;
	struct bch_dev_usage src;
	struct bch_dev *ca;
	unsigned i;

	if (!test_bit(BCH_FS_started, &c->flags))
		return -EINVAL;

	if (copy_from_user(&arg, user_arg, sizeof(arg)))
		return -EFAULT;

	if ((arg.flags & ~BCH_BY_INDEX) ||
	    arg.pad[0] ||
	    arg.pad[1] ||
	    arg.pad[2])
		return -EINVAL;

	ca = bch2_device_lookup(c, arg.dev, arg.flags);
	if (IS_ERR(ca))
		return PTR_ERR(ca);

	src = bch2_dev_usage_read(ca);

	arg.state		= ca->mi.state;
	arg.bucket_size		= ca->mi.bucket_size;
	arg.nr_buckets		= ca->mi.nbuckets - ca->mi.first_bucket;

	for (i = 0; i < BCH_DATA_NR; i++) {
		arg.d[i].buckets	= src.d[i].buckets;
		arg.d[i].sectors	= src.d[i].sectors;
		arg.d[i].fragmented	= src.d[i].fragmented;
	}

	bch2_dev_put(ca);

	return copy_to_user_errcode(user_arg, &arg, sizeof(arg));
}

static long bch2_ioctl_dev_usage_v2(struct bch_fs *c,
				    struct bch_ioctl_dev_usage_v2 __user *user_arg)
{
	struct bch_ioctl_dev_usage_v2 arg;
	struct bch_dev_usage src;
	struct bch_dev *ca;
	int ret = 0;

	if (!test_bit(BCH_FS_started, &c->flags))
		return -EINVAL;

	if (copy_from_user(&arg, user_arg, sizeof(arg)))
		return -EFAULT;

	if ((arg.flags & ~BCH_BY_INDEX) ||
	    arg.pad[0] ||
	    arg.pad[1] ||
	    arg.pad[2])
		return -EINVAL;

	ca = bch2_device_lookup(c, arg.dev, arg.flags);
	if (IS_ERR(ca))
		return PTR_ERR(ca);

	src = bch2_dev_usage_read(ca);

	arg.state		= ca->mi.state;
	arg.bucket_size		= ca->mi.bucket_size;
	arg.nr_data_types	= min(arg.nr_data_types, BCH_DATA_NR);
	arg.nr_buckets		= ca->mi.nbuckets - ca->mi.first_bucket;

	ret = copy_to_user_errcode(user_arg, &arg, sizeof(arg));
	if (ret)
		goto err;

	for (unsigned i = 0; i < arg.nr_data_types; i++) {
		struct bch_ioctl_dev_usage_type t = {
			.buckets	= src.d[i].buckets,
			.sectors	= src.d[i].sectors,
			.fragmented	= src.d[i].fragmented,
		};

		ret = copy_to_user_errcode(&user_arg->d[i], &t, sizeof(t));
		if (ret)
			goto err;
	}
err:
	bch2_dev_put(ca);
	return ret;
}

static long bch2_ioctl_read_super(struct bch_fs *c,
				  struct bch_ioctl_read_super arg)
{
	struct bch_dev *ca = NULL;
	struct bch_sb *sb;
	int ret = 0;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if ((arg.flags & ~(BCH_BY_INDEX|BCH_READ_DEV)) ||
	    arg.pad)
		return -EINVAL;

	mutex_lock(&c->sb_lock);

	if (arg.flags & BCH_READ_DEV) {
		ca = bch2_device_lookup(c, arg.dev, arg.flags);
		ret = PTR_ERR_OR_ZERO(ca);
		if (ret)
			goto err_unlock;

		sb = ca->disk_sb.sb;
	} else {
		sb = c->disk_sb.sb;
	}

	if (vstruct_bytes(sb) > arg.size) {
		ret = -ERANGE;
		goto err;
	}

	ret = copy_to_user_errcode((void __user *)(unsigned long)arg.sb, sb,
				   vstruct_bytes(sb));
err:
	bch2_dev_put(ca);
err_unlock:
	mutex_unlock(&c->sb_lock);
	return ret;
}

static long bch2_ioctl_disk_get_idx(struct bch_fs *c,
				    struct bch_ioctl_disk_get_idx arg)
{
	dev_t dev = huge_decode_dev(arg.dev);

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!dev)
		return -EINVAL;

	for_each_online_member(c, ca)
		if (ca->dev == dev) {
			percpu_ref_put(&ca->io_ref);
			return ca->dev_idx;
		}

	return -BCH_ERR_ENOENT_dev_idx_not_found;
}

static long bch2_ioctl_disk_resize(struct bch_fs *c,
				   struct bch_ioctl_disk_resize arg)
{
	struct bch_dev *ca;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if ((arg.flags & ~BCH_BY_INDEX) ||
	    arg.pad)
		return -EINVAL;

	ca = bch2_device_lookup(c, arg.dev, arg.flags);
	if (IS_ERR(ca))
		return PTR_ERR(ca);

	ret = bch2_dev_resize(c, ca, arg.nbuckets);

	bch2_dev_put(ca);
	return ret;
}

static long bch2_ioctl_disk_resize_journal(struct bch_fs *c,
					   struct bch_ioctl_disk_resize_journal arg)
{
	struct bch_dev *ca;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if ((arg.flags & ~BCH_BY_INDEX) ||
	    arg.pad)
		return -EINVAL;

	if (arg.nbuckets > U32_MAX)
		return -EINVAL;

	ca = bch2_device_lookup(c, arg.dev, arg.flags);
	if (IS_ERR(ca))
		return PTR_ERR(ca);

	ret = bch2_set_nr_journal_buckets(c, ca, arg.nbuckets);

	bch2_dev_put(ca);
	return ret;
}

static int bch2_fsck_online_thread_fn(struct thread_with_stdio *stdio)
{
	struct fsck_thread *thr = container_of(stdio, struct fsck_thread, thr);
	struct bch_fs *c = thr->c;

	c->stdio_filter = current;
	c->stdio = &thr->thr.stdio;

	/*
	 * XXX: can we figure out a way to do this without mucking with c->opts?
	 */
	unsigned old_fix_errors = c->opts.fix_errors;
	if (opt_defined(thr->opts, fix_errors))
		c->opts.fix_errors = thr->opts.fix_errors;
	else
		c->opts.fix_errors = FSCK_FIX_ask;

	c->opts.fsck = true;
	set_bit(BCH_FS_fsck_running, &c->flags);

	c->curr_recovery_pass = BCH_RECOVERY_PASS_check_alloc_info;
	int ret = bch2_run_online_recovery_passes(c);

	clear_bit(BCH_FS_fsck_running, &c->flags);
	bch_err_fn(c, ret);

	c->stdio = NULL;
	c->stdio_filter = NULL;
	c->opts.fix_errors = old_fix_errors;

	up(&c->online_fsck_mutex);
	bch2_ro_ref_put(c);
	return ret;
}

static const struct thread_with_stdio_ops bch2_online_fsck_ops = {
	.exit		= bch2_fsck_thread_exit,
	.fn		= bch2_fsck_online_thread_fn,
};

static long bch2_ioctl_fsck_online(struct bch_fs *c,
				   struct bch_ioctl_fsck_online arg)
{
	struct fsck_thread *thr = NULL;
	long ret = 0;

	if (arg.flags)
		return -EINVAL;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!bch2_ro_ref_tryget(c))
		return -EROFS;

	if (down_trylock(&c->online_fsck_mutex)) {
		bch2_ro_ref_put(c);
		return -EAGAIN;
	}

	thr = kzalloc(sizeof(*thr), GFP_KERNEL);
	if (!thr) {
		ret = -ENOMEM;
		goto err;
	}

	thr->c = c;
	thr->opts = bch2_opts_empty();

	if (arg.opts) {
		char *optstr = strndup_user((char __user *)(unsigned long) arg.opts, 1 << 16);

		ret =   PTR_ERR_OR_ZERO(optstr) ?:
			bch2_parse_mount_opts(c, &thr->opts, optstr);
		kfree(optstr);

		if (ret)
			goto err;
	}

	ret = bch2_run_thread_with_stdio(&thr->thr, &bch2_online_fsck_ops);
err:
	if (ret < 0) {
		bch_err_fn(c, ret);
		if (thr)
			bch2_fsck_thread_exit(&thr->thr);
		up(&c->online_fsck_mutex);
		bch2_ro_ref_put(c);
	}
	return ret;
}

#define BCH_IOCTL(_name, _argtype)					\
do {									\
	_argtype i;							\
									\
	if (copy_from_user(&i, arg, sizeof(i)))				\
		return -EFAULT;						\
	ret = bch2_ioctl_##_name(c, i);					\
	goto out;							\
} while (0)

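/*
 * Per-filesystem ioctls: the first switch below covers commands that are
 * usable before the filesystem has finished starting; everything past the
 * BCH_FS_started check requires a running filesystem.
 *
 * A minimal userspace sketch, assuming the udev-created device node for the
 * first registered filesystem (named "bcachefs%u-ctl" by
 * bch2_fs_chardev_init() below) and the request structs and ioctl numbers
 * from bcachefs_ioctl.h; includes and error handling omitted:
 *
 *	struct bch_ioctl_query_uuid u;
 *	int fd = open("/dev/bcachefs0-ctl", O_RDONLY);
 *
 *	if (fd >= 0 && !ioctl(fd, BCH_IOCTL_QUERY_UUID, &u))
 *		... u.uuid now holds the filesystem's external UUID ...
 */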
long bch2_fs_ioctl(struct bch_fs *c, unsigned cmd, void __user *arg)
{
	long ret;

	switch (cmd) {
	case BCH_IOCTL_QUERY_UUID:
		return bch2_ioctl_query_uuid(c, arg);
	case BCH_IOCTL_FS_USAGE:
		return bch2_ioctl_fs_usage(c, arg);
	case BCH_IOCTL_DEV_USAGE:
		return bch2_ioctl_dev_usage(c, arg);
	case BCH_IOCTL_DEV_USAGE_V2:
		return bch2_ioctl_dev_usage_v2(c, arg);
#if 0
	case BCH_IOCTL_START:
		BCH_IOCTL(start, struct bch_ioctl_start);
	case BCH_IOCTL_STOP:
		return bch2_ioctl_stop(c);
#endif
	case BCH_IOCTL_READ_SUPER:
		BCH_IOCTL(read_super, struct bch_ioctl_read_super);
	case BCH_IOCTL_DISK_GET_IDX:
		BCH_IOCTL(disk_get_idx, struct bch_ioctl_disk_get_idx);
	}

	if (!test_bit(BCH_FS_started, &c->flags))
		return -EINVAL;

	switch (cmd) {
	case BCH_IOCTL_DISK_ADD:
		BCH_IOCTL(disk_add, struct bch_ioctl_disk);
	case BCH_IOCTL_DISK_REMOVE:
		BCH_IOCTL(disk_remove, struct bch_ioctl_disk);
	case BCH_IOCTL_DISK_ONLINE:
		BCH_IOCTL(disk_online, struct bch_ioctl_disk);
	case BCH_IOCTL_DISK_OFFLINE:
		BCH_IOCTL(disk_offline, struct bch_ioctl_disk);
	case BCH_IOCTL_DISK_SET_STATE:
		BCH_IOCTL(disk_set_state, struct bch_ioctl_disk_set_state);
	case BCH_IOCTL_DATA:
		BCH_IOCTL(data, struct bch_ioctl_data);
	case BCH_IOCTL_DISK_RESIZE:
		BCH_IOCTL(disk_resize, struct bch_ioctl_disk_resize);
	case BCH_IOCTL_DISK_RESIZE_JOURNAL:
		BCH_IOCTL(disk_resize_journal, struct bch_ioctl_disk_resize_journal);
	case BCH_IOCTL_FSCK_ONLINE:
		BCH_IOCTL(fsck_online, struct bch_ioctl_fsck_online);
	default:
		return -ENOTTY;
	}
out:
	if (ret < 0)
		ret = bch2_err_class(ret);
	return ret;
}

static DEFINE_IDR(bch_chardev_minor);

static long bch2_chardev_ioctl(struct file *filp, unsigned cmd, unsigned long v)
{
	unsigned minor = iminor(file_inode(filp));
	struct bch_fs *c = minor < U8_MAX ? idr_find(&bch_chardev_minor, minor) : NULL;
	void __user *arg = (void __user *) v;

	return c
		? bch2_fs_ioctl(c, cmd, arg)
		: bch2_global_ioctl(cmd, arg);
}

static const struct file_operations bch_chardev_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl = bch2_chardev_ioctl,
	.open		= nonseekable_open,
};

static int bch_chardev_major;
static const struct class bch_chardev_class = {
	.name = "bcachefs",
};
static struct device *bch_chardev;

void bch2_fs_chardev_exit(struct bch_fs *c)
{
	if (!IS_ERR_OR_NULL(c->chardev))
		device_unregister(c->chardev);
	if (c->minor >= 0)
		idr_remove(&bch_chardev_minor, c->minor);
}

int bch2_fs_chardev_init(struct bch_fs *c)
{
	c->minor = idr_alloc(&bch_chardev_minor, c, 0, 0, GFP_KERNEL);
	if (c->minor < 0)
		return c->minor;

	c->chardev = device_create(&bch_chardev_class, NULL,
				   MKDEV(bch_chardev_major, c->minor), c,
				   "bcachefs%u-ctl", c->minor);
	if (IS_ERR(c->chardev))
		return PTR_ERR(c->chardev);

	return 0;
}

void bch2_chardev_exit(void)
{
	device_destroy(&bch_chardev_class, MKDEV(bch_chardev_major, U8_MAX));
	class_unregister(&bch_chardev_class);
	if (bch_chardev_major > 0)
		unregister_chrdev(bch_chardev_major, "bcachefs");
}

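/*
 * Module init/exit for the control interface: a dynamically allocated major
 * is registered for "bcachefs-ctl", with minor U8_MAX reserved for the global
 * control device (filesystem-independent commands, routed through
 * bch2_global_ioctl()) and lower minors handed out per filesystem via
 * bch_chardev_minor above.
 */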
"bcachefs-ctl"); 1019 return ret; 1020 } 1021 1022 #endif /* NO_BCACHEFS_CHARDEV */ 1023