// SPDX-License-Identifier: GPL-2.0-or-later
/*
   md.c : Multiple Devices driver for Linux
     Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.


   Errors, Warnings, etc.
   Please use:
     pr_crit() for error conditions that risk data loss
     pr_err() for error conditions that are unexpected, like an IO error
         or internal inconsistency
     pr_warn() for error conditions that could have been predicted, like
         adding a device to an array when it has incompatible metadata
     pr_info() for interesting, very rare events, like an array starting
         or stopping, or resync starting or stopping
     pr_debug() for everything else.

*/

#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/blk-integrity.h>
#include <linux/badblocks.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/major.h>
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/raid/detect.h>
#include <linux/slab.h>
#include <linux/percpu-refcount.h>
#include <linux/part_stat.h>

#include "md.h"
#include "md-bitmap.h"
#include "md-cluster.h"

static const char *action_name[NR_SYNC_ACTIONS] = {
	[ACTION_RESYNC]		= "resync",
	[ACTION_RECOVER]	= "recover",
	[ACTION_CHECK]		= "check",
	[ACTION_REPAIR]		= "repair",
	[ACTION_RESHAPE]	= "reshape",
	[ACTION_FROZEN]		= "frozen",
	[ACTION_IDLE]		= "idle",
};

static DEFINE_XARRAY(md_submodule);

static const struct kobj_type md_ktype;

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;

/*
 * This workqueue is used for sync_work to register new sync_thread, and for
 * del_work to remove rdev, and for event_work that is only set by dm-raid.
 *
 * Note that sync_work will grab reconfig_mutex, hence never flush this
 * workqueue with reconfig_mutex grabbed.
 */
static struct workqueue_struct *md_misc_wq;

static int remove_and_add_spares(struct mddev *mddev,
				 struct md_rdev *this);
static void mddev_detach(struct mddev *mddev);
static void export_rdev(struct md_rdev *rdev, struct mddev *mddev);
static void md_wakeup_thread_directly(struct md_thread __rcu **thread);

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array.
We divide the read error 107 * count by 2 for every hour elapsed between read errors. 108 */ 109 #define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20 110 /* Default safemode delay: 200 msec */ 111 #define DEFAULT_SAFEMODE_DELAY ((200 * HZ)/1000 +1) 112 /* 113 * Current RAID-1,4,5,6,10 parallel reconstruction 'guaranteed speed limit' 114 * is sysctl_speed_limit_min, 1000 KB/sec by default, so the extra system load 115 * does not show up that much. Increase it if you want to have more guaranteed 116 * speed. Note that the RAID driver will use the maximum bandwidth 117 * sysctl_speed_limit_max, 200 MB/sec by default, if the IO subsystem is idle. 118 * 119 * Background sync IO speed control: 120 * 121 * - below speed min: 122 * no limit; 123 * - above speed min and below speed max: 124 * a) if mddev is idle, then no limit; 125 * b) if mddev is busy handling normal IO, then limit inflight sync IO 126 * to sync_io_depth; 127 * - above speed max: 128 * sync IO can't be issued; 129 * 130 * Following configurations can be changed via /proc/sys/dev/raid/ for system 131 * or /sys/block/mdX/md/ for one array. 132 */ 133 static int sysctl_speed_limit_min = 1000; 134 static int sysctl_speed_limit_max = 200000; 135 static int sysctl_sync_io_depth = 32; 136 137 static int speed_min(struct mddev *mddev) 138 { 139 return mddev->sync_speed_min ? 140 mddev->sync_speed_min : sysctl_speed_limit_min; 141 } 142 143 static int speed_max(struct mddev *mddev) 144 { 145 return mddev->sync_speed_max ? 146 mddev->sync_speed_max : sysctl_speed_limit_max; 147 } 148 149 static int sync_io_depth(struct mddev *mddev) 150 { 151 return mddev->sync_io_depth ? 152 mddev->sync_io_depth : sysctl_sync_io_depth; 153 } 154 155 static void rdev_uninit_serial(struct md_rdev *rdev) 156 { 157 if (!test_and_clear_bit(CollisionCheck, &rdev->flags)) 158 return; 159 160 kvfree(rdev->serial); 161 rdev->serial = NULL; 162 } 163 164 static void rdevs_uninit_serial(struct mddev *mddev) 165 { 166 struct md_rdev *rdev; 167 168 rdev_for_each(rdev, mddev) 169 rdev_uninit_serial(rdev); 170 } 171 172 static int rdev_init_serial(struct md_rdev *rdev) 173 { 174 /* serial_nums equals with BARRIER_BUCKETS_NR */ 175 int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t)))); 176 struct serial_in_rdev *serial = NULL; 177 178 if (test_bit(CollisionCheck, &rdev->flags)) 179 return 0; 180 181 serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums, 182 GFP_KERNEL); 183 if (!serial) 184 return -ENOMEM; 185 186 for (i = 0; i < serial_nums; i++) { 187 struct serial_in_rdev *serial_tmp = &serial[i]; 188 189 spin_lock_init(&serial_tmp->serial_lock); 190 serial_tmp->serial_rb = RB_ROOT_CACHED; 191 init_waitqueue_head(&serial_tmp->serial_io_wait); 192 } 193 194 rdev->serial = serial; 195 set_bit(CollisionCheck, &rdev->flags); 196 197 return 0; 198 } 199 200 static int rdevs_init_serial(struct mddev *mddev) 201 { 202 struct md_rdev *rdev; 203 int ret = 0; 204 205 rdev_for_each(rdev, mddev) { 206 ret = rdev_init_serial(rdev); 207 if (ret) 208 break; 209 } 210 211 /* Free all resources if pool is not existed */ 212 if (ret && !mddev->serial_info_pool) 213 rdevs_uninit_serial(mddev); 214 215 return ret; 216 } 217 218 /* 219 * rdev needs to enable serial stuffs if it meets the conditions: 220 * 1. it is multi-queue device flaged with writemostly. 221 * 2. the write-behind mode is enabled. 
222 */ 223 static int rdev_need_serial(struct md_rdev *rdev) 224 { 225 return (rdev && rdev->mddev->bitmap_info.max_write_behind > 0 && 226 rdev->bdev->bd_disk->queue->nr_hw_queues != 1 && 227 test_bit(WriteMostly, &rdev->flags)); 228 } 229 230 /* 231 * Init resource for rdev(s), then create serial_info_pool if: 232 * 1. rdev is the first device which return true from rdev_enable_serial. 233 * 2. rdev is NULL, means we want to enable serialization for all rdevs. 234 */ 235 void mddev_create_serial_pool(struct mddev *mddev, struct md_rdev *rdev) 236 { 237 int ret = 0; 238 239 if (rdev && !rdev_need_serial(rdev) && 240 !test_bit(CollisionCheck, &rdev->flags)) 241 return; 242 243 if (!rdev) 244 ret = rdevs_init_serial(mddev); 245 else 246 ret = rdev_init_serial(rdev); 247 if (ret) 248 return; 249 250 if (mddev->serial_info_pool == NULL) { 251 /* 252 * already in memalloc noio context by 253 * mddev_suspend() 254 */ 255 mddev->serial_info_pool = 256 mempool_create_kmalloc_pool(NR_SERIAL_INFOS, 257 sizeof(struct serial_info)); 258 if (!mddev->serial_info_pool) { 259 rdevs_uninit_serial(mddev); 260 pr_err("can't alloc memory pool for serialization\n"); 261 } 262 } 263 } 264 265 /* 266 * Free resource from rdev(s), and destroy serial_info_pool under conditions: 267 * 1. rdev is the last device flaged with CollisionCheck. 268 * 2. when bitmap is destroyed while policy is not enabled. 269 * 3. for disable policy, the pool is destroyed only when no rdev needs it. 270 */ 271 void mddev_destroy_serial_pool(struct mddev *mddev, struct md_rdev *rdev) 272 { 273 if (rdev && !test_bit(CollisionCheck, &rdev->flags)) 274 return; 275 276 if (mddev->serial_info_pool) { 277 struct md_rdev *temp; 278 int num = 0; /* used to track if other rdevs need the pool */ 279 280 rdev_for_each(temp, mddev) { 281 if (!rdev) { 282 if (!mddev->serialize_policy || 283 !rdev_need_serial(temp)) 284 rdev_uninit_serial(temp); 285 else 286 num++; 287 } else if (temp != rdev && 288 test_bit(CollisionCheck, &temp->flags)) 289 num++; 290 } 291 292 if (rdev) 293 rdev_uninit_serial(rdev); 294 295 if (num) 296 pr_info("The mempool could be used by other devices\n"); 297 else { 298 mempool_destroy(mddev->serial_info_pool); 299 mddev->serial_info_pool = NULL; 300 } 301 } 302 } 303 304 static struct ctl_table_header *raid_table_header; 305 306 static const struct ctl_table raid_table[] = { 307 { 308 .procname = "speed_limit_min", 309 .data = &sysctl_speed_limit_min, 310 .maxlen = sizeof(int), 311 .mode = 0644, 312 .proc_handler = proc_dointvec, 313 }, 314 { 315 .procname = "speed_limit_max", 316 .data = &sysctl_speed_limit_max, 317 .maxlen = sizeof(int), 318 .mode = 0644, 319 .proc_handler = proc_dointvec, 320 }, 321 { 322 .procname = "sync_io_depth", 323 .data = &sysctl_sync_io_depth, 324 .maxlen = sizeof(int), 325 .mode = 0644, 326 .proc_handler = proc_dointvec, 327 }, 328 }; 329 330 static int start_readonly; 331 332 /* 333 * The original mechanism for creating an md device is to create 334 * a device node in /dev and to open it. This causes races with device-close. 335 * The preferred method is to write to the "new_array" module parameter. 336 * This can avoid races. 337 * Setting create_on_open to false disables the original mechanism 338 * so all the races disappear. 
339 */ 340 static bool create_on_open = true; 341 static bool legacy_async_del_gendisk = true; 342 static bool check_new_feature = true; 343 344 /* 345 * We have a system wide 'event count' that is incremented 346 * on any 'interesting' event, and readers of /proc/mdstat 347 * can use 'poll' or 'select' to find out when the event 348 * count increases. 349 * 350 * Events are: 351 * start array, stop array, error, add device, remove device, 352 * start build, activate spare 353 */ 354 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters); 355 static atomic_t md_event_count; 356 void md_new_event(void) 357 { 358 atomic_inc(&md_event_count); 359 wake_up(&md_event_waiters); 360 } 361 EXPORT_SYMBOL_GPL(md_new_event); 362 363 /* 364 * Enables to iterate over all existing md arrays 365 * all_mddevs_lock protects this list. 366 */ 367 static LIST_HEAD(all_mddevs); 368 static DEFINE_SPINLOCK(all_mddevs_lock); 369 370 static bool is_md_suspended(struct mddev *mddev) 371 { 372 return percpu_ref_is_dying(&mddev->active_io); 373 } 374 /* Rather than calling directly into the personality make_request function, 375 * IO requests come here first so that we can check if the device is 376 * being suspended pending a reconfiguration. 377 * We hold a refcount over the call to ->make_request. By the time that 378 * call has finished, the bio has been linked into some internal structure 379 * and so is visible to ->quiesce(), so we don't need the refcount any more. 380 */ 381 static bool is_suspended(struct mddev *mddev, struct bio *bio) 382 { 383 if (is_md_suspended(mddev)) 384 return true; 385 if (bio_data_dir(bio) != WRITE) 386 return false; 387 if (READ_ONCE(mddev->suspend_lo) >= READ_ONCE(mddev->suspend_hi)) 388 return false; 389 if (bio->bi_iter.bi_sector >= READ_ONCE(mddev->suspend_hi)) 390 return false; 391 if (bio_end_sector(bio) < READ_ONCE(mddev->suspend_lo)) 392 return false; 393 return true; 394 } 395 396 bool md_handle_request(struct mddev *mddev, struct bio *bio) 397 { 398 check_suspended: 399 if (is_suspended(mddev, bio)) { 400 DEFINE_WAIT(__wait); 401 /* Bail out if REQ_NOWAIT is set for the bio */ 402 if (bio->bi_opf & REQ_NOWAIT) { 403 bio_wouldblock_error(bio); 404 return true; 405 } 406 for (;;) { 407 prepare_to_wait(&mddev->sb_wait, &__wait, 408 TASK_UNINTERRUPTIBLE); 409 if (!is_suspended(mddev, bio)) 410 break; 411 schedule(); 412 } 413 finish_wait(&mddev->sb_wait, &__wait); 414 } 415 if (!percpu_ref_tryget_live(&mddev->active_io)) 416 goto check_suspended; 417 418 if (!mddev->pers->make_request(mddev, bio)) { 419 percpu_ref_put(&mddev->active_io); 420 if (!mddev->gendisk && mddev->pers->prepare_suspend) 421 return false; 422 goto check_suspended; 423 } 424 425 percpu_ref_put(&mddev->active_io); 426 return true; 427 } 428 EXPORT_SYMBOL(md_handle_request); 429 430 static void md_submit_bio(struct bio *bio) 431 { 432 const int rw = bio_data_dir(bio); 433 struct mddev *mddev = bio->bi_bdev->bd_disk->private_data; 434 435 if (mddev == NULL || mddev->pers == NULL) { 436 bio_io_error(bio); 437 return; 438 } 439 440 if (unlikely(test_bit(MD_BROKEN, &mddev->flags)) && (rw == WRITE)) { 441 bio_io_error(bio); 442 return; 443 } 444 445 bio = bio_split_to_limits(bio); 446 if (!bio) 447 return; 448 449 if (mddev->ro == MD_RDONLY && unlikely(rw == WRITE)) { 450 if (bio_sectors(bio) != 0) 451 bio->bi_status = BLK_STS_IOERR; 452 bio_endio(bio); 453 return; 454 } 455 456 /* bio could be mergeable after passing to underlayer */ 457 bio->bi_opf &= ~REQ_NOMERGE; 458 459 md_handle_request(mddev, bio); 460 } 461 
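/*
 * Example (illustrative only, not part of this driver): a minimal,
 * hypothetical userspace sketch of the /proc/mdstat event mechanism
 * described above md_new_event().  A reader consumes the current
 * contents, then poll()s for POLLPRI; the seq_file poll callback
 * reports POLLPRI once md_event_count has changed, at which point the
 * file can be re-read from offset 0.  Error handling is omitted.
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		struct pollfd pfd = { .events = POLLPRI };
 *
 *		pfd.fd = open("/proc/mdstat", O_RDONLY);
 *		for (;;) {
 *			lseek(pfd.fd, 0, SEEK_SET);
 *			while (read(pfd.fd, buf, sizeof(buf)) > 0)
 *				;			// consume current state
 *			poll(&pfd, 1, -1);		// block until next md event
 *			printf("md array event\n");
 *		}
 *	}
 */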
/*
 * Make sure no new requests are submitted to the device, and any requests that
 * have been submitted are completely handled.
 */
int mddev_suspend(struct mddev *mddev, bool interruptible)
{
	int err = 0;

	/*
	 * Holding reconfig_mutex while waiting for normal IO will deadlock,
	 * because other contexts can't update the super_block while it is
	 * held, and normal IO may rely on a super_block update to complete.
	 */
	lockdep_assert_not_held(&mddev->reconfig_mutex);

	if (interruptible)
		err = mutex_lock_interruptible(&mddev->suspend_mutex);
	else
		mutex_lock(&mddev->suspend_mutex);
	if (err)
		return err;

	if (mddev->suspended) {
		WRITE_ONCE(mddev->suspended, mddev->suspended + 1);
		mutex_unlock(&mddev->suspend_mutex);
		return 0;
	}

	percpu_ref_kill(&mddev->active_io);
	if (interruptible)
		err = wait_event_interruptible(mddev->sb_wait,
				percpu_ref_is_zero(&mddev->active_io));
	else
		wait_event(mddev->sb_wait,
				percpu_ref_is_zero(&mddev->active_io));
	if (err) {
		percpu_ref_resurrect(&mddev->active_io);
		mutex_unlock(&mddev->suspend_mutex);
		return err;
	}

	/*
	 * For raid456, io might be waiting for reshape to make progress,
	 * allow new reshape to start while waiting for io to be done to
	 * prevent deadlock.
	 */
	WRITE_ONCE(mddev->suspended, mddev->suspended + 1);

	/* restrict memory reclaim I/O while the raid array is suspended */
	mddev->noio_flag = memalloc_noio_save();

	mutex_unlock(&mddev->suspend_mutex);
	return 0;
}
EXPORT_SYMBOL_GPL(mddev_suspend);

static void __mddev_resume(struct mddev *mddev, bool recovery_needed)
{
	lockdep_assert_not_held(&mddev->reconfig_mutex);

	mutex_lock(&mddev->suspend_mutex);
	WRITE_ONCE(mddev->suspended, mddev->suspended - 1);
	if (mddev->suspended) {
		mutex_unlock(&mddev->suspend_mutex);
		return;
	}

	/* entered the memalloc scope from mddev_suspend() */
	memalloc_noio_restore(mddev->noio_flag);

	percpu_ref_resurrect(&mddev->active_io);
	wake_up(&mddev->sb_wait);

	if (recovery_needed)
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */

	mutex_unlock(&mddev->suspend_mutex);
}

void mddev_resume(struct mddev *mddev)
{
	return __mddev_resume(mddev, true);
}
EXPORT_SYMBOL_GPL(mddev_resume);

/* sync bdev before setting device to readonly or stopping the array */
static int mddev_set_closing_and_sync_blockdev(struct mddev *mddev, int opener_num)
{
	mutex_lock(&mddev->open_mutex);
	if (mddev->pers && atomic_read(&mddev->openers) > opener_num) {
		mutex_unlock(&mddev->open_mutex);
		return -EBUSY;
	}
	if (test_and_set_bit(MD_CLOSING, &mddev->flags)) {
		mutex_unlock(&mddev->open_mutex);
		return -EBUSY;
	}
	mutex_unlock(&mddev->open_mutex);

	sync_blockdev(mddev->gendisk->part0);
	return 0;
}

/*
 * The only difference from bio_chain_endio() is that the current
 * bi_status of bio does not affect the bi_status of parent.
 */
static void md_end_flush(struct bio *bio)
{
	struct bio *parent = bio->bi_private;

	/*
	 * If any flush IO fails before a power failure,
	 * disk data may be lost.
578 */ 579 if (bio->bi_status) 580 pr_err("md: %pg flush io error %d\n", bio->bi_bdev, 581 blk_status_to_errno(bio->bi_status)); 582 583 bio_put(bio); 584 bio_endio(parent); 585 } 586 587 bool md_flush_request(struct mddev *mddev, struct bio *bio) 588 { 589 struct md_rdev *rdev; 590 struct bio *new; 591 592 /* 593 * md_flush_reqeust() should be called under md_handle_request() and 594 * 'active_io' is already grabbed. Hence it's safe to get rdev directly 595 * without rcu protection. 596 */ 597 WARN_ON(percpu_ref_is_zero(&mddev->active_io)); 598 599 rdev_for_each(rdev, mddev) { 600 if (rdev->raid_disk < 0 || test_bit(Faulty, &rdev->flags)) 601 continue; 602 603 new = bio_alloc_bioset(rdev->bdev, 0, 604 REQ_OP_WRITE | REQ_PREFLUSH, GFP_NOIO, 605 &mddev->bio_set); 606 new->bi_private = bio; 607 new->bi_end_io = md_end_flush; 608 bio_inc_remaining(bio); 609 submit_bio(new); 610 } 611 612 if (bio_sectors(bio) == 0) { 613 bio_endio(bio); 614 return true; 615 } 616 617 bio->bi_opf &= ~REQ_PREFLUSH; 618 return false; 619 } 620 EXPORT_SYMBOL(md_flush_request); 621 622 static inline struct mddev *mddev_get(struct mddev *mddev) 623 { 624 lockdep_assert_held(&all_mddevs_lock); 625 626 if (test_bit(MD_DELETED, &mddev->flags)) 627 return NULL; 628 atomic_inc(&mddev->active); 629 return mddev; 630 } 631 632 static void mddev_delayed_delete(struct work_struct *ws); 633 634 static void __mddev_put(struct mddev *mddev) 635 { 636 if (mddev->raid_disks || !list_empty(&mddev->disks) || 637 mddev->ctime || mddev->hold_active) 638 return; 639 640 /* 641 * If array is freed by stopping array, MD_DELETED is set by 642 * do_md_stop(), MD_DELETED is still set here in case mddev is freed 643 * directly by closing a mddev that is created by create_on_open. 644 */ 645 set_bit(MD_DELETED, &mddev->flags); 646 /* 647 * Call queue_work inside the spinlock so that flush_workqueue() after 648 * mddev_find will succeed in waiting for the work to be done. 
649 */ 650 queue_work(md_misc_wq, &mddev->del_work); 651 } 652 653 static void mddev_put_locked(struct mddev *mddev) 654 { 655 if (atomic_dec_and_test(&mddev->active)) 656 __mddev_put(mddev); 657 } 658 659 void mddev_put(struct mddev *mddev) 660 { 661 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) 662 return; 663 664 __mddev_put(mddev); 665 spin_unlock(&all_mddevs_lock); 666 } 667 668 static void md_safemode_timeout(struct timer_list *t); 669 static void md_start_sync(struct work_struct *ws); 670 671 static void active_io_release(struct percpu_ref *ref) 672 { 673 struct mddev *mddev = container_of(ref, struct mddev, active_io); 674 675 wake_up(&mddev->sb_wait); 676 } 677 678 static void no_op(struct percpu_ref *r) {} 679 680 static bool mddev_set_bitmap_ops(struct mddev *mddev) 681 { 682 struct bitmap_operations *old = mddev->bitmap_ops; 683 struct md_submodule_head *head; 684 685 if (mddev->bitmap_id == ID_BITMAP_NONE || 686 (old && old->head.id == mddev->bitmap_id)) 687 return true; 688 689 xa_lock(&md_submodule); 690 head = xa_load(&md_submodule, mddev->bitmap_id); 691 692 if (!head) { 693 pr_warn("md: can't find bitmap id %d\n", mddev->bitmap_id); 694 goto err; 695 } 696 697 if (head->type != MD_BITMAP) { 698 pr_warn("md: invalid bitmap id %d\n", mddev->bitmap_id); 699 goto err; 700 } 701 702 mddev->bitmap_ops = (void *)head; 703 xa_unlock(&md_submodule); 704 705 if (!mddev_is_dm(mddev) && mddev->bitmap_ops->group) { 706 if (sysfs_create_group(&mddev->kobj, mddev->bitmap_ops->group)) 707 pr_warn("md: cannot register extra bitmap attributes for %s\n", 708 mdname(mddev)); 709 else 710 /* 711 * Inform user with KOBJ_CHANGE about new bitmap 712 * attributes. 713 */ 714 kobject_uevent(&mddev->kobj, KOBJ_CHANGE); 715 } 716 return true; 717 718 err: 719 xa_unlock(&md_submodule); 720 return false; 721 } 722 723 static void mddev_clear_bitmap_ops(struct mddev *mddev) 724 { 725 if (!mddev_is_dm(mddev) && mddev->bitmap_ops && 726 mddev->bitmap_ops->group) 727 sysfs_remove_group(&mddev->kobj, mddev->bitmap_ops->group); 728 729 mddev->bitmap_ops = NULL; 730 } 731 732 int mddev_init(struct mddev *mddev) 733 { 734 int err = 0; 735 736 if (!IS_ENABLED(CONFIG_MD_BITMAP)) 737 mddev->bitmap_id = ID_BITMAP_NONE; 738 else 739 mddev->bitmap_id = ID_BITMAP; 740 741 if (percpu_ref_init(&mddev->active_io, active_io_release, 742 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) 743 return -ENOMEM; 744 745 if (percpu_ref_init(&mddev->writes_pending, no_op, 746 PERCPU_REF_ALLOW_REINIT, GFP_KERNEL)) { 747 err = -ENOMEM; 748 goto exit_acitve_io; 749 } 750 751 err = bioset_init(&mddev->bio_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); 752 if (err) 753 goto exit_writes_pending; 754 755 err = bioset_init(&mddev->sync_set, BIO_POOL_SIZE, 0, BIOSET_NEED_BVECS); 756 if (err) 757 goto exit_bio_set; 758 759 err = bioset_init(&mddev->io_clone_set, BIO_POOL_SIZE, 760 offsetof(struct md_io_clone, bio_clone), 0); 761 if (err) 762 goto exit_sync_set; 763 764 /* We want to start with the refcount at zero */ 765 percpu_ref_put(&mddev->writes_pending); 766 767 mutex_init(&mddev->open_mutex); 768 mutex_init(&mddev->reconfig_mutex); 769 mutex_init(&mddev->suspend_mutex); 770 mutex_init(&mddev->bitmap_info.mutex); 771 INIT_LIST_HEAD(&mddev->disks); 772 INIT_LIST_HEAD(&mddev->all_mddevs); 773 INIT_LIST_HEAD(&mddev->deleting); 774 timer_setup(&mddev->safemode_timer, md_safemode_timeout, 0); 775 atomic_set(&mddev->active, 1); 776 atomic_set(&mddev->openers, 0); 777 atomic_set(&mddev->sync_seq, 0); 778 spin_lock_init(&mddev->lock); 779 
init_waitqueue_head(&mddev->sb_wait); 780 init_waitqueue_head(&mddev->recovery_wait); 781 mddev->reshape_position = MaxSector; 782 mddev->reshape_backwards = 0; 783 mddev->last_sync_action = ACTION_IDLE; 784 mddev->resync_min = 0; 785 mddev->resync_max = MaxSector; 786 mddev->level = LEVEL_NONE; 787 788 INIT_WORK(&mddev->sync_work, md_start_sync); 789 INIT_WORK(&mddev->del_work, mddev_delayed_delete); 790 791 return 0; 792 793 exit_sync_set: 794 bioset_exit(&mddev->sync_set); 795 exit_bio_set: 796 bioset_exit(&mddev->bio_set); 797 exit_writes_pending: 798 percpu_ref_exit(&mddev->writes_pending); 799 exit_acitve_io: 800 percpu_ref_exit(&mddev->active_io); 801 return err; 802 } 803 EXPORT_SYMBOL_GPL(mddev_init); 804 805 void mddev_destroy(struct mddev *mddev) 806 { 807 bioset_exit(&mddev->bio_set); 808 bioset_exit(&mddev->sync_set); 809 bioset_exit(&mddev->io_clone_set); 810 percpu_ref_exit(&mddev->active_io); 811 percpu_ref_exit(&mddev->writes_pending); 812 } 813 EXPORT_SYMBOL_GPL(mddev_destroy); 814 815 static struct mddev *mddev_find_locked(dev_t unit) 816 { 817 struct mddev *mddev; 818 819 list_for_each_entry(mddev, &all_mddevs, all_mddevs) 820 if (mddev->unit == unit) 821 return mddev; 822 823 return NULL; 824 } 825 826 /* find an unused unit number */ 827 static dev_t mddev_alloc_unit(void) 828 { 829 static int next_minor = 512; 830 int start = next_minor; 831 bool is_free = 0; 832 dev_t dev = 0; 833 834 while (!is_free) { 835 dev = MKDEV(MD_MAJOR, next_minor); 836 next_minor++; 837 if (next_minor > MINORMASK) 838 next_minor = 0; 839 if (next_minor == start) 840 return 0; /* Oh dear, all in use. */ 841 is_free = !mddev_find_locked(dev); 842 } 843 844 return dev; 845 } 846 847 static struct mddev *mddev_alloc(dev_t unit) 848 { 849 struct mddev *new; 850 int error; 851 852 if (unit && MAJOR(unit) != MD_MAJOR) 853 unit &= ~((1 << MdpMinorShift) - 1); 854 855 new = kzalloc(sizeof(*new), GFP_KERNEL); 856 if (!new) 857 return ERR_PTR(-ENOMEM); 858 859 error = mddev_init(new); 860 if (error) 861 goto out_free_new; 862 863 spin_lock(&all_mddevs_lock); 864 if (unit) { 865 error = -EEXIST; 866 if (mddev_find_locked(unit)) 867 goto out_destroy_new; 868 new->unit = unit; 869 if (MAJOR(unit) == MD_MAJOR) 870 new->md_minor = MINOR(unit); 871 else 872 new->md_minor = MINOR(unit) >> MdpMinorShift; 873 new->hold_active = UNTIL_IOCTL; 874 } else { 875 error = -ENODEV; 876 new->unit = mddev_alloc_unit(); 877 if (!new->unit) 878 goto out_destroy_new; 879 new->md_minor = MINOR(new->unit); 880 new->hold_active = UNTIL_STOP; 881 } 882 883 list_add(&new->all_mddevs, &all_mddevs); 884 spin_unlock(&all_mddevs_lock); 885 return new; 886 887 out_destroy_new: 888 spin_unlock(&all_mddevs_lock); 889 mddev_destroy(new); 890 out_free_new: 891 kfree(new); 892 return ERR_PTR(error); 893 } 894 895 static void mddev_free(struct mddev *mddev) 896 { 897 spin_lock(&all_mddevs_lock); 898 list_del(&mddev->all_mddevs); 899 spin_unlock(&all_mddevs_lock); 900 901 mddev_destroy(mddev); 902 kfree(mddev); 903 } 904 905 static const struct attribute_group md_redundancy_group; 906 907 void mddev_unlock(struct mddev *mddev) 908 { 909 struct md_rdev *rdev; 910 struct md_rdev *tmp; 911 LIST_HEAD(delete); 912 913 if (!list_empty(&mddev->deleting)) 914 list_splice_init(&mddev->deleting, &delete); 915 916 if (mddev->to_remove) { 917 /* These cannot be removed under reconfig_mutex as 918 * an access to the files will try to take reconfig_mutex 919 * while holding the file unremovable, which leads to 920 * a deadlock. 
	 * So set sysfs_active while the removal is happening,
	 * and anything else which might set ->to_remove or may
	 * otherwise change the sysfs namespace will fail with
	 * -EBUSY if sysfs_active is still set.
	 * We set sysfs_active under reconfig_mutex and elsewhere
	 * test it under the same mutex to ensure its correct value
	 * is seen.
	 */
		const struct attribute_group *to_remove = mddev->to_remove;
		mddev->to_remove = NULL;
		mddev->sysfs_active = 1;
		mutex_unlock(&mddev->reconfig_mutex);

		if (mddev->kobj.sd) {
			if (to_remove != &md_redundancy_group)
				sysfs_remove_group(&mddev->kobj, to_remove);
			if (mddev->pers == NULL ||
			    mddev->pers->sync_request == NULL) {
				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
				if (mddev->sysfs_action)
					sysfs_put(mddev->sysfs_action);
				if (mddev->sysfs_completed)
					sysfs_put(mddev->sysfs_completed);
				if (mddev->sysfs_degraded)
					sysfs_put(mddev->sysfs_degraded);
				mddev->sysfs_action = NULL;
				mddev->sysfs_completed = NULL;
				mddev->sysfs_degraded = NULL;
			}
		}
		mddev->sysfs_active = 0;
	} else
		mutex_unlock(&mddev->reconfig_mutex);

	md_wakeup_thread(mddev->thread);
	wake_up(&mddev->sb_wait);

	list_for_each_entry_safe(rdev, tmp, &delete, same_set) {
		list_del_init(&rdev->same_set);
		kobject_del(&rdev->kobj);
		export_rdev(rdev, mddev);
	}

	if (!legacy_async_del_gendisk) {
		/*
		 * Call del_gendisk after releasing reconfig_mutex to avoid a
		 * deadlock (e.g. calling del_gendisk under the lock while an
		 * access to a sysfs file waits for the lock).
		 * MD_DELETED is only used for md raid and is set in
		 * do_md_stop(); dm-raid only uses md_stop() to stop, so dm-raid
		 * doesn't need to check MD_DELETED when taking the reconfig lock.
		 */
		if (test_bit(MD_DELETED, &mddev->flags) &&
		    !test_and_set_bit(MD_DO_DELETE, &mddev->flags)) {
			kobject_del(&mddev->kobj);
			del_gendisk(mddev->gendisk);
		}
	}
}
EXPORT_SYMBOL_GPL(mddev_unlock);

struct md_rdev *md_find_rdev_nr_rcu(struct mddev *mddev, int nr)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_nr_rcu);

static struct md_rdev *find_rdev(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

struct md_rdev *md_find_rdev_rcu(struct mddev *mddev, dev_t dev)
{
	struct md_rdev *rdev;

	rdev_for_each_rcu(rdev, mddev)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}
EXPORT_SYMBOL_GPL(md_find_rdev_rcu);

static struct md_personality *get_pers(int level, char *clevel)
{
	struct md_personality *ret = NULL;
	struct md_submodule_head *head;
	unsigned long i;

	xa_lock(&md_submodule);
	xa_for_each(&md_submodule, i, head) {
		if (head->type != MD_PERSONALITY)
			continue;
		if ((level != LEVEL_NONE && head->id == level) ||
		    !strcmp(head->name, clevel)) {
			if (try_module_get(head->owner))
				ret = (void *)head;
			break;
		}
	}
	xa_unlock(&md_submodule);

	if (!ret) {
		if (level != LEVEL_NONE)
			pr_warn("md: personality for level %d is not loaded!\n",
				level);
		else
			pr_warn("md: personality for level %s is not loaded!\n",
				clevel);
} 1044 1045 return ret; 1046 } 1047 1048 static void put_pers(struct md_personality *pers) 1049 { 1050 module_put(pers->head.owner); 1051 } 1052 1053 /* return the offset of the super block in 512byte sectors */ 1054 static inline sector_t calc_dev_sboffset(struct md_rdev *rdev) 1055 { 1056 return MD_NEW_SIZE_SECTORS(bdev_nr_sectors(rdev->bdev)); 1057 } 1058 1059 static int alloc_disk_sb(struct md_rdev *rdev) 1060 { 1061 rdev->sb_page = alloc_page(GFP_KERNEL); 1062 if (!rdev->sb_page) 1063 return -ENOMEM; 1064 return 0; 1065 } 1066 1067 void md_rdev_clear(struct md_rdev *rdev) 1068 { 1069 if (rdev->sb_page) { 1070 put_page(rdev->sb_page); 1071 rdev->sb_loaded = 0; 1072 rdev->sb_page = NULL; 1073 rdev->sb_start = 0; 1074 rdev->sectors = 0; 1075 } 1076 if (rdev->bb_page) { 1077 put_page(rdev->bb_page); 1078 rdev->bb_page = NULL; 1079 } 1080 badblocks_exit(&rdev->badblocks); 1081 } 1082 EXPORT_SYMBOL_GPL(md_rdev_clear); 1083 1084 static void super_written(struct bio *bio) 1085 { 1086 struct md_rdev *rdev = bio->bi_private; 1087 struct mddev *mddev = rdev->mddev; 1088 1089 if (bio->bi_status) { 1090 pr_err("md: %s gets error=%d\n", __func__, 1091 blk_status_to_errno(bio->bi_status)); 1092 md_error(mddev, rdev); 1093 if (!test_bit(Faulty, &rdev->flags) 1094 && (bio->bi_opf & MD_FAILFAST)) { 1095 set_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags); 1096 set_bit(LastDev, &rdev->flags); 1097 } 1098 } else 1099 clear_bit(LastDev, &rdev->flags); 1100 1101 bio_put(bio); 1102 1103 rdev_dec_pending(rdev, mddev); 1104 1105 if (atomic_dec_and_test(&mddev->pending_writes)) 1106 wake_up(&mddev->sb_wait); 1107 } 1108 1109 /** 1110 * md_write_metadata - write metadata to underlying disk, including 1111 * array superblock, badblocks, bitmap superblock and bitmap bits. 1112 * @mddev: the array to write 1113 * @rdev: the underlying disk to write 1114 * @sector: the offset to @rdev 1115 * @size: the length of the metadata 1116 * @page: the metadata 1117 * @offset: the offset to @page 1118 * 1119 * Write @size bytes of @page start from @offset, to @sector of @rdev, Increment 1120 * mddev->pending_writes before returning, and decrement it on completion, 1121 * waking up sb_wait. Caller must call md_super_wait() after issuing io to all 1122 * rdev. If an error occurred, md_error() will be called, and the @rdev will be 1123 * kicked out from @mddev. 1124 */ 1125 void md_write_metadata(struct mddev *mddev, struct md_rdev *rdev, 1126 sector_t sector, int size, struct page *page, 1127 unsigned int offset) 1128 { 1129 struct bio *bio; 1130 1131 if (!page) 1132 return; 1133 1134 if (test_bit(Faulty, &rdev->flags)) 1135 return; 1136 1137 bio = bio_alloc_bioset(rdev->meta_bdev ? 
rdev->meta_bdev : rdev->bdev, 1138 1, 1139 REQ_OP_WRITE | REQ_SYNC | REQ_IDLE | REQ_META 1140 | REQ_PREFLUSH | REQ_FUA, 1141 GFP_NOIO, &mddev->sync_set); 1142 1143 atomic_inc(&rdev->nr_pending); 1144 1145 bio->bi_iter.bi_sector = sector; 1146 __bio_add_page(bio, page, size, offset); 1147 bio->bi_private = rdev; 1148 bio->bi_end_io = super_written; 1149 1150 if (test_bit(MD_FAILFAST_SUPPORTED, &mddev->flags) && 1151 test_bit(FailFast, &rdev->flags) && 1152 !test_bit(LastDev, &rdev->flags)) 1153 bio->bi_opf |= MD_FAILFAST; 1154 1155 atomic_inc(&mddev->pending_writes); 1156 submit_bio(bio); 1157 } 1158 1159 int md_super_wait(struct mddev *mddev) 1160 { 1161 /* wait for all superblock writes that were scheduled to complete */ 1162 wait_event(mddev->sb_wait, atomic_read(&mddev->pending_writes)==0); 1163 if (test_and_clear_bit(MD_SB_NEED_REWRITE, &mddev->sb_flags)) 1164 return -EAGAIN; 1165 return 0; 1166 } 1167 1168 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, 1169 struct page *page, blk_opf_t opf, bool metadata_op) 1170 { 1171 struct bio bio; 1172 struct bio_vec bvec; 1173 1174 if (metadata_op && rdev->meta_bdev) 1175 bio_init(&bio, rdev->meta_bdev, &bvec, 1, opf); 1176 else 1177 bio_init(&bio, rdev->bdev, &bvec, 1, opf); 1178 1179 if (metadata_op) 1180 bio.bi_iter.bi_sector = sector + rdev->sb_start; 1181 else if (rdev->mddev->reshape_position != MaxSector && 1182 (rdev->mddev->reshape_backwards == 1183 (sector >= rdev->mddev->reshape_position))) 1184 bio.bi_iter.bi_sector = sector + rdev->new_data_offset; 1185 else 1186 bio.bi_iter.bi_sector = sector + rdev->data_offset; 1187 __bio_add_page(&bio, page, size, 0); 1188 1189 submit_bio_wait(&bio); 1190 1191 return !bio.bi_status; 1192 } 1193 EXPORT_SYMBOL_GPL(sync_page_io); 1194 1195 static int read_disk_sb(struct md_rdev *rdev, int size) 1196 { 1197 if (rdev->sb_loaded) 1198 return 0; 1199 1200 if (!sync_page_io(rdev, 0, size, rdev->sb_page, REQ_OP_READ, true)) 1201 goto fail; 1202 rdev->sb_loaded = 1; 1203 return 0; 1204 1205 fail: 1206 pr_err("md: disabled device %pg, could not read superblock.\n", 1207 rdev->bdev); 1208 return -EINVAL; 1209 } 1210 1211 static int md_uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2) 1212 { 1213 return sb1->set_uuid0 == sb2->set_uuid0 && 1214 sb1->set_uuid1 == sb2->set_uuid1 && 1215 sb1->set_uuid2 == sb2->set_uuid2 && 1216 sb1->set_uuid3 == sb2->set_uuid3; 1217 } 1218 1219 static int md_sb_equal(mdp_super_t *sb1, mdp_super_t *sb2) 1220 { 1221 int ret; 1222 mdp_super_t *tmp1, *tmp2; 1223 1224 tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL); 1225 tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL); 1226 1227 if (!tmp1 || !tmp2) { 1228 ret = 0; 1229 goto abort; 1230 } 1231 1232 *tmp1 = *sb1; 1233 *tmp2 = *sb2; 1234 1235 /* 1236 * nr_disks is not constant 1237 */ 1238 tmp1->nr_disks = 0; 1239 tmp2->nr_disks = 0; 1240 1241 ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0); 1242 abort: 1243 kfree(tmp1); 1244 kfree(tmp2); 1245 return ret; 1246 } 1247 1248 static u32 md_csum_fold(u32 csum) 1249 { 1250 csum = (csum & 0xffff) + (csum >> 16); 1251 return (csum & 0xffff) + (csum >> 16); 1252 } 1253 1254 static unsigned int calc_sb_csum(mdp_super_t *sb) 1255 { 1256 u64 newcsum = 0; 1257 u32 *sb32 = (u32*)sb; 1258 int i; 1259 unsigned int disk_csum, csum; 1260 1261 disk_csum = sb->sb_csum; 1262 sb->sb_csum = 0; 1263 1264 for (i = 0; i < MD_SB_BYTES/4 ; i++) 1265 newcsum += sb32[i]; 1266 csum = (newcsum & 0xffffffff) + (newcsum>>32); 1267 1268 #ifdef CONFIG_ALPHA 1269 /* This used to use csum_partial, which 
was wrong for several 1270 * reasons including that different results are returned on 1271 * different architectures. It isn't critical that we get exactly 1272 * the same return value as before (we always csum_fold before 1273 * testing, and that removes any differences). However as we 1274 * know that csum_partial always returned a 16bit value on 1275 * alphas, do a fold to maximise conformity to previous behaviour. 1276 */ 1277 sb->sb_csum = md_csum_fold(disk_csum); 1278 #else 1279 sb->sb_csum = disk_csum; 1280 #endif 1281 return csum; 1282 } 1283 1284 /* 1285 * Handle superblock details. 1286 * We want to be able to handle multiple superblock formats 1287 * so we have a common interface to them all, and an array of 1288 * different handlers. 1289 * We rely on user-space to write the initial superblock, and support 1290 * reading and updating of superblocks. 1291 * Interface methods are: 1292 * int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version) 1293 * loads and validates a superblock on dev. 1294 * if refdev != NULL, compare superblocks on both devices 1295 * Return: 1296 * 0 - dev has a superblock that is compatible with refdev 1297 * 1 - dev has a superblock that is compatible and newer than refdev 1298 * so dev should be used as the refdev in future 1299 * -EINVAL superblock incompatible or invalid 1300 * -othererror e.g. -EIO 1301 * 1302 * int validate_super(struct mddev *mddev, struct md_rdev *dev) 1303 * Verify that dev is acceptable into mddev. 1304 * The first time, mddev->raid_disks will be 0, and data from 1305 * dev should be merged in. Subsequent calls check that dev 1306 * is new enough. Return 0 or -EINVAL 1307 * 1308 * void sync_super(struct mddev *mddev, struct md_rdev *dev) 1309 * Update the superblock for rdev with data in mddev 1310 * This does not write to disc. 1311 * 1312 */ 1313 1314 struct super_type { 1315 char *name; 1316 struct module *owner; 1317 int (*load_super)(struct md_rdev *rdev, 1318 struct md_rdev *refdev, 1319 int minor_version); 1320 int (*validate_super)(struct mddev *mddev, 1321 struct md_rdev *freshest, 1322 struct md_rdev *rdev); 1323 void (*sync_super)(struct mddev *mddev, 1324 struct md_rdev *rdev); 1325 unsigned long long (*rdev_size_change)(struct md_rdev *rdev, 1326 sector_t num_sectors); 1327 int (*allow_new_offset)(struct md_rdev *rdev, 1328 unsigned long long new_offset); 1329 }; 1330 1331 /* 1332 * Check that the given mddev has no bitmap. 1333 * 1334 * This function is called from the run method of all personalities that do not 1335 * support bitmaps. It prints an error message and returns non-zero if mddev 1336 * has a bitmap. Otherwise, it returns 0. 1337 * 1338 */ 1339 int md_check_no_bitmap(struct mddev *mddev) 1340 { 1341 if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset) 1342 return 0; 1343 pr_warn("%s: bitmaps are not supported for %s\n", 1344 mdname(mddev), mddev->pers->head.name); 1345 return 1; 1346 } 1347 EXPORT_SYMBOL(md_check_no_bitmap); 1348 1349 /* 1350 * load_super for 0.90.0 1351 */ 1352 static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version) 1353 { 1354 mdp_super_t *sb; 1355 int ret; 1356 bool spare_disk = true; 1357 1358 /* 1359 * Calculate the position of the superblock (512byte sectors), 1360 * it's at the end of the disk. 1361 * 1362 * It also happens to be a multiple of 4Kb. 
1363 */ 1364 rdev->sb_start = calc_dev_sboffset(rdev); 1365 1366 ret = read_disk_sb(rdev, MD_SB_BYTES); 1367 if (ret) 1368 return ret; 1369 1370 ret = -EINVAL; 1371 1372 sb = page_address(rdev->sb_page); 1373 1374 if (sb->md_magic != MD_SB_MAGIC) { 1375 pr_warn("md: invalid raid superblock magic on %pg\n", 1376 rdev->bdev); 1377 goto abort; 1378 } 1379 1380 if (sb->major_version != 0 || 1381 sb->minor_version < 90 || 1382 sb->minor_version > 91) { 1383 pr_warn("Bad version number %d.%d on %pg\n", 1384 sb->major_version, sb->minor_version, rdev->bdev); 1385 goto abort; 1386 } 1387 1388 if (sb->raid_disks <= 0) 1389 goto abort; 1390 1391 if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) { 1392 pr_warn("md: invalid superblock checksum on %pg\n", rdev->bdev); 1393 goto abort; 1394 } 1395 1396 rdev->preferred_minor = sb->md_minor; 1397 rdev->data_offset = 0; 1398 rdev->new_data_offset = 0; 1399 rdev->sb_size = MD_SB_BYTES; 1400 rdev->badblocks.shift = -1; 1401 1402 rdev->desc_nr = sb->this_disk.number; 1403 1404 /* not spare disk */ 1405 if (rdev->desc_nr >= 0 && rdev->desc_nr < MD_SB_DISKS && 1406 sb->disks[rdev->desc_nr].state & ((1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))) 1407 spare_disk = false; 1408 1409 if (!refdev) { 1410 if (!spare_disk) 1411 ret = 1; 1412 else 1413 ret = 0; 1414 } else { 1415 __u64 ev1, ev2; 1416 mdp_super_t *refsb = page_address(refdev->sb_page); 1417 if (!md_uuid_equal(refsb, sb)) { 1418 pr_warn("md: %pg has different UUID to %pg\n", 1419 rdev->bdev, refdev->bdev); 1420 goto abort; 1421 } 1422 if (!md_sb_equal(refsb, sb)) { 1423 pr_warn("md: %pg has same UUID but different superblock to %pg\n", 1424 rdev->bdev, refdev->bdev); 1425 goto abort; 1426 } 1427 ev1 = md_event(sb); 1428 ev2 = md_event(refsb); 1429 1430 if (!spare_disk && ev1 > ev2) 1431 ret = 1; 1432 else 1433 ret = 0; 1434 } 1435 rdev->sectors = rdev->sb_start; 1436 /* Limit to 4TB as metadata cannot record more than that. 1437 * (not needed for Linear and RAID0 as metadata doesn't 1438 * record this size) 1439 */ 1440 if ((u64)rdev->sectors >= (2ULL << 32) && sb->level >= 1) 1441 rdev->sectors = (sector_t)(2ULL << 32) - 2; 1442 1443 if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1) 1444 /* "this cannot possibly happen" ... 
*/ 1445 ret = -EINVAL; 1446 1447 abort: 1448 return ret; 1449 } 1450 1451 static u64 md_bitmap_events_cleared(struct mddev *mddev) 1452 { 1453 struct md_bitmap_stats stats; 1454 int err; 1455 1456 if (!md_bitmap_enabled(mddev, false)) 1457 return 0; 1458 1459 err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats); 1460 if (err) 1461 return 0; 1462 1463 return stats.events_cleared; 1464 } 1465 1466 /* 1467 * validate_super for 0.90.0 1468 * note: we are not using "freshest" for 0.9 superblock 1469 */ 1470 static int super_90_validate(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev) 1471 { 1472 mdp_disk_t *desc; 1473 mdp_super_t *sb = page_address(rdev->sb_page); 1474 __u64 ev1 = md_event(sb); 1475 1476 rdev->raid_disk = -1; 1477 clear_bit(Faulty, &rdev->flags); 1478 clear_bit(In_sync, &rdev->flags); 1479 clear_bit(Bitmap_sync, &rdev->flags); 1480 clear_bit(WriteMostly, &rdev->flags); 1481 1482 if (mddev->raid_disks == 0) { 1483 mddev->major_version = 0; 1484 mddev->minor_version = sb->minor_version; 1485 mddev->patch_version = sb->patch_version; 1486 mddev->external = 0; 1487 mddev->chunk_sectors = sb->chunk_size >> 9; 1488 mddev->ctime = sb->ctime; 1489 mddev->utime = sb->utime; 1490 mddev->level = sb->level; 1491 mddev->clevel[0] = 0; 1492 mddev->layout = sb->layout; 1493 mddev->raid_disks = sb->raid_disks; 1494 mddev->dev_sectors = ((sector_t)sb->size) * 2; 1495 mddev->events = ev1; 1496 mddev->bitmap_info.offset = 0; 1497 mddev->bitmap_info.space = 0; 1498 /* bitmap can use 60 K after the 4K superblocks */ 1499 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; 1500 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); 1501 mddev->reshape_backwards = 0; 1502 1503 if (mddev->minor_version >= 91) { 1504 mddev->reshape_position = sb->reshape_position; 1505 mddev->delta_disks = sb->delta_disks; 1506 mddev->new_level = sb->new_level; 1507 mddev->new_layout = sb->new_layout; 1508 mddev->new_chunk_sectors = sb->new_chunk >> 9; 1509 if (mddev->delta_disks < 0) 1510 mddev->reshape_backwards = 1; 1511 } else { 1512 mddev->reshape_position = MaxSector; 1513 mddev->delta_disks = 0; 1514 mddev->new_level = mddev->level; 1515 mddev->new_layout = mddev->layout; 1516 mddev->new_chunk_sectors = mddev->chunk_sectors; 1517 } 1518 if (mddev->level == 0) 1519 mddev->layout = -1; 1520 1521 if (sb->state & (1<<MD_SB_CLEAN)) 1522 mddev->resync_offset = MaxSector; 1523 else { 1524 if (sb->events_hi == sb->cp_events_hi && 1525 sb->events_lo == sb->cp_events_lo) { 1526 mddev->resync_offset = sb->recovery_cp; 1527 } else 1528 mddev->resync_offset = 0; 1529 } 1530 1531 memcpy(mddev->uuid+0, &sb->set_uuid0, 4); 1532 memcpy(mddev->uuid+4, &sb->set_uuid1, 4); 1533 memcpy(mddev->uuid+8, &sb->set_uuid2, 4); 1534 memcpy(mddev->uuid+12,&sb->set_uuid3, 4); 1535 1536 mddev->max_disks = MD_SB_DISKS; 1537 1538 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) && 1539 mddev->bitmap_info.file == NULL) { 1540 mddev->bitmap_info.offset = 1541 mddev->bitmap_info.default_offset; 1542 mddev->bitmap_info.space = 1543 mddev->bitmap_info.default_space; 1544 } 1545 1546 } else if (mddev->pers == NULL) { 1547 /* Insist on good event counter while assembling, except 1548 * for spares (which don't need an event count) */ 1549 ++ev1; 1550 if (sb->disks[rdev->desc_nr].state & ( 1551 (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))) 1552 if (ev1 < mddev->events) 1553 return -EINVAL; 1554 } else if (mddev->bitmap) { 1555 /* if adding to array with a bitmap, then we can accept an 1556 * older device ... but not too old. 
1557 */ 1558 if (ev1 < md_bitmap_events_cleared(mddev)) 1559 return 0; 1560 if (ev1 < mddev->events) 1561 set_bit(Bitmap_sync, &rdev->flags); 1562 } else { 1563 if (ev1 < mddev->events) 1564 /* just a hot-add of a new device, leave raid_disk at -1 */ 1565 return 0; 1566 } 1567 1568 desc = sb->disks + rdev->desc_nr; 1569 1570 if (desc->state & (1<<MD_DISK_FAULTY)) 1571 set_bit(Faulty, &rdev->flags); 1572 else if (desc->state & (1<<MD_DISK_SYNC)) { 1573 set_bit(In_sync, &rdev->flags); 1574 rdev->raid_disk = desc->raid_disk; 1575 rdev->saved_raid_disk = desc->raid_disk; 1576 } else if (desc->state & (1<<MD_DISK_ACTIVE)) { 1577 /* active but not in sync implies recovery up to 1578 * reshape position. We don't know exactly where 1579 * that is, so set to zero for now 1580 */ 1581 if (mddev->minor_version >= 91) { 1582 rdev->recovery_offset = 0; 1583 rdev->raid_disk = desc->raid_disk; 1584 } 1585 } 1586 if (desc->state & (1<<MD_DISK_WRITEMOSTLY)) 1587 set_bit(WriteMostly, &rdev->flags); 1588 if (desc->state & (1<<MD_DISK_FAILFAST)) 1589 set_bit(FailFast, &rdev->flags); 1590 return 0; 1591 } 1592 1593 /* 1594 * sync_super for 0.90.0 1595 */ 1596 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev) 1597 { 1598 mdp_super_t *sb; 1599 struct md_rdev *rdev2; 1600 int next_spare = mddev->raid_disks; 1601 1602 /* make rdev->sb match mddev data.. 1603 * 1604 * 1/ zero out disks 1605 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare); 1606 * 3/ any empty disks < next_spare become removed 1607 * 1608 * disks[0] gets initialised to REMOVED because 1609 * we cannot be sure from other fields if it has 1610 * been initialised or not. 1611 */ 1612 int i; 1613 int active=0, working=0,failed=0,spare=0,nr_disks=0; 1614 1615 rdev->sb_size = MD_SB_BYTES; 1616 1617 sb = page_address(rdev->sb_page); 1618 1619 memset(sb, 0, sizeof(*sb)); 1620 1621 sb->md_magic = MD_SB_MAGIC; 1622 sb->major_version = mddev->major_version; 1623 sb->patch_version = mddev->patch_version; 1624 sb->gvalid_words = 0; /* ignored */ 1625 memcpy(&sb->set_uuid0, mddev->uuid+0, 4); 1626 memcpy(&sb->set_uuid1, mddev->uuid+4, 4); 1627 memcpy(&sb->set_uuid2, mddev->uuid+8, 4); 1628 memcpy(&sb->set_uuid3, mddev->uuid+12,4); 1629 1630 sb->ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX); 1631 sb->level = mddev->level; 1632 sb->size = mddev->dev_sectors / 2; 1633 sb->raid_disks = mddev->raid_disks; 1634 sb->md_minor = mddev->md_minor; 1635 sb->not_persistent = 0; 1636 sb->utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX); 1637 sb->state = 0; 1638 sb->events_hi = (mddev->events>>32); 1639 sb->events_lo = (u32)mddev->events; 1640 1641 if (mddev->reshape_position == MaxSector) 1642 sb->minor_version = 90; 1643 else { 1644 sb->minor_version = 91; 1645 sb->reshape_position = mddev->reshape_position; 1646 sb->new_level = mddev->new_level; 1647 sb->delta_disks = mddev->delta_disks; 1648 sb->new_layout = mddev->new_layout; 1649 sb->new_chunk = mddev->new_chunk_sectors << 9; 1650 } 1651 mddev->minor_version = sb->minor_version; 1652 if (mddev->in_sync) 1653 { 1654 sb->recovery_cp = mddev->resync_offset; 1655 sb->cp_events_hi = (mddev->events>>32); 1656 sb->cp_events_lo = (u32)mddev->events; 1657 if (mddev->resync_offset == MaxSector) 1658 sb->state = (1<< MD_SB_CLEAN); 1659 } else 1660 sb->recovery_cp = 0; 1661 1662 sb->layout = mddev->layout; 1663 sb->chunk_size = mddev->chunk_sectors << 9; 1664 1665 if (mddev->bitmap && mddev->bitmap_info.file == NULL) 1666 sb->state |= (1<<MD_SB_BITMAP_PRESENT); 1667 1668 
sb->disks[0].state = (1<<MD_DISK_REMOVED); 1669 rdev_for_each(rdev2, mddev) { 1670 mdp_disk_t *d; 1671 int desc_nr; 1672 int is_active = test_bit(In_sync, &rdev2->flags); 1673 1674 if (rdev2->raid_disk >= 0 && 1675 sb->minor_version >= 91) 1676 /* we have nowhere to store the recovery_offset, 1677 * but if it is not below the reshape_position, 1678 * we can piggy-back on that. 1679 */ 1680 is_active = 1; 1681 if (rdev2->raid_disk < 0 || 1682 test_bit(Faulty, &rdev2->flags)) 1683 is_active = 0; 1684 if (is_active) 1685 desc_nr = rdev2->raid_disk; 1686 else 1687 desc_nr = next_spare++; 1688 rdev2->desc_nr = desc_nr; 1689 d = &sb->disks[rdev2->desc_nr]; 1690 nr_disks++; 1691 d->number = rdev2->desc_nr; 1692 d->major = MAJOR(rdev2->bdev->bd_dev); 1693 d->minor = MINOR(rdev2->bdev->bd_dev); 1694 if (is_active) 1695 d->raid_disk = rdev2->raid_disk; 1696 else 1697 d->raid_disk = rdev2->desc_nr; /* compatibility */ 1698 if (test_bit(Faulty, &rdev2->flags)) 1699 d->state = (1<<MD_DISK_FAULTY); 1700 else if (is_active) { 1701 d->state = (1<<MD_DISK_ACTIVE); 1702 if (test_bit(In_sync, &rdev2->flags)) 1703 d->state |= (1<<MD_DISK_SYNC); 1704 active++; 1705 working++; 1706 } else { 1707 d->state = 0; 1708 spare++; 1709 working++; 1710 } 1711 if (test_bit(WriteMostly, &rdev2->flags)) 1712 d->state |= (1<<MD_DISK_WRITEMOSTLY); 1713 if (test_bit(FailFast, &rdev2->flags)) 1714 d->state |= (1<<MD_DISK_FAILFAST); 1715 } 1716 /* now set the "removed" and "faulty" bits on any missing devices */ 1717 for (i=0 ; i < mddev->raid_disks ; i++) { 1718 mdp_disk_t *d = &sb->disks[i]; 1719 if (d->state == 0 && d->number == 0) { 1720 d->number = i; 1721 d->raid_disk = i; 1722 d->state = (1<<MD_DISK_REMOVED); 1723 d->state |= (1<<MD_DISK_FAULTY); 1724 failed++; 1725 } 1726 } 1727 sb->nr_disks = nr_disks; 1728 sb->active_disks = active; 1729 sb->working_disks = working; 1730 sb->failed_disks = failed; 1731 sb->spare_disks = spare; 1732 1733 sb->this_disk = sb->disks[rdev->desc_nr]; 1734 sb->sb_csum = calc_sb_csum(sb); 1735 } 1736 1737 /* 1738 * rdev_size_change for 0.90.0 1739 */ 1740 static unsigned long long 1741 super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) 1742 { 1743 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) 1744 return 0; /* component must fit device */ 1745 if (rdev->mddev->bitmap_info.offset) 1746 return 0; /* can't move bitmap */ 1747 rdev->sb_start = calc_dev_sboffset(rdev); 1748 if (!num_sectors || num_sectors > rdev->sb_start) 1749 num_sectors = rdev->sb_start; 1750 /* Limit to 4TB as metadata cannot record more than that. 1751 * 4TB == 2^32 KB, or 2*2^32 sectors. 
1752 */ 1753 if ((u64)num_sectors >= (2ULL << 32) && rdev->mddev->level >= 1) 1754 num_sectors = (sector_t)(2ULL << 32) - 2; 1755 do { 1756 md_write_metadata(rdev->mddev, rdev, rdev->sb_start, 1757 rdev->sb_size, rdev->sb_page, 0); 1758 } while (md_super_wait(rdev->mddev) < 0); 1759 return num_sectors; 1760 } 1761 1762 static int 1763 super_90_allow_new_offset(struct md_rdev *rdev, unsigned long long new_offset) 1764 { 1765 /* non-zero offset changes not possible with v0.90 */ 1766 return new_offset == 0; 1767 } 1768 1769 /* 1770 * version 1 superblock 1771 */ 1772 1773 static __le32 calc_sb_1_csum(struct mdp_superblock_1 *sb) 1774 { 1775 __le32 disk_csum; 1776 u32 csum; 1777 unsigned long long newcsum; 1778 int size = 256 + le32_to_cpu(sb->max_dev)*2; 1779 __le32 *isuper = (__le32*)sb; 1780 1781 disk_csum = sb->sb_csum; 1782 sb->sb_csum = 0; 1783 newcsum = 0; 1784 for (; size >= 4; size -= 4) 1785 newcsum += le32_to_cpu(*isuper++); 1786 1787 if (size == 2) 1788 newcsum += le16_to_cpu(*(__le16*) isuper); 1789 1790 csum = (newcsum & 0xffffffff) + (newcsum >> 32); 1791 sb->sb_csum = disk_csum; 1792 return cpu_to_le32(csum); 1793 } 1794 1795 static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version) 1796 { 1797 struct mdp_superblock_1 *sb; 1798 int ret; 1799 sector_t sb_start; 1800 sector_t sectors; 1801 int bmask; 1802 bool spare_disk = true; 1803 1804 /* 1805 * Calculate the position of the superblock in 512byte sectors. 1806 * It is always aligned to a 4K boundary and 1807 * depeding on minor_version, it can be: 1808 * 0: At least 8K, but less than 12K, from end of device 1809 * 1: At start of device 1810 * 2: 4K from start of device. 1811 */ 1812 switch(minor_version) { 1813 case 0: 1814 sb_start = bdev_nr_sectors(rdev->bdev) - 8 * 2; 1815 sb_start &= ~(sector_t)(4*2-1); 1816 break; 1817 case 1: 1818 sb_start = 0; 1819 break; 1820 case 2: 1821 sb_start = 8; 1822 break; 1823 default: 1824 return -EINVAL; 1825 } 1826 rdev->sb_start = sb_start; 1827 1828 /* superblock is rarely larger than 1K, but it can be larger, 1829 * and it is safe to read 4k, so we do that 1830 */ 1831 ret = read_disk_sb(rdev, 4096); 1832 if (ret) return ret; 1833 1834 sb = page_address(rdev->sb_page); 1835 1836 if (sb->magic != cpu_to_le32(MD_SB_MAGIC) || 1837 sb->major_version != cpu_to_le32(1) || 1838 le32_to_cpu(sb->max_dev) > (4096-256)/2 || 1839 le64_to_cpu(sb->super_offset) != rdev->sb_start || 1840 (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0) 1841 return -EINVAL; 1842 1843 if (calc_sb_1_csum(sb) != sb->sb_csum) { 1844 pr_warn("md: invalid superblock checksum on %pg\n", 1845 rdev->bdev); 1846 return -EINVAL; 1847 } 1848 if (le64_to_cpu(sb->data_size) < 10) { 1849 pr_warn("md: data_size too small on %pg\n", 1850 rdev->bdev); 1851 return -EINVAL; 1852 } 1853 if (sb->pad0 || 1854 sb->pad3[0] || 1855 memcmp(sb->pad3, sb->pad3+1, sizeof(sb->pad3) - sizeof(sb->pad3[1]))) { 1856 pr_warn("Some padding is non-zero on %pg, might be a new feature\n", 1857 rdev->bdev); 1858 if (check_new_feature) 1859 return -EINVAL; 1860 pr_warn("check_new_feature is disabled, data corruption possible\n"); 1861 } 1862 1863 rdev->preferred_minor = 0xffff; 1864 rdev->data_offset = le64_to_cpu(sb->data_offset); 1865 rdev->new_data_offset = rdev->data_offset; 1866 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) && 1867 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET)) 1868 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset); 1869 atomic_set(&rdev->corrected_errors, 
le32_to_cpu(sb->cnt_corrected_read)); 1870 1871 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; 1872 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; 1873 if (rdev->sb_size & bmask) 1874 rdev->sb_size = (rdev->sb_size | bmask) + 1; 1875 1876 if (minor_version 1877 && rdev->data_offset < sb_start + (rdev->sb_size/512)) 1878 return -EINVAL; 1879 if (minor_version 1880 && rdev->new_data_offset < sb_start + (rdev->sb_size/512)) 1881 return -EINVAL; 1882 1883 rdev->desc_nr = le32_to_cpu(sb->dev_number); 1884 1885 if (!rdev->bb_page) { 1886 rdev->bb_page = alloc_page(GFP_KERNEL); 1887 if (!rdev->bb_page) 1888 return -ENOMEM; 1889 } 1890 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) && 1891 rdev->badblocks.count == 0) { 1892 /* need to load the bad block list. 1893 * Currently we limit it to one page. 1894 */ 1895 s32 offset; 1896 sector_t bb_sector; 1897 __le64 *bbp; 1898 int i; 1899 int sectors = le16_to_cpu(sb->bblog_size); 1900 if (sectors > (PAGE_SIZE / 512)) 1901 return -EINVAL; 1902 offset = le32_to_cpu(sb->bblog_offset); 1903 if (offset == 0) 1904 return -EINVAL; 1905 bb_sector = (long long)offset; 1906 if (!sync_page_io(rdev, bb_sector, sectors << 9, 1907 rdev->bb_page, REQ_OP_READ, true)) 1908 return -EIO; 1909 bbp = (__le64 *)page_address(rdev->bb_page); 1910 rdev->badblocks.shift = sb->bblog_shift; 1911 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) { 1912 u64 bb = le64_to_cpu(*bbp); 1913 int count = bb & (0x3ff); 1914 u64 sector = bb >> 10; 1915 sector <<= sb->bblog_shift; 1916 count <<= sb->bblog_shift; 1917 if (bb + 1 == 0) 1918 break; 1919 if (!badblocks_set(&rdev->badblocks, sector, count, 1)) 1920 return -EINVAL; 1921 } 1922 } else if (sb->bblog_offset != 0) 1923 rdev->badblocks.shift = 0; 1924 1925 if ((le32_to_cpu(sb->feature_map) & 1926 (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS))) { 1927 rdev->ppl.offset = (__s16)le16_to_cpu(sb->ppl.offset); 1928 rdev->ppl.size = le16_to_cpu(sb->ppl.size); 1929 rdev->ppl.sector = rdev->sb_start + rdev->ppl.offset; 1930 } 1931 1932 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT) && 1933 sb->level != 0) 1934 return -EINVAL; 1935 1936 /* not spare disk */ 1937 if (rdev->desc_nr >= 0 && rdev->desc_nr < le32_to_cpu(sb->max_dev) && 1938 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX || 1939 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)) 1940 spare_disk = false; 1941 1942 if (!refdev) { 1943 if (!spare_disk) 1944 ret = 1; 1945 else 1946 ret = 0; 1947 } else { 1948 __u64 ev1, ev2; 1949 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page); 1950 1951 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 || 1952 sb->level != refsb->level || 1953 sb->layout != refsb->layout || 1954 sb->chunksize != refsb->chunksize) { 1955 pr_warn("md: %pg has strangely different superblock to %pg\n", 1956 rdev->bdev, 1957 refdev->bdev); 1958 return -EINVAL; 1959 } 1960 ev1 = le64_to_cpu(sb->events); 1961 ev2 = le64_to_cpu(refsb->events); 1962 1963 if (!spare_disk && ev1 > ev2) 1964 ret = 1; 1965 else 1966 ret = 0; 1967 } 1968 if (minor_version) 1969 sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset; 1970 else 1971 sectors = rdev->sb_start; 1972 if (sectors < le64_to_cpu(sb->data_size)) 1973 return -EINVAL; 1974 rdev->sectors = le64_to_cpu(sb->data_size); 1975 return ret; 1976 } 1977 1978 static int super_1_validate(struct mddev *mddev, struct md_rdev *freshest, struct md_rdev *rdev) 1979 { 1980 struct mdp_superblock_1 *sb = page_address(rdev->sb_page); 1981 __u64 ev1 = 
le64_to_cpu(sb->events); 1982 int role; 1983 1984 rdev->raid_disk = -1; 1985 clear_bit(Faulty, &rdev->flags); 1986 clear_bit(In_sync, &rdev->flags); 1987 clear_bit(Bitmap_sync, &rdev->flags); 1988 clear_bit(WriteMostly, &rdev->flags); 1989 1990 if (mddev->raid_disks == 0) { 1991 mddev->major_version = 1; 1992 mddev->patch_version = 0; 1993 mddev->external = 0; 1994 mddev->chunk_sectors = le32_to_cpu(sb->chunksize); 1995 mddev->ctime = le64_to_cpu(sb->ctime); 1996 mddev->utime = le64_to_cpu(sb->utime); 1997 mddev->level = le32_to_cpu(sb->level); 1998 mddev->clevel[0] = 0; 1999 mddev->layout = le32_to_cpu(sb->layout); 2000 mddev->raid_disks = le32_to_cpu(sb->raid_disks); 2001 mddev->dev_sectors = le64_to_cpu(sb->size); 2002 mddev->logical_block_size = le32_to_cpu(sb->logical_block_size); 2003 mddev->events = ev1; 2004 mddev->bitmap_info.offset = 0; 2005 mddev->bitmap_info.space = 0; 2006 /* Default location for bitmap is 1K after superblock 2007 * using 3K - total of 4K 2008 */ 2009 mddev->bitmap_info.default_offset = 1024 >> 9; 2010 mddev->bitmap_info.default_space = (4096-1024) >> 9; 2011 mddev->reshape_backwards = 0; 2012 2013 mddev->resync_offset = le64_to_cpu(sb->resync_offset); 2014 memcpy(mddev->uuid, sb->set_uuid, 16); 2015 2016 mddev->max_disks = (4096-256)/2; 2017 2018 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) && 2019 mddev->bitmap_info.file == NULL) { 2020 mddev->bitmap_info.offset = 2021 (__s32)le32_to_cpu(sb->bitmap_offset); 2022 /* Metadata doesn't record how much space is available. 2023 * For 1.0, we assume we can use up to the superblock 2024 * if before, else to 4K beyond superblock. 2025 * For others, assume no change is possible. 2026 */ 2027 if (mddev->minor_version > 0) 2028 mddev->bitmap_info.space = 0; 2029 else if (mddev->bitmap_info.offset > 0) 2030 mddev->bitmap_info.space = 2031 8 - mddev->bitmap_info.offset; 2032 else 2033 mddev->bitmap_info.space = 2034 -mddev->bitmap_info.offset; 2035 } 2036 2037 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) { 2038 mddev->reshape_position = le64_to_cpu(sb->reshape_position); 2039 mddev->delta_disks = le32_to_cpu(sb->delta_disks); 2040 mddev->new_level = le32_to_cpu(sb->new_level); 2041 mddev->new_layout = le32_to_cpu(sb->new_layout); 2042 mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk); 2043 if (mddev->delta_disks < 0 || 2044 (mddev->delta_disks == 0 && 2045 (le32_to_cpu(sb->feature_map) 2046 & MD_FEATURE_RESHAPE_BACKWARDS))) 2047 mddev->reshape_backwards = 1; 2048 } else { 2049 mddev->reshape_position = MaxSector; 2050 mddev->delta_disks = 0; 2051 mddev->new_level = mddev->level; 2052 mddev->new_layout = mddev->layout; 2053 mddev->new_chunk_sectors = mddev->chunk_sectors; 2054 } 2055 2056 if (mddev->level == 0 && 2057 !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RAID0_LAYOUT)) 2058 mddev->layout = -1; 2059 2060 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL) 2061 set_bit(MD_HAS_JOURNAL, &mddev->flags); 2062 2063 if (le32_to_cpu(sb->feature_map) & 2064 (MD_FEATURE_PPL | MD_FEATURE_MULTIPLE_PPLS)) { 2065 if (le32_to_cpu(sb->feature_map) & 2066 (MD_FEATURE_BITMAP_OFFSET | MD_FEATURE_JOURNAL)) 2067 return -EINVAL; 2068 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_PPL) && 2069 (le32_to_cpu(sb->feature_map) & 2070 MD_FEATURE_MULTIPLE_PPLS)) 2071 return -EINVAL; 2072 set_bit(MD_HAS_PPL, &mddev->flags); 2073 } 2074 } else if (mddev->pers == NULL) { 2075 /* Insist of good event counter while assembling, except for 2076 * spares (which don't need an event count). 
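* (For example, with mddev->events == 100 a member recording events == 99 is still accepted by the check below, while 98 would be rejected; illustrative numbers.)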
2077 * Similar to mdadm, we allow event counter difference of 1 2078 * from the freshest device. 2079 */ 2080 if (rdev->desc_nr >= 0 && 2081 rdev->desc_nr < le32_to_cpu(sb->max_dev) && 2082 (le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < MD_DISK_ROLE_MAX || 2083 le16_to_cpu(sb->dev_roles[rdev->desc_nr]) == MD_DISK_ROLE_JOURNAL)) 2084 if (ev1 + 1 < mddev->events) 2085 return -EINVAL; 2086 } else if (mddev->bitmap) { 2087 /* If adding to array with a bitmap, then we can accept an 2088 * older device, but not too old. 2089 */ 2090 if (ev1 < md_bitmap_events_cleared(mddev)) 2091 return 0; 2092 if (ev1 < mddev->events) 2093 set_bit(Bitmap_sync, &rdev->flags); 2094 } else { 2095 if (ev1 < mddev->events) 2096 /* just a hot-add of a new device, leave raid_disk at -1 */ 2097 return 0; 2098 } 2099 2100 if (rdev->desc_nr < 0 || 2101 rdev->desc_nr >= le32_to_cpu(sb->max_dev)) { 2102 role = MD_DISK_ROLE_SPARE; 2103 rdev->desc_nr = -1; 2104 } else if (mddev->pers == NULL && freshest && ev1 < mddev->events) { 2105 /* 2106 * If we are assembling, and our event counter is smaller than the 2107 * highest event counter, we cannot trust our superblock about the role. 2108 * It could happen that our rdev was marked as Faulty, and all other 2109 * superblocks were updated with +1 event counter. 2110 * Then, before the next superblock update, which typically happens when 2111 * remove_and_add_spares() removes the device from the array, there was 2112 * a crash or reboot. 2113 * If we allow current rdev without consulting the freshest superblock, 2114 * we could cause data corruption. 2115 * Note that in this case our event counter is smaller by 1 than the 2116 * highest, otherwise, this rdev would not be allowed into array; 2117 * both kernel and mdadm allow event counter difference of 1. 2118 */ 2119 struct mdp_superblock_1 *freshest_sb = page_address(freshest->sb_page); 2120 u32 freshest_max_dev = le32_to_cpu(freshest_sb->max_dev); 2121 2122 if (rdev->desc_nr >= freshest_max_dev) { 2123 /* this is unexpected, better not proceed */ 2124 pr_warn("md: %s: rdev[%pg]: desc_nr(%d) >= freshest(%pg)->sb->max_dev(%u)\n", 2125 mdname(mddev), rdev->bdev, rdev->desc_nr, 2126 freshest->bdev, freshest_max_dev); 2127 return -EUCLEAN; 2128 } 2129 2130 role = le16_to_cpu(freshest_sb->dev_roles[rdev->desc_nr]); 2131 pr_debug("md: %s: rdev[%pg]: role=%d(0x%x) according to freshest %pg\n", 2132 mdname(mddev), rdev->bdev, role, role, freshest->bdev); 2133 } else { 2134 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); 2135 } 2136 switch (role) { 2137 case MD_DISK_ROLE_SPARE: /* spare */ 2138 break; 2139 case MD_DISK_ROLE_FAULTY: /* faulty */ 2140 set_bit(Faulty, &rdev->flags); 2141 break; 2142 case MD_DISK_ROLE_JOURNAL: /* journal device */ 2143 if (!(le32_to_cpu(sb->feature_map) & MD_FEATURE_JOURNAL)) { 2144 /* journal device without journal feature */ 2145 pr_warn("md: journal device provided without journal feature, ignoring the device\n"); 2146 return -EINVAL; 2147 } 2148 set_bit(Journal, &rdev->flags); 2149 rdev->journal_tail = le64_to_cpu(sb->journal_tail); 2150 rdev->raid_disk = 0; 2151 break; 2152 default: 2153 rdev->saved_raid_disk = role; 2154 if ((le32_to_cpu(sb->feature_map) & 2155 MD_FEATURE_RECOVERY_OFFSET)) { 2156 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset); 2157 if (!(le32_to_cpu(sb->feature_map) & 2158 MD_FEATURE_RECOVERY_BITMAP)) 2159 rdev->saved_raid_disk = -1; 2160 } else { 2161 /* 2162 * If the array is FROZEN, then the device can't 2163 * be in_sync with rest of array. 
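* In that case In_sync is simply left clear below, while the slot is still recorded in rdev->raid_disk.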
2164 */ 2165 if (!test_bit(MD_RECOVERY_FROZEN, 2166 &mddev->recovery)) 2167 set_bit(In_sync, &rdev->flags); 2168 } 2169 rdev->raid_disk = role; 2170 break; 2171 } 2172 if (sb->devflags & WriteMostly1) 2173 set_bit(WriteMostly, &rdev->flags); 2174 if (sb->devflags & FailFast1) 2175 set_bit(FailFast, &rdev->flags); 2176 if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT) 2177 set_bit(Replacement, &rdev->flags); 2178 2179 return 0; 2180 } 2181 2182 static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev) 2183 { 2184 struct mdp_superblock_1 *sb; 2185 struct md_rdev *rdev2; 2186 int max_dev, i; 2187 /* make rdev->sb match mddev and rdev data. */ 2188 2189 sb = page_address(rdev->sb_page); 2190 2191 sb->feature_map = 0; 2192 sb->pad0 = 0; 2193 sb->recovery_offset = cpu_to_le64(0); 2194 memset(sb->pad3, 0, sizeof(sb->pad3)); 2195 2196 sb->utime = cpu_to_le64((__u64)mddev->utime); 2197 sb->events = cpu_to_le64(mddev->events); 2198 if (mddev->in_sync) 2199 sb->resync_offset = cpu_to_le64(mddev->resync_offset); 2200 else if (test_bit(MD_JOURNAL_CLEAN, &mddev->flags)) 2201 sb->resync_offset = cpu_to_le64(MaxSector); 2202 else 2203 sb->resync_offset = cpu_to_le64(0); 2204 2205 sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors)); 2206 2207 sb->raid_disks = cpu_to_le32(mddev->raid_disks); 2208 sb->size = cpu_to_le64(mddev->dev_sectors); 2209 sb->chunksize = cpu_to_le32(mddev->chunk_sectors); 2210 sb->level = cpu_to_le32(mddev->level); 2211 sb->layout = cpu_to_le32(mddev->layout); 2212 sb->logical_block_size = cpu_to_le32(mddev->logical_block_size); 2213 if (test_bit(FailFast, &rdev->flags)) 2214 sb->devflags |= FailFast1; 2215 else 2216 sb->devflags &= ~FailFast1; 2217 2218 if (test_bit(WriteMostly, &rdev->flags)) 2219 sb->devflags |= WriteMostly1; 2220 else 2221 sb->devflags &= ~WriteMostly1; 2222 sb->data_offset = cpu_to_le64(rdev->data_offset); 2223 sb->data_size = cpu_to_le64(rdev->sectors); 2224 2225 if (mddev->bitmap && mddev->bitmap_info.file == NULL) { 2226 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset); 2227 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); 2228 } 2229 2230 if (rdev->raid_disk >= 0 && !test_bit(Journal, &rdev->flags) && 2231 !test_bit(In_sync, &rdev->flags)) { 2232 sb->feature_map |= 2233 cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET); 2234 sb->recovery_offset = 2235 cpu_to_le64(rdev->recovery_offset); 2236 if (rdev->saved_raid_disk >= 0 && mddev->bitmap) 2237 sb->feature_map |= 2238 cpu_to_le32(MD_FEATURE_RECOVERY_BITMAP); 2239 } 2240 /* Note: recovery_offset and journal_tail share space */ 2241 if (test_bit(Journal, &rdev->flags)) 2242 sb->journal_tail = cpu_to_le64(rdev->journal_tail); 2243 if (test_bit(Replacement, &rdev->flags)) 2244 sb->feature_map |= 2245 cpu_to_le32(MD_FEATURE_REPLACEMENT); 2246 2247 if (mddev->reshape_position != MaxSector) { 2248 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE); 2249 sb->reshape_position = cpu_to_le64(mddev->reshape_position); 2250 sb->new_layout = cpu_to_le32(mddev->new_layout); 2251 sb->delta_disks = cpu_to_le32(mddev->delta_disks); 2252 sb->new_level = cpu_to_le32(mddev->new_level); 2253 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors); 2254 if (mddev->delta_disks == 0 && 2255 mddev->reshape_backwards) 2256 sb->feature_map 2257 |= cpu_to_le32(MD_FEATURE_RESHAPE_BACKWARDS); 2258 if (rdev->new_data_offset != rdev->data_offset) { 2259 sb->feature_map 2260 |= cpu_to_le32(MD_FEATURE_NEW_OFFSET); 2261 sb->new_offset = cpu_to_le32((__u32)(rdev->new_data_offset 2262 
- rdev->data_offset)); 2263 } 2264 } 2265 2266 if (mddev_is_clustered(mddev)) 2267 sb->feature_map |= cpu_to_le32(MD_FEATURE_CLUSTERED); 2268 2269 if (rdev->badblocks.count == 0) 2270 /* Nothing to do for bad blocks*/ ; 2271 else if (sb->bblog_offset == 0) 2272 /* Cannot record bad blocks on this device */ 2273 md_error(mddev, rdev); 2274 else { 2275 struct badblocks *bb = &rdev->badblocks; 2276 __le64 *bbp = (__le64 *)page_address(rdev->bb_page); 2277 u64 *p = bb->page; 2278 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS); 2279 if (bb->changed) { 2280 unsigned seq; 2281 2282 retry: 2283 seq = read_seqbegin(&bb->lock); 2284 2285 memset(bbp, 0xff, PAGE_SIZE); 2286 2287 for (i = 0 ; i < bb->count ; i++) { 2288 u64 internal_bb = p[i]; 2289 u64 store_bb = ((BB_OFFSET(internal_bb) << 10) 2290 | BB_LEN(internal_bb)); 2291 bbp[i] = cpu_to_le64(store_bb); 2292 } 2293 bb->changed = 0; 2294 if (read_seqretry(&bb->lock, seq)) 2295 goto retry; 2296 2297 bb->sector = (rdev->sb_start + 2298 (int)le32_to_cpu(sb->bblog_offset)); 2299 bb->size = le16_to_cpu(sb->bblog_size); 2300 } 2301 } 2302 2303 max_dev = 0; 2304 rdev_for_each(rdev2, mddev) 2305 if (rdev2->desc_nr+1 > max_dev) 2306 max_dev = rdev2->desc_nr+1; 2307 2308 if (max_dev > le32_to_cpu(sb->max_dev)) { 2309 int bmask; 2310 sb->max_dev = cpu_to_le32(max_dev); 2311 rdev->sb_size = max_dev * 2 + 256; 2312 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; 2313 if (rdev->sb_size & bmask) 2314 rdev->sb_size = (rdev->sb_size | bmask) + 1; 2315 } else 2316 max_dev = le32_to_cpu(sb->max_dev); 2317 2318 for (i=0; i<max_dev;i++) 2319 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE); 2320 2321 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) 2322 sb->feature_map |= cpu_to_le32(MD_FEATURE_JOURNAL); 2323 2324 if (test_bit(MD_HAS_PPL, &mddev->flags)) { 2325 if (test_bit(MD_HAS_MULTIPLE_PPLS, &mddev->flags)) 2326 sb->feature_map |= 2327 cpu_to_le32(MD_FEATURE_MULTIPLE_PPLS); 2328 else 2329 sb->feature_map |= cpu_to_le32(MD_FEATURE_PPL); 2330 sb->ppl.offset = cpu_to_le16(rdev->ppl.offset); 2331 sb->ppl.size = cpu_to_le16(rdev->ppl.size); 2332 } 2333 2334 rdev_for_each(rdev2, mddev) { 2335 i = rdev2->desc_nr; 2336 if (test_bit(Faulty, &rdev2->flags)) 2337 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_FAULTY); 2338 else if (test_bit(In_sync, &rdev2->flags)) 2339 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); 2340 else if (test_bit(Journal, &rdev2->flags)) 2341 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_JOURNAL); 2342 else if (rdev2->raid_disk >= 0) 2343 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); 2344 else 2345 sb->dev_roles[i] = cpu_to_le16(MD_DISK_ROLE_SPARE); 2346 } 2347 2348 sb->sb_csum = calc_sb_1_csum(sb); 2349 } 2350 2351 static sector_t super_1_choose_bm_space(sector_t dev_size) 2352 { 2353 sector_t bm_space; 2354 2355 /* if the device is bigger than 8Gig, save 64k for bitmap 2356 * usage, if bigger than 200Gig, save 128k 2357 */ 2358 if (dev_size < 64*2) 2359 bm_space = 0; 2360 else if (dev_size - 64*2 >= 200*1024*1024*2) 2361 bm_space = 128*2; 2362 else if (dev_size - 4*2 > 8*1024*1024*2) 2363 bm_space = 64*2; 2364 else 2365 bm_space = 4*2; 2366 return bm_space; 2367 } 2368 2369 static unsigned long long 2370 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) 2371 { 2372 struct mdp_superblock_1 *sb; 2373 sector_t max_sectors; 2374 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) 2375 return 0; /* component must fit device */ 2376 if (rdev->data_offset != rdev->new_data_offset) 2377 return 0; /* too 
confusing */ 2378 if (rdev->sb_start < rdev->data_offset) { 2379 /* minor versions 1 and 2; superblock before data */ 2380 max_sectors = bdev_nr_sectors(rdev->bdev) - rdev->data_offset; 2381 if (!num_sectors || num_sectors > max_sectors) 2382 num_sectors = max_sectors; 2383 } else if (rdev->mddev->bitmap_info.offset) { 2384 /* minor version 0 with bitmap we can't move */ 2385 return 0; 2386 } else { 2387 /* minor version 0; superblock after data */ 2388 sector_t sb_start, bm_space; 2389 sector_t dev_size = bdev_nr_sectors(rdev->bdev); 2390 2391 /* 8K is for superblock */ 2392 sb_start = dev_size - 8*2; 2393 sb_start &= ~(sector_t)(4*2 - 1); 2394 2395 bm_space = super_1_choose_bm_space(dev_size); 2396 2397 /* Space that can be used to store date needs to decrease 2398 * superblock bitmap space and bad block space(4K) 2399 */ 2400 max_sectors = sb_start - bm_space - 4*2; 2401 2402 if (!num_sectors || num_sectors > max_sectors) 2403 num_sectors = max_sectors; 2404 rdev->sb_start = sb_start; 2405 } 2406 sb = page_address(rdev->sb_page); 2407 sb->data_size = cpu_to_le64(num_sectors); 2408 sb->super_offset = cpu_to_le64(rdev->sb_start); 2409 sb->sb_csum = calc_sb_1_csum(sb); 2410 do { 2411 md_write_metadata(rdev->mddev, rdev, rdev->sb_start, 2412 rdev->sb_size, rdev->sb_page, 0); 2413 } while (md_super_wait(rdev->mddev) < 0); 2414 return num_sectors; 2415 2416 } 2417 2418 static int 2419 super_1_allow_new_offset(struct md_rdev *rdev, 2420 unsigned long long new_offset) 2421 { 2422 struct mddev *mddev = rdev->mddev; 2423 2424 /* All necessary checks on new >= old have been done */ 2425 if (new_offset >= rdev->data_offset) 2426 return 1; 2427 2428 /* with 1.0 metadata, there is no metadata to tread on 2429 * so we can always move back */ 2430 if (mddev->minor_version == 0) 2431 return 1; 2432 2433 /* otherwise we must be sure not to step on 2434 * any metadata, so stay: 2435 * 36K beyond start of superblock 2436 * beyond end of badblocks 2437 * beyond write-intent bitmap 2438 */ 2439 if (rdev->sb_start + (32+4)*2 > new_offset) 2440 return 0; 2441 2442 if (md_bitmap_registered(mddev) && !mddev->bitmap_info.file) { 2443 struct md_bitmap_stats stats; 2444 int err; 2445 2446 err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats); 2447 if (!err && rdev->sb_start + mddev->bitmap_info.offset + 2448 stats.file_pages * (PAGE_SIZE >> 9) > new_offset) 2449 return 0; 2450 } 2451 2452 if (rdev->badblocks.sector + rdev->badblocks.size > new_offset) 2453 return 0; 2454 2455 return 1; 2456 } 2457 2458 static struct super_type super_types[] = { 2459 [0] = { 2460 .name = "0.90.0", 2461 .owner = THIS_MODULE, 2462 .load_super = super_90_load, 2463 .validate_super = super_90_validate, 2464 .sync_super = super_90_sync, 2465 .rdev_size_change = super_90_rdev_size_change, 2466 .allow_new_offset = super_90_allow_new_offset, 2467 }, 2468 [1] = { 2469 .name = "md-1", 2470 .owner = THIS_MODULE, 2471 .load_super = super_1_load, 2472 .validate_super = super_1_validate, 2473 .sync_super = super_1_sync, 2474 .rdev_size_change = super_1_rdev_size_change, 2475 .allow_new_offset = super_1_allow_new_offset, 2476 }, 2477 }; 2478 2479 static void sync_super(struct mddev *mddev, struct md_rdev *rdev) 2480 { 2481 if (mddev->sync_super) { 2482 mddev->sync_super(mddev, rdev); 2483 return; 2484 } 2485 2486 BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types)); 2487 2488 super_types[mddev->major_version].sync_super(mddev, rdev); 2489 } 2490 2491 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2) 2492 { 2493 struct 
md_rdev *rdev, *rdev2; 2494 2495 rcu_read_lock(); 2496 rdev_for_each_rcu(rdev, mddev1) { 2497 if (test_bit(Faulty, &rdev->flags) || 2498 test_bit(Journal, &rdev->flags) || 2499 rdev->raid_disk == -1) 2500 continue; 2501 rdev_for_each_rcu(rdev2, mddev2) { 2502 if (test_bit(Faulty, &rdev2->flags) || 2503 test_bit(Journal, &rdev2->flags) || 2504 rdev2->raid_disk == -1) 2505 continue; 2506 if (rdev->bdev->bd_disk == rdev2->bdev->bd_disk) { 2507 rcu_read_unlock(); 2508 return 1; 2509 } 2510 } 2511 } 2512 rcu_read_unlock(); 2513 return 0; 2514 } 2515 2516 static LIST_HEAD(pending_raid_disks); 2517 2518 /* 2519 * Try to register data integrity profile for an mddev 2520 * 2521 * This is called when an array is started and after a disk has been kicked 2522 * from the array. It only succeeds if all working and active component devices 2523 * are integrity capable with matching profiles. 2524 */ 2525 int md_integrity_register(struct mddev *mddev) 2526 { 2527 if (list_empty(&mddev->disks)) 2528 return 0; /* nothing to do */ 2529 if (mddev_is_dm(mddev) || !blk_get_integrity(mddev->gendisk)) 2530 return 0; /* shouldn't register */ 2531 2532 pr_debug("md: data integrity enabled on %s\n", mdname(mddev)); 2533 return 0; 2534 } 2535 EXPORT_SYMBOL(md_integrity_register); 2536 2537 static bool rdev_read_only(struct md_rdev *rdev) 2538 { 2539 return bdev_read_only(rdev->bdev) || 2540 (rdev->meta_bdev && bdev_read_only(rdev->meta_bdev)); 2541 } 2542 2543 static int bind_rdev_to_array(struct md_rdev *rdev, struct mddev *mddev) 2544 { 2545 char b[BDEVNAME_SIZE]; 2546 int err; 2547 2548 /* prevent duplicates */ 2549 if (find_rdev(mddev, rdev->bdev->bd_dev)) 2550 return -EEXIST; 2551 2552 if (rdev_read_only(rdev) && mddev->pers) 2553 return -EROFS; 2554 2555 /* make sure rdev->sectors exceeds mddev->dev_sectors */ 2556 if (!test_bit(Journal, &rdev->flags) && 2557 rdev->sectors && 2558 (mddev->dev_sectors == 0 || rdev->sectors < mddev->dev_sectors)) { 2559 if (mddev->pers) { 2560 /* Cannot change size, so fail 2561 * If mddev->level <= 0, then we don't care 2562 * about aligning sizes (e.g. linear) 2563 */ 2564 if (mddev->level > 0) 2565 return -ENOSPC; 2566 } else 2567 mddev->dev_sectors = rdev->sectors; 2568 } 2569 2570 /* Verify rdev->desc_nr is unique. 
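* (desc_nr also indexes sb->dev_roles[] when v1 superblocks are written, so two rdevs sharing a number would clobber each other's role.)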
2571 * If it is -1, assign a free number, else 2572 * check number is not in use 2573 */ 2574 rcu_read_lock(); 2575 if (rdev->desc_nr < 0) { 2576 int choice = 0; 2577 if (mddev->pers) 2578 choice = mddev->raid_disks; 2579 while (md_find_rdev_nr_rcu(mddev, choice)) 2580 choice++; 2581 rdev->desc_nr = choice; 2582 } else { 2583 if (md_find_rdev_nr_rcu(mddev, rdev->desc_nr)) { 2584 rcu_read_unlock(); 2585 return -EBUSY; 2586 } 2587 } 2588 rcu_read_unlock(); 2589 if (!test_bit(Journal, &rdev->flags) && 2590 mddev->max_disks && rdev->desc_nr >= mddev->max_disks) { 2591 pr_warn("md: %s: array is limited to %d devices\n", 2592 mdname(mddev), mddev->max_disks); 2593 return -EBUSY; 2594 } 2595 snprintf(b, sizeof(b), "%pg", rdev->bdev); 2596 strreplace(b, '/', '!'); 2597 2598 rdev->mddev = mddev; 2599 pr_debug("md: bind<%s>\n", b); 2600 2601 if (mddev->raid_disks) 2602 mddev_create_serial_pool(mddev, rdev); 2603 2604 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) 2605 goto fail; 2606 2607 /* failure here is OK */ 2608 err = sysfs_create_link(&rdev->kobj, bdev_kobj(rdev->bdev), "block"); 2609 rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state"); 2610 rdev->sysfs_unack_badblocks = 2611 sysfs_get_dirent_safe(rdev->kobj.sd, "unacknowledged_bad_blocks"); 2612 rdev->sysfs_badblocks = 2613 sysfs_get_dirent_safe(rdev->kobj.sd, "bad_blocks"); 2614 2615 list_add_rcu(&rdev->same_set, &mddev->disks); 2616 bd_link_disk_holder(rdev->bdev, mddev->gendisk); 2617 2618 /* May as well allow recovery to be retried once */ 2619 mddev->recovery_disabled++; 2620 2621 return 0; 2622 2623 fail: 2624 pr_warn("md: failed to register dev-%s for %s\n", 2625 b, mdname(mddev)); 2626 mddev_destroy_serial_pool(mddev, rdev); 2627 return err; 2628 } 2629 2630 void md_autodetect_dev(dev_t dev); 2631 2632 /* just for claiming the bdev */ 2633 static struct md_rdev claim_rdev; 2634 2635 static void export_rdev(struct md_rdev *rdev, struct mddev *mddev) 2636 { 2637 pr_debug("md: export_rdev(%pg)\n", rdev->bdev); 2638 md_rdev_clear(rdev); 2639 #ifndef MODULE 2640 if (test_bit(AutoDetected, &rdev->flags)) 2641 md_autodetect_dev(rdev->bdev->bd_dev); 2642 #endif 2643 fput(rdev->bdev_file); 2644 rdev->bdev = NULL; 2645 kobject_put(&rdev->kobj); 2646 } 2647 2648 static void md_kick_rdev_from_array(struct md_rdev *rdev) 2649 { 2650 struct mddev *mddev = rdev->mddev; 2651 2652 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); 2653 list_del_rcu(&rdev->same_set); 2654 pr_debug("md: unbind<%pg>\n", rdev->bdev); 2655 mddev_destroy_serial_pool(rdev->mddev, rdev); 2656 WRITE_ONCE(rdev->mddev, NULL); 2657 sysfs_remove_link(&rdev->kobj, "block"); 2658 sysfs_put(rdev->sysfs_state); 2659 sysfs_put(rdev->sysfs_unack_badblocks); 2660 sysfs_put(rdev->sysfs_badblocks); 2661 rdev->sysfs_state = NULL; 2662 rdev->sysfs_unack_badblocks = NULL; 2663 rdev->sysfs_badblocks = NULL; 2664 rdev->badblocks.count = 0; 2665 2666 synchronize_rcu(); 2667 2668 /* 2669 * kobject_del() will wait for all in progress writers to be done, where 2670 * reconfig_mutex is held, hence it can't be called under 2671 * reconfig_mutex and it's delayed to mddev_unlock(). 
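* The rdev is therefore parked on the mddev->deleting list below and exported only after the caller drops the lock.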
2672 */ 2673 list_add(&rdev->same_set, &mddev->deleting); 2674 } 2675 2676 static void export_array(struct mddev *mddev) 2677 { 2678 struct md_rdev *rdev; 2679 2680 while (!list_empty(&mddev->disks)) { 2681 rdev = list_first_entry(&mddev->disks, struct md_rdev, 2682 same_set); 2683 md_kick_rdev_from_array(rdev); 2684 } 2685 mddev->raid_disks = 0; 2686 mddev->major_version = 0; 2687 } 2688 2689 static bool set_in_sync(struct mddev *mddev) 2690 { 2691 lockdep_assert_held(&mddev->lock); 2692 if (!mddev->in_sync) { 2693 mddev->sync_checkers++; 2694 spin_unlock(&mddev->lock); 2695 percpu_ref_switch_to_atomic_sync(&mddev->writes_pending); 2696 spin_lock(&mddev->lock); 2697 if (!mddev->in_sync && 2698 percpu_ref_is_zero(&mddev->writes_pending)) { 2699 mddev->in_sync = 1; 2700 /* 2701 * Ensure ->in_sync is visible before we clear 2702 * ->sync_checkers. 2703 */ 2704 smp_mb(); 2705 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 2706 sysfs_notify_dirent_safe(mddev->sysfs_state); 2707 } 2708 if (--mddev->sync_checkers == 0) 2709 percpu_ref_switch_to_percpu(&mddev->writes_pending); 2710 } 2711 if (mddev->safemode == 1) 2712 mddev->safemode = 0; 2713 return mddev->in_sync; 2714 } 2715 2716 static void sync_sbs(struct mddev *mddev, int nospares) 2717 { 2718 /* Update each superblock (in-memory image), but 2719 * if we are allowed to, skip spares which already 2720 * have the right event counter, or have one earlier 2721 * (which would mean they aren't being marked as dirty 2722 * with the rest of the array) 2723 */ 2724 struct md_rdev *rdev; 2725 rdev_for_each(rdev, mddev) { 2726 if (rdev->sb_events == mddev->events || 2727 (nospares && 2728 rdev->raid_disk < 0 && 2729 rdev->sb_events+1 == mddev->events)) { 2730 /* Don't update this superblock */ 2731 rdev->sb_loaded = 2; 2732 } else { 2733 sync_super(mddev, rdev); 2734 rdev->sb_loaded = 1; 2735 } 2736 } 2737 } 2738 2739 static bool does_sb_need_changing(struct mddev *mddev) 2740 { 2741 struct md_rdev *rdev = NULL, *iter; 2742 struct mdp_superblock_1 *sb; 2743 int role; 2744 2745 /* Find a good rdev */ 2746 rdev_for_each(iter, mddev) 2747 if ((iter->raid_disk >= 0) && !test_bit(Faulty, &iter->flags)) { 2748 rdev = iter; 2749 break; 2750 } 2751 2752 /* No good device found. */ 2753 if (!rdev) 2754 return false; 2755 2756 sb = page_address(rdev->sb_page); 2757 /* Check if a device has become faulty or a spare become active */ 2758 rdev_for_each(rdev, mddev) { 2759 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); 2760 /* Device activated? */ 2761 if (role == MD_DISK_ROLE_SPARE && rdev->raid_disk >= 0 && 2762 !test_bit(Faulty, &rdev->flags)) 2763 return true; 2764 /* Device turned faulty? 
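* (i.e. the superblock still records a valid role for this slot while the in-memory rdev is now marked Faulty)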
*/ 2765 if (test_bit(Faulty, &rdev->flags) && (role < MD_DISK_ROLE_MAX)) 2766 return true; 2767 } 2768 2769 /* Check if any mddev parameters have changed */ 2770 if ((mddev->dev_sectors != le64_to_cpu(sb->size)) || 2771 (mddev->reshape_position != le64_to_cpu(sb->reshape_position)) || 2772 (mddev->layout != le32_to_cpu(sb->layout)) || 2773 (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) || 2774 (mddev->chunk_sectors != le32_to_cpu(sb->chunksize))) 2775 return true; 2776 2777 return false; 2778 } 2779 2780 void md_update_sb(struct mddev *mddev, int force_change) 2781 { 2782 struct md_rdev *rdev; 2783 int sync_req; 2784 int nospares = 0; 2785 int any_badblocks_changed = 0; 2786 int ret = -1; 2787 2788 if (!md_is_rdwr(mddev)) { 2789 if (force_change) 2790 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 2791 return; 2792 } 2793 2794 repeat: 2795 if (mddev_is_clustered(mddev)) { 2796 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) 2797 force_change = 1; 2798 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags)) 2799 nospares = 1; 2800 ret = mddev->cluster_ops->metadata_update_start(mddev); 2801 /* Has someone else updated the sb? */ 2802 if (!does_sb_need_changing(mddev)) { 2803 if (ret == 0) 2804 mddev->cluster_ops->metadata_update_cancel(mddev); 2805 bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING), 2806 BIT(MD_SB_CHANGE_DEVS) | 2807 BIT(MD_SB_CHANGE_CLEAN)); 2808 return; 2809 } 2810 } 2811 2812 /* 2813 * First make sure individual recovery_offsets are correct. 2814 * curr_resync_completed can only be used during recovery. 2815 * During reshape/resync it might use array-addresses rather 2816 * than device addresses. 2817 */ 2818 rdev_for_each(rdev, mddev) { 2819 if (rdev->raid_disk >= 0 && 2820 mddev->delta_disks >= 0 && 2821 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && 2822 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery) && 2823 !test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 2824 !test_bit(Journal, &rdev->flags) && 2825 !test_bit(In_sync, &rdev->flags) && 2826 mddev->curr_resync_completed > rdev->recovery_offset) 2827 rdev->recovery_offset = mddev->curr_resync_completed; 2828 2829 } 2830 if (!mddev->persistent) { 2831 clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 2832 clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 2833 if (!mddev->external) { 2834 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 2835 rdev_for_each(rdev, mddev) { 2836 if (rdev->badblocks.changed) { 2837 rdev->badblocks.changed = 0; 2838 ack_all_badblocks(&rdev->badblocks); 2839 md_error(mddev, rdev); 2840 } 2841 clear_bit(Blocked, &rdev->flags); 2842 clear_bit(BlockedBadBlocks, &rdev->flags); 2843 wake_up(&rdev->blocked_wait); 2844 } 2845 } 2846 wake_up(&mddev->sb_wait); 2847 return; 2848 } 2849 2850 spin_lock(&mddev->lock); 2851 2852 mddev->utime = ktime_get_real_seconds(); 2853 2854 if (test_and_clear_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) 2855 force_change = 1; 2856 if (test_and_clear_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags)) 2857 /* just a clean <-> dirty transition, possibly leave spares alone, 2858 * though if events isn't the right even/odd, we will have to do 2859 * spares after all 2860 */ 2861 nospares = 1; 2862 if (force_change) 2863 nospares = 0; 2864 if (mddev->degraded) 2865 /* If the array is degraded, then skipping spares is both 2866 * dangerous and fairly pointless. 2867 * Dangerous because a device that was removed from the array 2868 * might have an event_count that still looks up-to-date, 2869 * so it can be re-added without a resync.
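* (For instance, a member that failed right after the most recent superblock write would still carry a matching event count; illustrative scenario.)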
2870 * Pointless because if there are any spares to skip, 2871 * then a recovery will happen and soon that array won't 2872 * be degraded any more and the spare can go back to sleep then. 2873 */ 2874 nospares = 0; 2875 2876 sync_req = mddev->in_sync; 2877 2878 /* If this is just a dirty<->clean transition, and the array is clean 2879 * and 'events' is odd, we can roll back to the previous clean state */ 2880 if (nospares 2881 && (mddev->in_sync && mddev->resync_offset == MaxSector) 2882 && mddev->can_decrease_events 2883 && mddev->events != 1) { 2884 mddev->events--; 2885 mddev->can_decrease_events = 0; 2886 } else { 2887 /* otherwise we have to go forward and ... */ 2888 mddev->events ++; 2889 mddev->can_decrease_events = nospares; 2890 } 2891 2892 /* 2893 * This 64-bit counter should never wrap. 2894 * Either we are in around ~1 trillion A.C., assuming 2895 * 1 reboot per second, or we have a bug... 2896 */ 2897 WARN_ON(mddev->events == 0); 2898 2899 rdev_for_each(rdev, mddev) { 2900 if (rdev->badblocks.changed) 2901 any_badblocks_changed++; 2902 if (test_bit(Faulty, &rdev->flags)) 2903 set_bit(FaultRecorded, &rdev->flags); 2904 } 2905 2906 sync_sbs(mddev, nospares); 2907 spin_unlock(&mddev->lock); 2908 2909 pr_debug("md: updating %s RAID superblock on device (in sync %d)\n", 2910 mdname(mddev), mddev->in_sync); 2911 2912 mddev_add_trace_msg(mddev, "md md_update_sb"); 2913 rewrite: 2914 if (md_bitmap_enabled(mddev, false)) 2915 mddev->bitmap_ops->update_sb(mddev->bitmap); 2916 rdev_for_each(rdev, mddev) { 2917 if (rdev->sb_loaded != 1) 2918 continue; /* no noise on spare devices */ 2919 2920 if (!test_bit(Faulty, &rdev->flags)) { 2921 md_write_metadata(mddev, rdev, rdev->sb_start, 2922 rdev->sb_size, rdev->sb_page, 0); 2923 pr_debug("md: (write) %pg's sb offset: %llu\n", 2924 rdev->bdev, 2925 (unsigned long long)rdev->sb_start); 2926 rdev->sb_events = mddev->events; 2927 if (rdev->badblocks.size) { 2928 md_write_metadata(mddev, rdev, 2929 rdev->badblocks.sector, 2930 rdev->badblocks.size << 9, 2931 rdev->bb_page, 0); 2932 rdev->badblocks.size = 0; 2933 } 2934 2935 } else 2936 pr_debug("md: %pg (skipping faulty)\n", 2937 rdev->bdev); 2938 } 2939 if (md_super_wait(mddev) < 0) 2940 goto rewrite; 2941 /* if there was a failure, MD_SB_CHANGE_DEVS was set, and we re-write super */ 2942 2943 if (mddev_is_clustered(mddev) && ret == 0) 2944 mddev->cluster_ops->metadata_update_finish(mddev); 2945 2946 if (mddev->in_sync != sync_req || 2947 !bit_clear_unless(&mddev->sb_flags, BIT(MD_SB_CHANGE_PENDING), 2948 BIT(MD_SB_CHANGE_DEVS) | BIT(MD_SB_CHANGE_CLEAN))) 2949 /* have to write it out again */ 2950 goto repeat; 2951 wake_up(&mddev->sb_wait); 2952 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 2953 sysfs_notify_dirent_safe(mddev->sysfs_completed); 2954 2955 rdev_for_each(rdev, mddev) { 2956 if (test_and_clear_bit(FaultRecorded, &rdev->flags)) 2957 clear_bit(Blocked, &rdev->flags); 2958 2959 if (any_badblocks_changed) 2960 ack_all_badblocks(&rdev->badblocks); 2961 clear_bit(BlockedBadBlocks, &rdev->flags); 2962 wake_up(&rdev->blocked_wait); 2963 } 2964 } 2965 EXPORT_SYMBOL(md_update_sb); 2966 2967 static int add_bound_rdev(struct md_rdev *rdev) 2968 { 2969 struct mddev *mddev = rdev->mddev; 2970 int err = 0; 2971 bool add_journal = test_bit(Journal, &rdev->flags); 2972 2973 if (!mddev->pers->hot_remove_disk || add_journal) { 2974 /* If there is hot_add_disk but no hot_remove_disk 2975 * then added disks for geometry changes, 2976 * and should be added immediately. 
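* Journal devices take this path as well (add_journal above), since they too need to be activated immediately rather than sit as spares.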
2977 */ 2978 super_types[mddev->major_version]. 2979 validate_super(mddev, NULL/*freshest*/, rdev); 2980 err = mddev->pers->hot_add_disk(mddev, rdev); 2981 if (err) { 2982 md_kick_rdev_from_array(rdev); 2983 return err; 2984 } 2985 } 2986 sysfs_notify_dirent_safe(rdev->sysfs_state); 2987 2988 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 2989 if (mddev->degraded) 2990 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 2991 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2992 md_new_event(); 2993 return 0; 2994 } 2995 2996 /* words written to sysfs files may, or may not, be \n terminated. 2997 * We want to accept with case. For this we use cmd_match. 2998 */ 2999 static int cmd_match(const char *cmd, const char *str) 3000 { 3001 /* See if cmd, written into a sysfs file, matches 3002 * str. They must either be the same, or cmd can 3003 * have a trailing newline 3004 */ 3005 while (*cmd && *str && *cmd == *str) { 3006 cmd++; 3007 str++; 3008 } 3009 if (*cmd == '\n') 3010 cmd++; 3011 if (*str || *cmd) 3012 return 0; 3013 return 1; 3014 } 3015 3016 struct rdev_sysfs_entry { 3017 struct attribute attr; 3018 ssize_t (*show)(struct md_rdev *, char *); 3019 ssize_t (*store)(struct md_rdev *, const char *, size_t); 3020 }; 3021 3022 static ssize_t 3023 state_show(struct md_rdev *rdev, char *page) 3024 { 3025 char *sep = ","; 3026 size_t len = 0; 3027 unsigned long flags = READ_ONCE(rdev->flags); 3028 3029 if (test_bit(Faulty, &flags) || 3030 (!test_bit(ExternalBbl, &flags) && 3031 rdev->badblocks.unacked_exist)) 3032 len += sprintf(page+len, "faulty%s", sep); 3033 if (test_bit(In_sync, &flags)) 3034 len += sprintf(page+len, "in_sync%s", sep); 3035 if (test_bit(Journal, &flags)) 3036 len += sprintf(page+len, "journal%s", sep); 3037 if (test_bit(WriteMostly, &flags)) 3038 len += sprintf(page+len, "write_mostly%s", sep); 3039 if (test_bit(Blocked, &flags) || 3040 (rdev->badblocks.unacked_exist 3041 && !test_bit(Faulty, &flags))) 3042 len += sprintf(page+len, "blocked%s", sep); 3043 if (!test_bit(Faulty, &flags) && 3044 !test_bit(Journal, &flags) && 3045 !test_bit(In_sync, &flags)) 3046 len += sprintf(page+len, "spare%s", sep); 3047 if (test_bit(WriteErrorSeen, &flags)) 3048 len += sprintf(page+len, "write_error%s", sep); 3049 if (test_bit(WantReplacement, &flags)) 3050 len += sprintf(page+len, "want_replacement%s", sep); 3051 if (test_bit(Replacement, &flags)) 3052 len += sprintf(page+len, "replacement%s", sep); 3053 if (test_bit(ExternalBbl, &flags)) 3054 len += sprintf(page+len, "external_bbl%s", sep); 3055 if (test_bit(FailFast, &flags)) 3056 len += sprintf(page+len, "failfast%s", sep); 3057 3058 if (len) 3059 len -= strlen(sep); 3060 3061 return len+sprintf(page+len, "\n"); 3062 } 3063 3064 static ssize_t 3065 state_store(struct md_rdev *rdev, const char *buf, size_t len) 3066 { 3067 /* can write 3068 * faulty - simulates an error 3069 * remove - disconnects the device 3070 * writemostly - sets write_mostly 3071 * -writemostly - clears write_mostly 3072 * blocked - sets the Blocked flags 3073 * -blocked - clears the Blocked and possibly simulates an error 3074 * insync - sets Insync providing device isn't active 3075 * -insync - clear Insync for a device with a slot assigned, 3076 * so that it gets rebuilt based on bitmap 3077 * write_error - sets WriteErrorSeen 3078 * -write_error - clears WriteErrorSeen 3079 * {,-}failfast - set/clear FailFast 3080 */ 3081 3082 struct mddev *mddev = rdev->mddev; 3083 int err = -EINVAL; 3084 bool need_update_sb = false; 3085 3086 if (cmd_match(buf, "faulty") && 
rdev->mddev->pers) { 3087 md_error(rdev->mddev, rdev); 3088 3089 if (test_bit(MD_BROKEN, &rdev->mddev->flags)) 3090 err = -EBUSY; 3091 else 3092 err = 0; 3093 } else if (cmd_match(buf, "remove")) { 3094 if (rdev->mddev->pers) { 3095 clear_bit(Blocked, &rdev->flags); 3096 remove_and_add_spares(rdev->mddev, rdev); 3097 } 3098 if (rdev->raid_disk >= 0) 3099 err = -EBUSY; 3100 else { 3101 err = 0; 3102 if (mddev_is_clustered(mddev)) 3103 err = mddev->cluster_ops->remove_disk(mddev, rdev); 3104 3105 if (err == 0) { 3106 md_kick_rdev_from_array(rdev); 3107 if (mddev->pers) 3108 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 3109 md_new_event(); 3110 } 3111 } 3112 } else if (cmd_match(buf, "writemostly")) { 3113 set_bit(WriteMostly, &rdev->flags); 3114 mddev_create_serial_pool(rdev->mddev, rdev); 3115 need_update_sb = true; 3116 err = 0; 3117 } else if (cmd_match(buf, "-writemostly")) { 3118 mddev_destroy_serial_pool(rdev->mddev, rdev); 3119 clear_bit(WriteMostly, &rdev->flags); 3120 need_update_sb = true; 3121 err = 0; 3122 } else if (cmd_match(buf, "blocked")) { 3123 set_bit(Blocked, &rdev->flags); 3124 err = 0; 3125 } else if (cmd_match(buf, "-blocked")) { 3126 if (!test_bit(Faulty, &rdev->flags) && 3127 !test_bit(ExternalBbl, &rdev->flags) && 3128 rdev->badblocks.unacked_exist) { 3129 /* metadata handler doesn't understand badblocks, 3130 * so we need to fail the device 3131 */ 3132 md_error(rdev->mddev, rdev); 3133 } 3134 clear_bit(Blocked, &rdev->flags); 3135 clear_bit(BlockedBadBlocks, &rdev->flags); 3136 wake_up(&rdev->blocked_wait); 3137 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 3138 3139 err = 0; 3140 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) { 3141 set_bit(In_sync, &rdev->flags); 3142 err = 0; 3143 } else if (cmd_match(buf, "failfast")) { 3144 set_bit(FailFast, &rdev->flags); 3145 need_update_sb = true; 3146 err = 0; 3147 } else if (cmd_match(buf, "-failfast")) { 3148 clear_bit(FailFast, &rdev->flags); 3149 need_update_sb = true; 3150 err = 0; 3151 } else if (cmd_match(buf, "-insync") && rdev->raid_disk >= 0 && 3152 !test_bit(Journal, &rdev->flags)) { 3153 if (rdev->mddev->pers == NULL) { 3154 clear_bit(In_sync, &rdev->flags); 3155 rdev->saved_raid_disk = rdev->raid_disk; 3156 rdev->raid_disk = -1; 3157 err = 0; 3158 } 3159 } else if (cmd_match(buf, "write_error")) { 3160 set_bit(WriteErrorSeen, &rdev->flags); 3161 err = 0; 3162 } else if (cmd_match(buf, "-write_error")) { 3163 clear_bit(WriteErrorSeen, &rdev->flags); 3164 err = 0; 3165 } else if (cmd_match(buf, "want_replacement")) { 3166 /* Any non-spare device that is not a replacement can 3167 * become want_replacement at any time, but we then need to 3168 * check if recovery is needed. 3169 */ 3170 if (rdev->raid_disk >= 0 && 3171 !test_bit(Journal, &rdev->flags) && 3172 !test_bit(Replacement, &rdev->flags)) 3173 set_bit(WantReplacement, &rdev->flags); 3174 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 3175 err = 0; 3176 } else if (cmd_match(buf, "-want_replacement")) { 3177 /* Clearing 'want_replacement' is always allowed. 3178 * Once replacements starts it is too late though. 3179 */ 3180 err = 0; 3181 clear_bit(WantReplacement, &rdev->flags); 3182 } else if (cmd_match(buf, "replacement")) { 3183 /* Can only set a device as a replacement when array has not 3184 * yet been started. Once running, replacement is automatic 3185 * from spares, or by assigning 'slot'. 
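* (e.g. writing "replacement" to this state attribute while the array is still being assembled, before it is started; illustrative usage)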
3186 */ 3187 if (rdev->mddev->pers) 3188 err = -EBUSY; 3189 else { 3190 set_bit(Replacement, &rdev->flags); 3191 err = 0; 3192 } 3193 } else if (cmd_match(buf, "-replacement")) { 3194 /* Similarly, can only clear Replacement before start */ 3195 if (rdev->mddev->pers) 3196 err = -EBUSY; 3197 else { 3198 clear_bit(Replacement, &rdev->flags); 3199 err = 0; 3200 } 3201 } else if (cmd_match(buf, "re-add")) { 3202 if (!rdev->mddev->pers) 3203 err = -EINVAL; 3204 else if (test_bit(Faulty, &rdev->flags) && (rdev->raid_disk == -1) && 3205 rdev->saved_raid_disk >= 0) { 3206 /* clear_bit is performed _after_ all the devices 3207 * have their local Faulty bit cleared. If any writes 3208 * happen in the meantime in the local node, they 3209 * will land in the local bitmap, which will be synced 3210 * by this node eventually 3211 */ 3212 if (!mddev_is_clustered(rdev->mddev) || 3213 (err = mddev->cluster_ops->gather_bitmaps(rdev)) == 0) { 3214 clear_bit(Faulty, &rdev->flags); 3215 err = add_bound_rdev(rdev); 3216 } 3217 } else 3218 err = -EBUSY; 3219 } else if (cmd_match(buf, "external_bbl") && (rdev->mddev->external)) { 3220 set_bit(ExternalBbl, &rdev->flags); 3221 rdev->badblocks.shift = 0; 3222 err = 0; 3223 } else if (cmd_match(buf, "-external_bbl") && (rdev->mddev->external)) { 3224 clear_bit(ExternalBbl, &rdev->flags); 3225 err = 0; 3226 } 3227 if (need_update_sb) 3228 md_update_sb(mddev, 1); 3229 if (!err) 3230 sysfs_notify_dirent_safe(rdev->sysfs_state); 3231 return err ? err : len; 3232 } 3233 static struct rdev_sysfs_entry rdev_state = 3234 __ATTR_PREALLOC(state, S_IRUGO|S_IWUSR, state_show, state_store); 3235 3236 static ssize_t 3237 errors_show(struct md_rdev *rdev, char *page) 3238 { 3239 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); 3240 } 3241 3242 static ssize_t 3243 errors_store(struct md_rdev *rdev, const char *buf, size_t len) 3244 { 3245 unsigned int n; 3246 int rv; 3247 3248 rv = kstrtouint(buf, 10, &n); 3249 if (rv < 0) 3250 return rv; 3251 atomic_set(&rdev->corrected_errors, n); 3252 return len; 3253 } 3254 static struct rdev_sysfs_entry rdev_errors = 3255 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store); 3256 3257 static ssize_t 3258 slot_show(struct md_rdev *rdev, char *page) 3259 { 3260 if (test_bit(Journal, &rdev->flags)) 3261 return sprintf(page, "journal\n"); 3262 else if (rdev->raid_disk < 0) 3263 return sprintf(page, "none\n"); 3264 else 3265 return sprintf(page, "%d\n", rdev->raid_disk); 3266 } 3267 3268 static ssize_t 3269 slot_store(struct md_rdev *rdev, const char *buf, size_t len) 3270 { 3271 int slot; 3272 int err; 3273 3274 if (test_bit(Journal, &rdev->flags)) 3275 return -EBUSY; 3276 if (strncmp(buf, "none", 4)==0) 3277 slot = -1; 3278 else { 3279 err = kstrtouint(buf, 10, (unsigned int *)&slot); 3280 if (err < 0) 3281 return err; 3282 if (slot < 0) 3283 /* overflow */ 3284 return -ENOSPC; 3285 } 3286 if (rdev->mddev->pers && slot == -1) { 3287 /* Setting 'slot' on an active array requires also 3288 * updating the 'rd%d' link, and communicating 3289 * with the personality with ->hot_*_disk. 3290 * For now we only support removing 3291 * failed/spare devices. This normally happens automatically, 3292 * but not when the metadata is externally managed. 
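* (e.g. "echo none > slot" for a failed member of an externally managed array; illustrative usage)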
3293 */ 3294 if (rdev->raid_disk == -1) 3295 return -EEXIST; 3296 /* personality does all needed checks */ 3297 if (rdev->mddev->pers->hot_remove_disk == NULL) 3298 return -EINVAL; 3299 clear_bit(Blocked, &rdev->flags); 3300 remove_and_add_spares(rdev->mddev, rdev); 3301 if (rdev->raid_disk >= 0) 3302 return -EBUSY; 3303 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 3304 } else if (rdev->mddev->pers) { 3305 /* Activating a spare .. or possibly reactivating 3306 * if we ever get bitmaps working here. 3307 */ 3308 int err; 3309 3310 if (rdev->raid_disk != -1) 3311 return -EBUSY; 3312 3313 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery)) 3314 return -EBUSY; 3315 3316 if (rdev->mddev->pers->hot_add_disk == NULL) 3317 return -EINVAL; 3318 3319 if (slot >= rdev->mddev->raid_disks && 3320 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) 3321 return -ENOSPC; 3322 3323 rdev->raid_disk = slot; 3324 if (test_bit(In_sync, &rdev->flags)) 3325 rdev->saved_raid_disk = slot; 3326 else 3327 rdev->saved_raid_disk = -1; 3328 clear_bit(In_sync, &rdev->flags); 3329 clear_bit(Bitmap_sync, &rdev->flags); 3330 err = rdev->mddev->pers->hot_add_disk(rdev->mddev, rdev); 3331 if (err) { 3332 rdev->raid_disk = -1; 3333 return err; 3334 } else 3335 sysfs_notify_dirent_safe(rdev->sysfs_state); 3336 /* failure here is OK */; 3337 sysfs_link_rdev(rdev->mddev, rdev); 3338 /* don't wakeup anyone, leave that to userspace. */ 3339 } else { 3340 if (slot >= rdev->mddev->raid_disks && 3341 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) 3342 return -ENOSPC; 3343 rdev->raid_disk = slot; 3344 /* assume it is working */ 3345 clear_bit(Faulty, &rdev->flags); 3346 clear_bit(WriteMostly, &rdev->flags); 3347 set_bit(In_sync, &rdev->flags); 3348 sysfs_notify_dirent_safe(rdev->sysfs_state); 3349 } 3350 return len; 3351 } 3352 3353 static struct rdev_sysfs_entry rdev_slot = 3354 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store); 3355 3356 static ssize_t 3357 offset_show(struct md_rdev *rdev, char *page) 3358 { 3359 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); 3360 } 3361 3362 static ssize_t 3363 offset_store(struct md_rdev *rdev, const char *buf, size_t len) 3364 { 3365 unsigned long long offset; 3366 if (kstrtoull(buf, 10, &offset) < 0) 3367 return -EINVAL; 3368 if (rdev->mddev->pers && rdev->raid_disk >= 0) 3369 return -EBUSY; 3370 if (rdev->sectors && rdev->mddev->external) 3371 /* Must set offset before size, so overlap checks 3372 * can be sane */ 3373 return -EBUSY; 3374 rdev->data_offset = offset; 3375 rdev->new_data_offset = offset; 3376 return len; 3377 } 3378 3379 static struct rdev_sysfs_entry rdev_offset = 3380 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store); 3381 3382 static ssize_t new_offset_show(struct md_rdev *rdev, char *page) 3383 { 3384 return sprintf(page, "%llu\n", 3385 (unsigned long long)rdev->new_data_offset); 3386 } 3387 3388 static ssize_t new_offset_store(struct md_rdev *rdev, 3389 const char *buf, size_t len) 3390 { 3391 unsigned long long new_offset; 3392 struct mddev *mddev = rdev->mddev; 3393 3394 if (kstrtoull(buf, 10, &new_offset) < 0) 3395 return -EINVAL; 3396 3397 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 3398 return -EBUSY; 3399 if (new_offset == rdev->data_offset) 3400 /* reset is always permitted */ 3401 ; 3402 else if (new_offset > rdev->data_offset) { 3403 /* must not push array size beyond rdev_sectors */ 3404 if (new_offset - rdev->data_offset 3405 + mddev->dev_sectors > rdev->sectors) 3406 return -E2BIG; 
3407 } 3408 /* Metadata worries about other space details. */ 3409 3410 /* decreasing the offset is inconsistent with a backwards 3411 * reshape. 3412 */ 3413 if (new_offset < rdev->data_offset && 3414 mddev->reshape_backwards) 3415 return -EINVAL; 3416 /* Increasing offset is inconsistent with forwards 3417 * reshape. reshape_direction should be set to 3418 * 'backwards' first. 3419 */ 3420 if (new_offset > rdev->data_offset && 3421 !mddev->reshape_backwards) 3422 return -EINVAL; 3423 3424 if (mddev->pers && mddev->persistent && 3425 !super_types[mddev->major_version] 3426 .allow_new_offset(rdev, new_offset)) 3427 return -E2BIG; 3428 rdev->new_data_offset = new_offset; 3429 if (new_offset > rdev->data_offset) 3430 mddev->reshape_backwards = 1; 3431 else if (new_offset < rdev->data_offset) 3432 mddev->reshape_backwards = 0; 3433 3434 return len; 3435 } 3436 static struct rdev_sysfs_entry rdev_new_offset = 3437 __ATTR(new_offset, S_IRUGO|S_IWUSR, new_offset_show, new_offset_store); 3438 3439 static ssize_t 3440 rdev_size_show(struct md_rdev *rdev, char *page) 3441 { 3442 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2); 3443 } 3444 3445 static int md_rdevs_overlap(struct md_rdev *a, struct md_rdev *b) 3446 { 3447 /* check if two start/length pairs overlap */ 3448 if (a->data_offset + a->sectors <= b->data_offset) 3449 return false; 3450 if (b->data_offset + b->sectors <= a->data_offset) 3451 return false; 3452 return true; 3453 } 3454 3455 static bool md_rdev_overlaps(struct md_rdev *rdev) 3456 { 3457 struct mddev *mddev; 3458 struct md_rdev *rdev2; 3459 3460 spin_lock(&all_mddevs_lock); 3461 list_for_each_entry(mddev, &all_mddevs, all_mddevs) { 3462 if (test_bit(MD_DELETED, &mddev->flags)) 3463 continue; 3464 rdev_for_each(rdev2, mddev) { 3465 if (rdev != rdev2 && rdev->bdev == rdev2->bdev && 3466 md_rdevs_overlap(rdev, rdev2)) { 3467 spin_unlock(&all_mddevs_lock); 3468 return true; 3469 } 3470 } 3471 } 3472 spin_unlock(&all_mddevs_lock); 3473 return false; 3474 } 3475 3476 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors) 3477 { 3478 unsigned long long blocks; 3479 sector_t new; 3480 3481 if (kstrtoull(buf, 10, &blocks) < 0) 3482 return -EINVAL; 3483 3484 if (blocks & 1ULL << (8 * sizeof(blocks) - 1)) 3485 return -EINVAL; /* sector conversion overflow */ 3486 3487 new = blocks * 2; 3488 if (new != blocks * 2) 3489 return -EINVAL; /* unsigned long long to sector_t overflow */ 3490 3491 *sectors = new; 3492 return 0; 3493 } 3494 3495 static ssize_t 3496 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len) 3497 { 3498 struct mddev *my_mddev = rdev->mddev; 3499 sector_t oldsectors = rdev->sectors; 3500 sector_t sectors; 3501 3502 if (test_bit(Journal, &rdev->flags)) 3503 return -EBUSY; 3504 if (strict_blocks_to_sectors(buf, &sectors) < 0) 3505 return -EINVAL; 3506 if (rdev->data_offset != rdev->new_data_offset) 3507 return -EINVAL; /* too confusing */ 3508 if (my_mddev->pers && rdev->raid_disk >= 0) { 3509 if (my_mddev->persistent) { 3510 sectors = super_types[my_mddev->major_version].
3511 rdev_size_change(rdev, sectors); 3512 if (!sectors) 3513 return -EBUSY; 3514 } else if (!sectors) 3515 sectors = bdev_nr_sectors(rdev->bdev) - 3516 rdev->data_offset; 3517 if (!my_mddev->pers->resize) 3518 /* Cannot change size for RAID0 or Linear etc */ 3519 return -EINVAL; 3520 } 3521 if (sectors < my_mddev->dev_sectors) 3522 return -EINVAL; /* component must fit device */ 3523 3524 rdev->sectors = sectors; 3525 3526 /* 3527 * Check that all other rdevs with the same bdev do not overlap. This 3528 * check does not provide a hard guarantee, it just helps avoid 3529 * dangerous mistakes. 3530 */ 3531 if (sectors > oldsectors && my_mddev->external && 3532 md_rdev_overlaps(rdev)) { 3533 /* 3534 * Someone else could have slipped in a size change here, but 3535 * doing so is just silly. We put oldsectors back because we 3536 * know it is safe, and trust userspace not to race with itself. 3537 */ 3538 rdev->sectors = oldsectors; 3539 return -EBUSY; 3540 } 3541 return len; 3542 } 3543 3544 static struct rdev_sysfs_entry rdev_size = 3545 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store); 3546 3547 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page) 3548 { 3549 unsigned long long recovery_start = rdev->recovery_offset; 3550 3551 if (test_bit(In_sync, &rdev->flags) || 3552 recovery_start == MaxSector) 3553 return sprintf(page, "none\n"); 3554 3555 return sprintf(page, "%llu\n", recovery_start); 3556 } 3557 3558 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len) 3559 { 3560 unsigned long long recovery_start; 3561 3562 if (cmd_match(buf, "none")) 3563 recovery_start = MaxSector; 3564 else if (kstrtoull(buf, 10, &recovery_start)) 3565 return -EINVAL; 3566 3567 if (rdev->mddev->pers && 3568 rdev->raid_disk >= 0) 3569 return -EBUSY; 3570 3571 rdev->recovery_offset = recovery_start; 3572 if (recovery_start == MaxSector) 3573 set_bit(In_sync, &rdev->flags); 3574 else 3575 clear_bit(In_sync, &rdev->flags); 3576 return len; 3577 } 3578 3579 static struct rdev_sysfs_entry rdev_recovery_start = 3580 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store); 3581 3582 /* sysfs access to bad-blocks list. 3583 * We present two files. 3584 * 'bad-blocks' lists sector numbers and lengths of ranges that 3585 * are recorded as bad. The list is truncated to fit within 3586 * the one-page limit of sysfs. 3587 * Writing "sector length" to this file adds an acknowledged 3588 * bad block list. 3589 * 'unacknowledged-bad-blocks' lists bad blocks that have not yet 3590 * been acknowledged. Writing to this file adds bad blocks 3591 * without acknowledging them. This is largely for testing. 
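* Both files accept writes of the form "sector length", e.g. "4096 8" to record an 8-sector range starting at sector 4096 (illustrative values).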
3592 */ 3593 static ssize_t bb_show(struct md_rdev *rdev, char *page) 3594 { 3595 return badblocks_show(&rdev->badblocks, page, 0); 3596 } 3597 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len) 3598 { 3599 int rv = badblocks_store(&rdev->badblocks, page, len, 0); 3600 /* Maybe that ack was all we needed */ 3601 if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags)) 3602 wake_up(&rdev->blocked_wait); 3603 return rv; 3604 } 3605 static struct rdev_sysfs_entry rdev_bad_blocks = 3606 __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store); 3607 3608 static ssize_t ubb_show(struct md_rdev *rdev, char *page) 3609 { 3610 return badblocks_show(&rdev->badblocks, page, 1); 3611 } 3612 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len) 3613 { 3614 return badblocks_store(&rdev->badblocks, page, len, 1); 3615 } 3616 static struct rdev_sysfs_entry rdev_unack_bad_blocks = 3617 __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store); 3618 3619 static ssize_t 3620 ppl_sector_show(struct md_rdev *rdev, char *page) 3621 { 3622 return sprintf(page, "%llu\n", (unsigned long long)rdev->ppl.sector); 3623 } 3624 3625 static ssize_t 3626 ppl_sector_store(struct md_rdev *rdev, const char *buf, size_t len) 3627 { 3628 unsigned long long sector; 3629 3630 if (kstrtoull(buf, 10, &sector) < 0) 3631 return -EINVAL; 3632 if (sector != (sector_t)sector) 3633 return -EINVAL; 3634 3635 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) && 3636 rdev->raid_disk >= 0) 3637 return -EBUSY; 3638 3639 if (rdev->mddev->persistent) { 3640 if (rdev->mddev->major_version == 0) 3641 return -EINVAL; 3642 if ((sector > rdev->sb_start && 3643 sector - rdev->sb_start > S16_MAX) || 3644 (sector < rdev->sb_start && 3645 rdev->sb_start - sector > -S16_MIN)) 3646 return -EINVAL; 3647 rdev->ppl.offset = sector - rdev->sb_start; 3648 } else if (!rdev->mddev->external) { 3649 return -EBUSY; 3650 } 3651 rdev->ppl.sector = sector; 3652 return len; 3653 } 3654 3655 static struct rdev_sysfs_entry rdev_ppl_sector = 3656 __ATTR(ppl_sector, S_IRUGO|S_IWUSR, ppl_sector_show, ppl_sector_store); 3657 3658 static ssize_t 3659 ppl_size_show(struct md_rdev *rdev, char *page) 3660 { 3661 return sprintf(page, "%u\n", rdev->ppl.size); 3662 } 3663 3664 static ssize_t 3665 ppl_size_store(struct md_rdev *rdev, const char *buf, size_t len) 3666 { 3667 unsigned int size; 3668 3669 if (kstrtouint(buf, 10, &size) < 0) 3670 return -EINVAL; 3671 3672 if (rdev->mddev->pers && test_bit(MD_HAS_PPL, &rdev->mddev->flags) && 3673 rdev->raid_disk >= 0) 3674 return -EBUSY; 3675 3676 if (rdev->mddev->persistent) { 3677 if (rdev->mddev->major_version == 0) 3678 return -EINVAL; 3679 if (size > U16_MAX) 3680 return -EINVAL; 3681 } else if (!rdev->mddev->external) { 3682 return -EBUSY; 3683 } 3684 rdev->ppl.size = size; 3685 return len; 3686 } 3687 3688 static struct rdev_sysfs_entry rdev_ppl_size = 3689 __ATTR(ppl_size, S_IRUGO|S_IWUSR, ppl_size_show, ppl_size_store); 3690 3691 static struct attribute *rdev_default_attrs[] = { 3692 &rdev_state.attr, 3693 &rdev_errors.attr, 3694 &rdev_slot.attr, 3695 &rdev_offset.attr, 3696 &rdev_new_offset.attr, 3697 &rdev_size.attr, 3698 &rdev_recovery_start.attr, 3699 &rdev_bad_blocks.attr, 3700 &rdev_unack_bad_blocks.attr, 3701 &rdev_ppl_sector.attr, 3702 &rdev_ppl_size.attr, 3703 NULL, 3704 }; 3705 ATTRIBUTE_GROUPS(rdev_default); 3706 static ssize_t 3707 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 3708 { 3709 struct rdev_sysfs_entry
*entry = container_of(attr, struct rdev_sysfs_entry, attr); 3710 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); 3711 3712 if (!entry->show) 3713 return -EIO; 3714 if (!rdev->mddev) 3715 return -ENODEV; 3716 return entry->show(rdev, page); 3717 } 3718 3719 static ssize_t 3720 rdev_attr_store(struct kobject *kobj, struct attribute *attr, 3721 const char *page, size_t length) 3722 { 3723 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 3724 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); 3725 struct kernfs_node *kn = NULL; 3726 bool suspend = false; 3727 ssize_t rv; 3728 struct mddev *mddev = READ_ONCE(rdev->mddev); 3729 3730 if (!entry->store) 3731 return -EIO; 3732 if (!capable(CAP_SYS_ADMIN)) 3733 return -EACCES; 3734 if (!mddev) 3735 return -ENODEV; 3736 3737 if (entry->store == state_store) { 3738 if (cmd_match(page, "remove")) 3739 kn = sysfs_break_active_protection(kobj, attr); 3740 if (cmd_match(page, "remove") || cmd_match(page, "re-add") || 3741 cmd_match(page, "writemostly") || 3742 cmd_match(page, "-writemostly")) 3743 suspend = true; 3744 } 3745 3746 rv = suspend ? mddev_suspend_and_lock(mddev) : mddev_lock(mddev); 3747 if (!rv) { 3748 if (rdev->mddev == NULL) 3749 rv = -ENODEV; 3750 else 3751 rv = entry->store(rdev, page, length); 3752 suspend ? mddev_unlock_and_resume(mddev) : mddev_unlock(mddev); 3753 } 3754 3755 if (kn) 3756 sysfs_unbreak_active_protection(kn); 3757 3758 return rv; 3759 } 3760 3761 static void rdev_free(struct kobject *ko) 3762 { 3763 struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj); 3764 kfree(rdev); 3765 } 3766 static const struct sysfs_ops rdev_sysfs_ops = { 3767 .show = rdev_attr_show, 3768 .store = rdev_attr_store, 3769 }; 3770 static const struct kobj_type rdev_ktype = { 3771 .release = rdev_free, 3772 .sysfs_ops = &rdev_sysfs_ops, 3773 .default_groups = rdev_default_groups, 3774 }; 3775 3776 int md_rdev_init(struct md_rdev *rdev) 3777 { 3778 rdev->desc_nr = -1; 3779 rdev->saved_raid_disk = -1; 3780 rdev->raid_disk = -1; 3781 rdev->flags = 0; 3782 rdev->data_offset = 0; 3783 rdev->new_data_offset = 0; 3784 rdev->sb_events = 0; 3785 rdev->last_read_error = 0; 3786 rdev->sb_loaded = 0; 3787 rdev->bb_page = NULL; 3788 atomic_set(&rdev->nr_pending, 0); 3789 atomic_set(&rdev->read_errors, 0); 3790 atomic_set(&rdev->corrected_errors, 0); 3791 3792 INIT_LIST_HEAD(&rdev->same_set); 3793 init_waitqueue_head(&rdev->blocked_wait); 3794 3795 /* Add space to store bad block list. 3796 * This reserves the space even on arrays where it cannot 3797 * be used - I wonder if that matters 3798 */ 3799 return badblocks_init(&rdev->badblocks, 0); 3800 } 3801 EXPORT_SYMBOL_GPL(md_rdev_init); 3802 3803 /* 3804 * Import a device. If 'super_format' >= 0, then sanity check the superblock 3805 * 3806 * mark the device faulty if: 3807 * 3808 * - the device is nonexistent (zero size) 3809 * - the device has no valid superblock 3810 * 3811 * a faulty rdev _never_ has rdev->sb set. 
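* On any failure below, the opened bdev file and the partially initialized rdev are released again before an ERR_PTR() is returned.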
3812 */ 3813 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor) 3814 { 3815 struct md_rdev *rdev; 3816 sector_t size; 3817 int err; 3818 3819 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); 3820 if (!rdev) 3821 return ERR_PTR(-ENOMEM); 3822 3823 err = md_rdev_init(rdev); 3824 if (err) 3825 goto out_free_rdev; 3826 err = alloc_disk_sb(rdev); 3827 if (err) 3828 goto out_clear_rdev; 3829 3830 rdev->bdev_file = bdev_file_open_by_dev(newdev, 3831 BLK_OPEN_READ | BLK_OPEN_WRITE, 3832 super_format == -2 ? &claim_rdev : rdev, NULL); 3833 if (IS_ERR(rdev->bdev_file)) { 3834 pr_warn("md: could not open device unknown-block(%u,%u).\n", 3835 MAJOR(newdev), MINOR(newdev)); 3836 err = PTR_ERR(rdev->bdev_file); 3837 goto out_clear_rdev; 3838 } 3839 rdev->bdev = file_bdev(rdev->bdev_file); 3840 3841 kobject_init(&rdev->kobj, &rdev_ktype); 3842 3843 size = bdev_nr_bytes(rdev->bdev) >> BLOCK_SIZE_BITS; 3844 if (!size) { 3845 pr_warn("md: %pg has zero or unknown size, marking faulty!\n", 3846 rdev->bdev); 3847 err = -EINVAL; 3848 goto out_blkdev_put; 3849 } 3850 3851 if (super_format >= 0) { 3852 err = super_types[super_format]. 3853 load_super(rdev, NULL, super_minor); 3854 if (err == -EINVAL) { 3855 pr_warn("md: %pg does not have a valid v%d.%d superblock, not importing!\n", 3856 rdev->bdev, 3857 super_format, super_minor); 3858 goto out_blkdev_put; 3859 } 3860 if (err < 0) { 3861 pr_warn("md: could not read %pg's sb, not importing!\n", 3862 rdev->bdev); 3863 goto out_blkdev_put; 3864 } 3865 } 3866 3867 return rdev; 3868 3869 out_blkdev_put: 3870 fput(rdev->bdev_file); 3871 out_clear_rdev: 3872 md_rdev_clear(rdev); 3873 out_free_rdev: 3874 kfree(rdev); 3875 return ERR_PTR(err); 3876 } 3877 3878 /* 3879 * Check a full RAID array for plausibility 3880 */ 3881 3882 static int analyze_sbs(struct mddev *mddev) 3883 { 3884 int i; 3885 struct md_rdev *rdev, *freshest, *tmp; 3886 3887 freshest = NULL; 3888 rdev_for_each_safe(rdev, tmp, mddev) 3889 switch (super_types[mddev->major_version]. 3890 load_super(rdev, freshest, mddev->minor_version)) { 3891 case 1: 3892 freshest = rdev; 3893 break; 3894 case 0: 3895 break; 3896 default: 3897 pr_warn("md: fatal superblock inconsistency in %pg -- removing from array\n", 3898 rdev->bdev); 3899 md_kick_rdev_from_array(rdev); 3900 } 3901 3902 /* Cannot find a valid fresh disk */ 3903 if (!freshest) { 3904 pr_warn("md: cannot find a valid disk\n"); 3905 return -EINVAL; 3906 } 3907 3908 super_types[mddev->major_version]. 3909 validate_super(mddev, NULL/*freshest*/, freshest); 3910 3911 i = 0; 3912 rdev_for_each_safe(rdev, tmp, mddev) { 3913 if (mddev->max_disks && 3914 (rdev->desc_nr >= mddev->max_disks || 3915 i > mddev->max_disks)) { 3916 pr_warn("md: %s: %pg: only %d devices permitted\n", 3917 mdname(mddev), rdev->bdev, 3918 mddev->max_disks); 3919 md_kick_rdev_from_array(rdev); 3920 continue; 3921 } 3922 if (rdev != freshest) { 3923 if (super_types[mddev->major_version]. 3924 validate_super(mddev, freshest, rdev)) { 3925 pr_warn("md: kicking non-fresh %pg from array!\n", 3926 rdev->bdev); 3927 md_kick_rdev_from_array(rdev); 3928 continue; 3929 } 3930 } 3931 if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks)) && 3932 !test_bit(Journal, &rdev->flags)) { 3933 rdev->raid_disk = -1; 3934 clear_bit(In_sync, &rdev->flags); 3935 } 3936 } 3937 3938 return 0; 3939 } 3940 3941 /* Read a fixed-point number. 3942 * Numbers in sysfs attributes should be in "standard" units where 3943 * possible, so time should be in seconds. 
3944 * However we internally use a much smaller unit such as
3945 * milliseconds or jiffies.
3946 * This function takes a decimal number with a possible fractional
3947 * component, and produces an integer which is the result of
3948 * multiplying that number by 10^'scale',
3949 * all without any floating-point arithmetic.
 * For example, with scale=3 the string "5.3" yields 5300.
3950 */
3951 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
3952 {
3953 unsigned long result = 0;
3954 long decimals = -1;
3955 while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
3956 if (*cp == '.')
3957 decimals = 0;
3958 else if (decimals < scale) {
3959 unsigned int value;
3960 value = *cp - '0';
3961 result = result * 10 + value;
3962 if (decimals >= 0)
3963 decimals++;
3964 }
3965 cp++;
3966 }
3967 if (*cp == '\n')
3968 cp++;
3969 if (*cp)
3970 return -EINVAL;
3971 if (decimals < 0)
3972 decimals = 0;
3973 *res = result * int_pow(10, scale - decimals);
3974 return 0;
3975 }
3976
3977 static ssize_t
3978 safe_delay_show(struct mddev *mddev, char *page)
3979 {
3980 unsigned int msec = ((unsigned long)mddev->safemode_delay*1000)/HZ;
3981
3982 return sprintf(page, "%u.%03u\n", msec/1000, msec%1000);
3983 }
3984 static ssize_t
3985 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len)
3986 {
3987 unsigned long msec;
3988
3989 if (mddev_is_clustered(mddev)) {
3990 pr_warn("md: Safemode is disabled for clustered mode\n");
3991 return -EINVAL;
3992 }
3993
3994 if (strict_strtoul_scaled(cbuf, &msec, 3) < 0 || msec > UINT_MAX / HZ)
3995 return -EINVAL;
3996 if (msec == 0)
3997 mddev->safemode_delay = 0;
3998 else {
3999 unsigned long old_delay = mddev->safemode_delay;
4000 unsigned long new_delay = (msec*HZ)/1000;
4001
4002 if (new_delay == 0)
4003 new_delay = 1;
4004 mddev->safemode_delay = new_delay;
4005 if (new_delay < old_delay || old_delay == 0)
4006 mod_timer(&mddev->safemode_timer, jiffies+1);
4007 }
4008 return len;
4009 }
4010 static struct md_sysfs_entry md_safe_delay =
4011 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR, safe_delay_show, safe_delay_store);
4012
4013 static ssize_t
4014 level_show(struct mddev *mddev, char *page)
4015 {
4016 struct md_personality *p;
4017 int ret;
4018 spin_lock(&mddev->lock);
4019 p = mddev->pers;
4020 if (p)
4021 ret = sprintf(page, "%s\n", p->head.name);
4022 else if (mddev->clevel[0])
4023 ret = sprintf(page, "%s\n", mddev->clevel);
4024 else if (mddev->level != LEVEL_NONE)
4025 ret = sprintf(page, "%d\n", mddev->level);
4026 else
4027 ret = 0;
4028 spin_unlock(&mddev->lock);
4029 return ret;
4030 }
4031
4032 static ssize_t
4033 level_store(struct mddev *mddev, const char *buf, size_t len)
4034 {
4035 char clevel[16];
4036 ssize_t rv;
4037 size_t slen = len;
4038 struct md_personality *pers, *oldpers;
4039 long level;
4040 void *priv, *oldpriv;
4041 struct md_rdev *rdev;
4042
4043 if (slen == 0 || slen >= sizeof(clevel))
4044 return -EINVAL;
4045
4046 rv = mddev_suspend_and_lock(mddev);
4047 if (rv)
4048 return rv;
4049
4050 if (mddev->pers == NULL) {
4051 memcpy(mddev->clevel, buf, slen);
4052 if (mddev->clevel[slen-1] == '\n')
4053 slen--;
4054 mddev->clevel[slen] = 0;
4055 mddev->level = LEVEL_NONE;
4056 rv = len;
4057 goto out_unlock;
4058 }
4059 rv = -EROFS;
4060 if (!md_is_rdwr(mddev))
4061 goto out_unlock;
4062
4063 /* request to change the personality. Need to ensure:
4064 * - array is not engaged in resync/recovery/reshape
4065 * - old personality can be suspended
4066 * - new personality will access other array.
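 *
 * For example, writing "raid6" to /sys/block/mdX/md/level asks that
 * personality to take over the array via its ->takeover() hook; the write
 * fails with -EBUSY while resync/recovery/reshape is running, and with
 * -EINVAL if the target personality provides no ->takeover().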
4067 */ 4068 4069 rv = -EBUSY; 4070 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 4071 mddev->reshape_position != MaxSector || 4072 mddev->sysfs_active) 4073 goto out_unlock; 4074 4075 rv = -EINVAL; 4076 if (!mddev->pers->quiesce) { 4077 pr_warn("md: %s: %s does not support online personality change\n", 4078 mdname(mddev), mddev->pers->head.name); 4079 goto out_unlock; 4080 } 4081 4082 /* Now find the new personality */ 4083 memcpy(clevel, buf, slen); 4084 if (clevel[slen-1] == '\n') 4085 slen--; 4086 clevel[slen] = 0; 4087 if (kstrtol(clevel, 10, &level)) 4088 level = LEVEL_NONE; 4089 4090 if (request_module("md-%s", clevel) != 0) 4091 request_module("md-level-%s", clevel); 4092 pers = get_pers(level, clevel); 4093 if (!pers) { 4094 rv = -EINVAL; 4095 goto out_unlock; 4096 } 4097 4098 if (pers == mddev->pers) { 4099 /* Nothing to do! */ 4100 put_pers(pers); 4101 rv = len; 4102 goto out_unlock; 4103 } 4104 if (!pers->takeover) { 4105 put_pers(pers); 4106 pr_warn("md: %s: %s does not support personality takeover\n", 4107 mdname(mddev), clevel); 4108 rv = -EINVAL; 4109 goto out_unlock; 4110 } 4111 4112 rdev_for_each(rdev, mddev) 4113 rdev->new_raid_disk = rdev->raid_disk; 4114 4115 /* ->takeover must set new_* and/or delta_disks 4116 * if it succeeds, and may set them when it fails. 4117 */ 4118 priv = pers->takeover(mddev); 4119 if (IS_ERR(priv)) { 4120 mddev->new_level = mddev->level; 4121 mddev->new_layout = mddev->layout; 4122 mddev->new_chunk_sectors = mddev->chunk_sectors; 4123 mddev->raid_disks -= mddev->delta_disks; 4124 mddev->delta_disks = 0; 4125 mddev->reshape_backwards = 0; 4126 put_pers(pers); 4127 pr_warn("md: %s: %s would not accept array\n", 4128 mdname(mddev), clevel); 4129 rv = PTR_ERR(priv); 4130 goto out_unlock; 4131 } 4132 4133 /* Looks like we have a winner */ 4134 mddev_detach(mddev); 4135 4136 spin_lock(&mddev->lock); 4137 oldpers = mddev->pers; 4138 oldpriv = mddev->private; 4139 mddev->pers = pers; 4140 mddev->private = priv; 4141 strscpy(mddev->clevel, pers->head.name, sizeof(mddev->clevel)); 4142 mddev->level = mddev->new_level; 4143 mddev->layout = mddev->new_layout; 4144 mddev->chunk_sectors = mddev->new_chunk_sectors; 4145 mddev->delta_disks = 0; 4146 mddev->reshape_backwards = 0; 4147 mddev->degraded = 0; 4148 spin_unlock(&mddev->lock); 4149 4150 if (oldpers->sync_request == NULL && 4151 mddev->external) { 4152 /* We are converting from a no-redundancy array 4153 * to a redundancy array and metadata is managed 4154 * externally so we need to be sure that writes 4155 * won't block due to a need to transition 4156 * clean->dirty 4157 * until external management is started. 
4158 */ 4159 mddev->in_sync = 0; 4160 mddev->safemode_delay = 0; 4161 mddev->safemode = 0; 4162 } 4163 4164 oldpers->free(mddev, oldpriv); 4165 4166 if (oldpers->sync_request == NULL && 4167 pers->sync_request != NULL) { 4168 /* need to add the md_redundancy_group */ 4169 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 4170 pr_warn("md: cannot register extra attributes for %s\n", 4171 mdname(mddev)); 4172 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action"); 4173 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed"); 4174 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded"); 4175 } 4176 if (oldpers->sync_request != NULL && 4177 pers->sync_request == NULL) { 4178 /* need to remove the md_redundancy_group */ 4179 if (mddev->to_remove == NULL) 4180 mddev->to_remove = &md_redundancy_group; 4181 } 4182 4183 put_pers(oldpers); 4184 4185 rdev_for_each(rdev, mddev) { 4186 if (rdev->raid_disk < 0) 4187 continue; 4188 if (rdev->new_raid_disk >= mddev->raid_disks) 4189 rdev->new_raid_disk = -1; 4190 if (rdev->new_raid_disk == rdev->raid_disk) 4191 continue; 4192 sysfs_unlink_rdev(mddev, rdev); 4193 } 4194 rdev_for_each(rdev, mddev) { 4195 if (rdev->raid_disk < 0) 4196 continue; 4197 if (rdev->new_raid_disk == rdev->raid_disk) 4198 continue; 4199 rdev->raid_disk = rdev->new_raid_disk; 4200 if (rdev->raid_disk < 0) 4201 clear_bit(In_sync, &rdev->flags); 4202 else { 4203 if (sysfs_link_rdev(mddev, rdev)) 4204 pr_warn("md: cannot register rd%d for %s after level change\n", 4205 rdev->raid_disk, mdname(mddev)); 4206 } 4207 } 4208 4209 if (pers->sync_request == NULL) { 4210 /* this is now an array without redundancy, so 4211 * it must always be in_sync 4212 */ 4213 mddev->in_sync = 1; 4214 timer_delete_sync(&mddev->safemode_timer); 4215 } 4216 pers->run(mddev); 4217 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 4218 if (!mddev->thread) 4219 md_update_sb(mddev, 1); 4220 sysfs_notify_dirent_safe(mddev->sysfs_level); 4221 md_new_event(); 4222 rv = len; 4223 out_unlock: 4224 mddev_unlock_and_resume(mddev); 4225 return rv; 4226 } 4227 4228 static struct md_sysfs_entry md_level = 4229 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store); 4230 4231 static ssize_t 4232 new_level_show(struct mddev *mddev, char *page) 4233 { 4234 return sprintf(page, "%d\n", mddev->new_level); 4235 } 4236 4237 static ssize_t 4238 new_level_store(struct mddev *mddev, const char *buf, size_t len) 4239 { 4240 unsigned int n; 4241 int err; 4242 4243 err = kstrtouint(buf, 10, &n); 4244 if (err < 0) 4245 return err; 4246 err = mddev_lock(mddev); 4247 if (err) 4248 return err; 4249 4250 mddev->new_level = n; 4251 md_update_sb(mddev, 1); 4252 4253 mddev_unlock(mddev); 4254 return len; 4255 } 4256 static struct md_sysfs_entry md_new_level = 4257 __ATTR(new_level, 0664, new_level_show, new_level_store); 4258 4259 static ssize_t 4260 bitmap_type_show(struct mddev *mddev, char *page) 4261 { 4262 struct md_submodule_head *head; 4263 unsigned long i; 4264 ssize_t len = 0; 4265 4266 if (mddev->bitmap_id == ID_BITMAP_NONE) 4267 len += sprintf(page + len, "[none] "); 4268 else 4269 len += sprintf(page + len, "none "); 4270 4271 xa_lock(&md_submodule); 4272 xa_for_each(&md_submodule, i, head) { 4273 if (head->type != MD_BITMAP) 4274 continue; 4275 4276 if (mddev->bitmap_id == head->id) 4277 len += sprintf(page + len, "[%s] ", head->name); 4278 else 4279 len += sprintf(page + len, "%s ", head->name); 4280 } 4281 xa_unlock(&md_submodule); 4282 4283 len += sprintf(page + 
len, "\n"); 4284 return len; 4285 } 4286 4287 static ssize_t 4288 bitmap_type_store(struct mddev *mddev, const char *buf, size_t len) 4289 { 4290 struct md_submodule_head *head; 4291 enum md_submodule_id id; 4292 unsigned long i; 4293 int err = 0; 4294 4295 xa_lock(&md_submodule); 4296 4297 if (mddev->bitmap_ops) { 4298 err = -EBUSY; 4299 goto out; 4300 } 4301 4302 if (cmd_match(buf, "none")) { 4303 mddev->bitmap_id = ID_BITMAP_NONE; 4304 goto out; 4305 } 4306 4307 xa_for_each(&md_submodule, i, head) { 4308 if (head->type == MD_BITMAP && cmd_match(buf, head->name)) { 4309 mddev->bitmap_id = head->id; 4310 goto out; 4311 } 4312 } 4313 4314 err = kstrtoint(buf, 10, &id); 4315 if (err) 4316 goto out; 4317 4318 if (id == ID_BITMAP_NONE) { 4319 mddev->bitmap_id = id; 4320 goto out; 4321 } 4322 4323 head = xa_load(&md_submodule, id); 4324 if (head && head->type == MD_BITMAP) { 4325 mddev->bitmap_id = id; 4326 goto out; 4327 } 4328 4329 err = -ENOENT; 4330 4331 out: 4332 xa_unlock(&md_submodule); 4333 return err ? err : len; 4334 } 4335 4336 static struct md_sysfs_entry md_bitmap_type = 4337 __ATTR(bitmap_type, 0664, bitmap_type_show, bitmap_type_store); 4338 4339 static ssize_t 4340 layout_show(struct mddev *mddev, char *page) 4341 { 4342 /* just a number, not meaningful for all levels */ 4343 if (mddev->reshape_position != MaxSector && 4344 mddev->layout != mddev->new_layout) 4345 return sprintf(page, "%d (%d)\n", 4346 mddev->new_layout, mddev->layout); 4347 return sprintf(page, "%d\n", mddev->layout); 4348 } 4349 4350 static ssize_t 4351 layout_store(struct mddev *mddev, const char *buf, size_t len) 4352 { 4353 unsigned int n; 4354 int err; 4355 4356 err = kstrtouint(buf, 10, &n); 4357 if (err < 0) 4358 return err; 4359 err = mddev_lock(mddev); 4360 if (err) 4361 return err; 4362 4363 if (mddev->pers) { 4364 if (mddev->pers->check_reshape == NULL) 4365 err = -EBUSY; 4366 else if (!md_is_rdwr(mddev)) 4367 err = -EROFS; 4368 else { 4369 mddev->new_layout = n; 4370 err = mddev->pers->check_reshape(mddev); 4371 if (err) 4372 mddev->new_layout = mddev->layout; 4373 } 4374 } else { 4375 mddev->new_layout = n; 4376 if (mddev->reshape_position == MaxSector) 4377 mddev->layout = n; 4378 } 4379 mddev_unlock(mddev); 4380 return err ?: len; 4381 } 4382 static struct md_sysfs_entry md_layout = 4383 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store); 4384 4385 static ssize_t 4386 raid_disks_show(struct mddev *mddev, char *page) 4387 { 4388 if (mddev->raid_disks == 0) 4389 return 0; 4390 if (mddev->reshape_position != MaxSector && 4391 mddev->delta_disks != 0) 4392 return sprintf(page, "%d (%d)\n", mddev->raid_disks, 4393 mddev->raid_disks - mddev->delta_disks); 4394 return sprintf(page, "%d\n", mddev->raid_disks); 4395 } 4396 4397 static int update_raid_disks(struct mddev *mddev, int raid_disks); 4398 4399 static ssize_t 4400 raid_disks_store(struct mddev *mddev, const char *buf, size_t len) 4401 { 4402 unsigned int n; 4403 int err; 4404 4405 err = kstrtouint(buf, 10, &n); 4406 if (err < 0) 4407 return err; 4408 4409 err = mddev_lock(mddev); 4410 if (err) 4411 return err; 4412 if (mddev->pers) 4413 err = update_raid_disks(mddev, n); 4414 else if (mddev->reshape_position != MaxSector) { 4415 struct md_rdev *rdev; 4416 int olddisks = mddev->raid_disks - mddev->delta_disks; 4417 4418 err = -EINVAL; 4419 rdev_for_each(rdev, mddev) { 4420 if (olddisks < n && 4421 rdev->data_offset < rdev->new_data_offset) 4422 goto out_unlock; 4423 if (olddisks > n && 4424 rdev->data_offset > rdev->new_data_offset) 4425 
goto out_unlock; 4426 } 4427 err = 0; 4428 mddev->delta_disks = n - olddisks; 4429 mddev->raid_disks = n; 4430 mddev->reshape_backwards = (mddev->delta_disks < 0); 4431 } else 4432 mddev->raid_disks = n; 4433 out_unlock: 4434 mddev_unlock(mddev); 4435 return err ? err : len; 4436 } 4437 static struct md_sysfs_entry md_raid_disks = 4438 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store); 4439 4440 static ssize_t 4441 uuid_show(struct mddev *mddev, char *page) 4442 { 4443 return sprintf(page, "%pU\n", mddev->uuid); 4444 } 4445 static struct md_sysfs_entry md_uuid = 4446 __ATTR(uuid, S_IRUGO, uuid_show, NULL); 4447 4448 static ssize_t 4449 chunk_size_show(struct mddev *mddev, char *page) 4450 { 4451 if (mddev->reshape_position != MaxSector && 4452 mddev->chunk_sectors != mddev->new_chunk_sectors) 4453 return sprintf(page, "%d (%d)\n", 4454 mddev->new_chunk_sectors << 9, 4455 mddev->chunk_sectors << 9); 4456 return sprintf(page, "%d\n", mddev->chunk_sectors << 9); 4457 } 4458 4459 static ssize_t 4460 chunk_size_store(struct mddev *mddev, const char *buf, size_t len) 4461 { 4462 unsigned long n; 4463 int err; 4464 4465 err = kstrtoul(buf, 10, &n); 4466 if (err < 0) 4467 return err; 4468 4469 err = mddev_lock(mddev); 4470 if (err) 4471 return err; 4472 if (mddev->pers) { 4473 if (mddev->pers->check_reshape == NULL) 4474 err = -EBUSY; 4475 else if (!md_is_rdwr(mddev)) 4476 err = -EROFS; 4477 else { 4478 mddev->new_chunk_sectors = n >> 9; 4479 err = mddev->pers->check_reshape(mddev); 4480 if (err) 4481 mddev->new_chunk_sectors = mddev->chunk_sectors; 4482 } 4483 } else { 4484 mddev->new_chunk_sectors = n >> 9; 4485 if (mddev->reshape_position == MaxSector) 4486 mddev->chunk_sectors = n >> 9; 4487 } 4488 mddev_unlock(mddev); 4489 return err ?: len; 4490 } 4491 static struct md_sysfs_entry md_chunk_size = 4492 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store); 4493 4494 static ssize_t 4495 resync_start_show(struct mddev *mddev, char *page) 4496 { 4497 if (mddev->resync_offset == MaxSector) 4498 return sprintf(page, "none\n"); 4499 return sprintf(page, "%llu\n", (unsigned long long)mddev->resync_offset); 4500 } 4501 4502 static ssize_t 4503 resync_start_store(struct mddev *mddev, const char *buf, size_t len) 4504 { 4505 unsigned long long n; 4506 int err; 4507 4508 if (cmd_match(buf, "none")) 4509 n = MaxSector; 4510 else { 4511 err = kstrtoull(buf, 10, &n); 4512 if (err < 0) 4513 return err; 4514 if (n != (sector_t)n) 4515 return -EINVAL; 4516 } 4517 4518 err = mddev_lock(mddev); 4519 if (err) 4520 return err; 4521 if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 4522 err = -EBUSY; 4523 4524 if (!err) { 4525 mddev->resync_offset = n; 4526 if (mddev->pers) 4527 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 4528 } 4529 mddev_unlock(mddev); 4530 return err ?: len; 4531 } 4532 static struct md_sysfs_entry md_resync_start = 4533 __ATTR_PREALLOC(resync_start, S_IRUGO|S_IWUSR, 4534 resync_start_show, resync_start_store); 4535 4536 /* 4537 * The array state can be: 4538 * 4539 * clear 4540 * No devices, no size, no level 4541 * Equivalent to STOP_ARRAY ioctl 4542 * inactive 4543 * May have some settings, but array is not active 4544 * all IO results in error 4545 * When written, doesn't tear down array, but just stops it 4546 * suspended (not supported yet) 4547 * All IO requests will block. The array can be reconfigured. 4548 * Writing this, if accepted, will block until array is quiescent 4549 * readonly 4550 * no resync can happen. 
no superblocks get written. 4551 * write requests fail 4552 * read-auto 4553 * like readonly, but behaves like 'clean' on a write request. 4554 * 4555 * clean - no pending writes, but otherwise active. 4556 * When written to inactive array, starts without resync 4557 * If a write request arrives then 4558 * if metadata is known, mark 'dirty' and switch to 'active'. 4559 * if not known, block and switch to write-pending 4560 * If written to an active array that has pending writes, then fails. 4561 * active 4562 * fully active: IO and resync can be happening. 4563 * When written to inactive array, starts with resync 4564 * 4565 * write-pending 4566 * clean, but writes are blocked waiting for 'active' to be written. 4567 * 4568 * active-idle 4569 * like active, but no writes have been seen for a while (100msec). 4570 * 4571 * broken 4572 * Array is failed. It's useful because mounted-arrays aren't stopped 4573 * when array is failed, so this state will at least alert the user that 4574 * something is wrong. 4575 */ 4576 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active, 4577 write_pending, active_idle, broken, bad_word}; 4578 static char *array_states[] = { 4579 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active", 4580 "write-pending", "active-idle", "broken", NULL }; 4581 4582 static int match_word(const char *word, char **list) 4583 { 4584 int n; 4585 for (n=0; list[n]; n++) 4586 if (cmd_match(word, list[n])) 4587 break; 4588 return n; 4589 } 4590 4591 static ssize_t 4592 array_state_show(struct mddev *mddev, char *page) 4593 { 4594 enum array_state st = inactive; 4595 4596 if (mddev->pers && !test_bit(MD_NOT_READY, &mddev->flags)) { 4597 switch(mddev->ro) { 4598 case MD_RDONLY: 4599 st = readonly; 4600 break; 4601 case MD_AUTO_READ: 4602 st = read_auto; 4603 break; 4604 case MD_RDWR: 4605 spin_lock(&mddev->lock); 4606 if (test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) 4607 st = write_pending; 4608 else if (mddev->in_sync) 4609 st = clean; 4610 else if (mddev->safemode) 4611 st = active_idle; 4612 else 4613 st = active; 4614 spin_unlock(&mddev->lock); 4615 } 4616 4617 if (test_bit(MD_BROKEN, &mddev->flags) && st == clean) 4618 st = broken; 4619 } else { 4620 if (list_empty(&mddev->disks) && 4621 mddev->raid_disks == 0 && 4622 mddev->dev_sectors == 0) 4623 st = clear; 4624 else 4625 st = inactive; 4626 } 4627 return sprintf(page, "%s\n", array_states[st]); 4628 } 4629 4630 static int do_md_stop(struct mddev *mddev, int ro); 4631 static int md_set_readonly(struct mddev *mddev); 4632 static int restart_array(struct mddev *mddev); 4633 4634 static ssize_t 4635 array_state_store(struct mddev *mddev, const char *buf, size_t len) 4636 { 4637 int err = 0; 4638 enum array_state st = match_word(buf, array_states); 4639 4640 /* No lock dependent actions */ 4641 switch (st) { 4642 case suspended: /* not supported yet */ 4643 case write_pending: /* cannot be set */ 4644 case active_idle: /* cannot be set */ 4645 case broken: /* cannot be set */ 4646 case bad_word: 4647 return -EINVAL; 4648 case clear: 4649 case readonly: 4650 case inactive: 4651 case read_auto: 4652 if (!mddev->pers || !md_is_rdwr(mddev)) 4653 break; 4654 /* write sysfs will not open mddev and opener should be 0 */ 4655 err = mddev_set_closing_and_sync_blockdev(mddev, 0); 4656 if (err) 4657 return err; 4658 break; 4659 default: 4660 break; 4661 } 4662 4663 if (mddev->pers && (st == active || st == clean) && 4664 mddev->ro != MD_RDONLY) { 4665 /* don't take reconfig_mutex when 
toggling between 4666 * clean and active 4667 */ 4668 spin_lock(&mddev->lock); 4669 if (st == active) { 4670 restart_array(mddev); 4671 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 4672 md_wakeup_thread(mddev->thread); 4673 wake_up(&mddev->sb_wait); 4674 } else /* st == clean */ { 4675 restart_array(mddev); 4676 if (!set_in_sync(mddev)) 4677 err = -EBUSY; 4678 } 4679 if (!err) 4680 sysfs_notify_dirent_safe(mddev->sysfs_state); 4681 spin_unlock(&mddev->lock); 4682 return err ?: len; 4683 } 4684 err = mddev_lock(mddev); 4685 if (err) 4686 return err; 4687 4688 switch (st) { 4689 case inactive: 4690 /* stop an active array, return 0 otherwise */ 4691 if (mddev->pers) 4692 err = do_md_stop(mddev, 2); 4693 break; 4694 case clear: 4695 err = do_md_stop(mddev, 0); 4696 break; 4697 case readonly: 4698 if (mddev->pers) 4699 err = md_set_readonly(mddev); 4700 else { 4701 mddev->ro = MD_RDONLY; 4702 set_disk_ro(mddev->gendisk, 1); 4703 err = do_md_run(mddev); 4704 } 4705 break; 4706 case read_auto: 4707 if (mddev->pers) { 4708 if (md_is_rdwr(mddev)) 4709 err = md_set_readonly(mddev); 4710 else if (mddev->ro == MD_RDONLY) 4711 err = restart_array(mddev); 4712 if (err == 0) { 4713 mddev->ro = MD_AUTO_READ; 4714 set_disk_ro(mddev->gendisk, 0); 4715 } 4716 } else { 4717 mddev->ro = MD_AUTO_READ; 4718 err = do_md_run(mddev); 4719 } 4720 break; 4721 case clean: 4722 if (mddev->pers) { 4723 err = restart_array(mddev); 4724 if (err) 4725 break; 4726 spin_lock(&mddev->lock); 4727 if (!set_in_sync(mddev)) 4728 err = -EBUSY; 4729 spin_unlock(&mddev->lock); 4730 } else 4731 err = -EINVAL; 4732 break; 4733 case active: 4734 if (mddev->pers) { 4735 err = restart_array(mddev); 4736 if (err) 4737 break; 4738 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 4739 wake_up(&mddev->sb_wait); 4740 err = 0; 4741 } else { 4742 mddev->ro = MD_RDWR; 4743 set_disk_ro(mddev->gendisk, 0); 4744 err = do_md_run(mddev); 4745 } 4746 break; 4747 default: 4748 err = -EINVAL; 4749 break; 4750 } 4751 4752 if (!err) { 4753 if (mddev->hold_active == UNTIL_IOCTL) 4754 mddev->hold_active = 0; 4755 sysfs_notify_dirent_safe(mddev->sysfs_state); 4756 } 4757 mddev_unlock(mddev); 4758 4759 if (st == readonly || st == read_auto || st == inactive || 4760 (err && st == clear)) 4761 clear_bit(MD_CLOSING, &mddev->flags); 4762 4763 return err ?: len; 4764 } 4765 static struct md_sysfs_entry md_array_state = 4766 __ATTR_PREALLOC(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); 4767 4768 static ssize_t 4769 max_corrected_read_errors_show(struct mddev *mddev, char *page) { 4770 return sprintf(page, "%d\n", 4771 atomic_read(&mddev->max_corr_read_errors)); 4772 } 4773 4774 static ssize_t 4775 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len) 4776 { 4777 unsigned int n; 4778 int rv; 4779 4780 rv = kstrtouint(buf, 10, &n); 4781 if (rv < 0) 4782 return rv; 4783 if (n > INT_MAX) 4784 return -EINVAL; 4785 atomic_set(&mddev->max_corr_read_errors, n); 4786 return len; 4787 } 4788 4789 static struct md_sysfs_entry max_corr_read_errors = 4790 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show, 4791 max_corrected_read_errors_store); 4792 4793 static ssize_t 4794 null_show(struct mddev *mddev, char *page) 4795 { 4796 return -EINVAL; 4797 } 4798 4799 static ssize_t 4800 new_dev_store(struct mddev *mddev, const char *buf, size_t len) 4801 { 4802 /* buf must be %d:%d\n? giving major and minor numbers */ 4803 /* The new device is added to the array. 
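 * For example, writing "8:16" (major 8, minor 16, conventionally /dev/sdb)
 * hot-adds that block device.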
4804 * If the array has a persistent superblock, we read the 4805 * superblock to initialise info and check validity. 4806 * Otherwise, only checking done is that in bind_rdev_to_array, 4807 * which mainly checks size. 4808 */ 4809 char *e; 4810 int major = simple_strtoul(buf, &e, 10); 4811 int minor; 4812 dev_t dev; 4813 struct md_rdev *rdev; 4814 int err; 4815 4816 if (!*buf || *e != ':' || !e[1] || e[1] == '\n') 4817 return -EINVAL; 4818 minor = simple_strtoul(e+1, &e, 10); 4819 if (*e && *e != '\n') 4820 return -EINVAL; 4821 dev = MKDEV(major, minor); 4822 if (major != MAJOR(dev) || 4823 minor != MINOR(dev)) 4824 return -EOVERFLOW; 4825 4826 err = mddev_suspend_and_lock(mddev); 4827 if (err) 4828 return err; 4829 if (mddev->persistent) { 4830 rdev = md_import_device(dev, mddev->major_version, 4831 mddev->minor_version); 4832 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { 4833 struct md_rdev *rdev0 4834 = list_entry(mddev->disks.next, 4835 struct md_rdev, same_set); 4836 err = super_types[mddev->major_version] 4837 .load_super(rdev, rdev0, mddev->minor_version); 4838 if (err < 0) 4839 goto out; 4840 } 4841 } else if (mddev->external) 4842 rdev = md_import_device(dev, -2, -1); 4843 else 4844 rdev = md_import_device(dev, -1, -1); 4845 4846 if (IS_ERR(rdev)) { 4847 mddev_unlock_and_resume(mddev); 4848 return PTR_ERR(rdev); 4849 } 4850 err = bind_rdev_to_array(rdev, mddev); 4851 out: 4852 if (err) 4853 export_rdev(rdev, mddev); 4854 mddev_unlock_and_resume(mddev); 4855 if (!err) 4856 md_new_event(); 4857 return err ? err : len; 4858 } 4859 4860 static struct md_sysfs_entry md_new_device = 4861 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store); 4862 4863 static ssize_t 4864 bitmap_store(struct mddev *mddev, const char *buf, size_t len) 4865 { 4866 char *end; 4867 unsigned long chunk, end_chunk; 4868 int err; 4869 4870 if (!md_bitmap_enabled(mddev, false)) 4871 return len; 4872 4873 err = mddev_lock(mddev); 4874 if (err) 4875 return err; 4876 if (!mddev->bitmap) 4877 goto out; 4878 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */ 4879 while (*buf) { 4880 chunk = end_chunk = simple_strtoul(buf, &end, 0); 4881 if (buf == end) 4882 break; 4883 4884 if (*end == '-') { /* range */ 4885 buf = end + 1; 4886 end_chunk = simple_strtoul(buf, &end, 0); 4887 if (buf == end) 4888 break; 4889 } 4890 4891 if (*end && !isspace(*end)) 4892 break; 4893 4894 mddev->bitmap_ops->dirty_bits(mddev, chunk, end_chunk); 4895 buf = skip_spaces(end); 4896 } 4897 mddev->bitmap_ops->unplug(mddev, true); /* flush the bits to disk */ 4898 out: 4899 mddev_unlock(mddev); 4900 return len; 4901 } 4902 4903 static struct md_sysfs_entry md_bitmap = 4904 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store); 4905 4906 static ssize_t 4907 size_show(struct mddev *mddev, char *page) 4908 { 4909 return sprintf(page, "%llu\n", 4910 (unsigned long long)mddev->dev_sectors / 2); 4911 } 4912 4913 static int update_size(struct mddev *mddev, sector_t num_sectors); 4914 4915 static ssize_t 4916 size_store(struct mddev *mddev, const char *buf, size_t len) 4917 { 4918 /* If array is inactive, we can reduce the component size, but 4919 * not increase it (except from 0). 
4920 * If array is active, we can try an on-line resize
4921 */
4922 sector_t sectors;
4923 int err = strict_blocks_to_sectors(buf, &sectors);
4924
4925 if (err < 0)
4926 return err;
4927 err = mddev_lock(mddev);
4928 if (err)
4929 return err;
4930 if (mddev->pers) {
4931 err = update_size(mddev, sectors);
4932 if (err == 0)
4933 md_update_sb(mddev, 1);
4934 } else {
4935 if (mddev->dev_sectors == 0 ||
4936 mddev->dev_sectors > sectors)
4937 mddev->dev_sectors = sectors;
4938 else
4939 err = -ENOSPC;
4940 }
4941 mddev_unlock(mddev);
4942 return err ? err : len;
4943 }
4944
4945 static struct md_sysfs_entry md_size =
4946 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
4947
4948 /* Metadata version.
4949 * This is one of
4950 * 'none' for arrays with no metadata (good luck...)
4951 * 'external' for arrays with externally managed metadata,
4952 * or N.M for internally known formats
4953 */
4954 static ssize_t
4955 metadata_show(struct mddev *mddev, char *page)
4956 {
4957 if (mddev->persistent)
4958 return sprintf(page, "%d.%d\n",
4959 mddev->major_version, mddev->minor_version);
4960 else if (mddev->external)
4961 return sprintf(page, "external:%s\n", mddev->metadata_type);
4962 else
4963 return sprintf(page, "none\n");
4964 }
4965
4966 static ssize_t
4967 metadata_store(struct mddev *mddev, const char *buf, size_t len)
4968 {
4969 int major, minor;
4970 char *e;
4971 int err;
4972 /* Changing the details of 'external' metadata is
4973 * always permitted. Otherwise there must be
4974 * no devices attached to the array.
4975 */
4976
4977 err = mddev_lock(mddev);
4978 if (err)
4979 return err;
4980 err = -EBUSY;
4981 if (mddev->external && strncmp(buf, "external:", 9) == 0)
4982 ;
4983 else if (!list_empty(&mddev->disks))
4984 goto out_unlock;
4985
4986 err = 0;
4987 if (cmd_match(buf, "none")) {
4988 mddev->persistent = 0;
4989 mddev->external = 0;
4990 mddev->major_version = 0;
4991 mddev->minor_version = 90;
4992 goto out_unlock;
4993 }
4994 if (strncmp(buf, "external:", 9) == 0) {
4995 size_t namelen = len-9;
4996 if (namelen >= sizeof(mddev->metadata_type))
4997 namelen = sizeof(mddev->metadata_type)-1;
4998 memcpy(mddev->metadata_type, buf+9, namelen);
4999 mddev->metadata_type[namelen] = 0;
5000 if (namelen && mddev->metadata_type[namelen-1] == '\n')
5001 mddev->metadata_type[--namelen] = 0;
5002 mddev->persistent = 0;
5003 mddev->external = 1;
5004 mddev->major_version = 0;
5005 mddev->minor_version = 90;
5006 goto out_unlock;
5007 }
5008 major = simple_strtoul(buf, &e, 10);
5009 err = -EINVAL;
5010 if (e==buf || *e != '.')
5011 goto out_unlock;
5012 buf = e+1;
5013 minor = simple_strtoul(buf, &e, 10);
5014 if (e==buf || (*e && *e != '\n') )
5015 goto out_unlock;
5016 err = -ENOENT;
5017 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL)
5018 goto out_unlock;
5019 mddev->major_version = major;
5020 mddev->minor_version = minor;
5021 mddev->persistent = 1;
5022 mddev->external = 0;
5023 err = 0;
5024 out_unlock:
5025 mddev_unlock(mddev);
5026 return err ?: len;
5027 }
5028
5029 static struct md_sysfs_entry md_metadata =
5030 __ATTR_PREALLOC(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store);
5031
5032 static bool rdev_needs_recovery(struct md_rdev *rdev, sector_t sectors)
5033 {
5034 return rdev->raid_disk >= 0 &&
5035 !test_bit(Journal, &rdev->flags) &&
5036 !test_bit(Faulty, &rdev->flags) &&
5037 !test_bit(In_sync, &rdev->flags) &&
5038 rdev->recovery_offset < sectors;
5039 }
5040
5041 static enum sync_action
md_get_active_sync_action(struct mddev *mddev) 5042 { 5043 struct md_rdev *rdev; 5044 bool is_recover = false; 5045 5046 if (mddev->resync_offset < MaxSector) 5047 return ACTION_RESYNC; 5048 5049 if (mddev->reshape_position != MaxSector) 5050 return ACTION_RESHAPE; 5051 5052 rcu_read_lock(); 5053 rdev_for_each_rcu(rdev, mddev) { 5054 if (rdev_needs_recovery(rdev, MaxSector)) { 5055 is_recover = true; 5056 break; 5057 } 5058 } 5059 rcu_read_unlock(); 5060 5061 return is_recover ? ACTION_RECOVER : ACTION_IDLE; 5062 } 5063 5064 enum sync_action md_sync_action(struct mddev *mddev) 5065 { 5066 unsigned long recovery = mddev->recovery; 5067 enum sync_action active_action; 5068 5069 /* 5070 * frozen has the highest priority, means running sync_thread will be 5071 * stopped immediately, and no new sync_thread can start. 5072 */ 5073 if (test_bit(MD_RECOVERY_FROZEN, &recovery)) 5074 return ACTION_FROZEN; 5075 5076 /* 5077 * read-only array can't register sync_thread, and it can only 5078 * add/remove spares. 5079 */ 5080 if (!md_is_rdwr(mddev)) 5081 return ACTION_IDLE; 5082 5083 /* 5084 * idle means no sync_thread is running, and no new sync_thread is 5085 * requested. 5086 */ 5087 if (!test_bit(MD_RECOVERY_RUNNING, &recovery) && 5088 !test_bit(MD_RECOVERY_NEEDED, &recovery)) 5089 return ACTION_IDLE; 5090 5091 /* 5092 * Check if any sync operation (resync/recover/reshape) is 5093 * currently active. This ensures that only one sync operation 5094 * can run at a time. Returns the type of active operation, or 5095 * ACTION_IDLE if none are active. 5096 */ 5097 active_action = md_get_active_sync_action(mddev); 5098 if (active_action != ACTION_IDLE) 5099 return active_action; 5100 5101 if (test_bit(MD_RECOVERY_RESHAPE, &recovery)) 5102 return ACTION_RESHAPE; 5103 5104 if (test_bit(MD_RECOVERY_RECOVER, &recovery)) 5105 return ACTION_RECOVER; 5106 5107 if (test_bit(MD_RECOVERY_SYNC, &recovery)) { 5108 /* 5109 * MD_RECOVERY_CHECK must be paired with 5110 * MD_RECOVERY_REQUESTED. 5111 */ 5112 if (test_bit(MD_RECOVERY_CHECK, &recovery)) 5113 return ACTION_CHECK; 5114 if (test_bit(MD_RECOVERY_REQUESTED, &recovery)) 5115 return ACTION_REPAIR; 5116 return ACTION_RESYNC; 5117 } 5118 5119 /* 5120 * MD_RECOVERY_NEEDED or MD_RECOVERY_RUNNING is set, however, no 5121 * sync_action is specified. 5122 */ 5123 return ACTION_IDLE; 5124 } 5125 5126 enum sync_action md_sync_action_by_name(const char *page) 5127 { 5128 enum sync_action action; 5129 5130 for (action = 0; action < NR_SYNC_ACTIONS; ++action) { 5131 if (cmd_match(page, action_name[action])) 5132 return action; 5133 } 5134 5135 return NR_SYNC_ACTIONS; 5136 } 5137 5138 const char *md_sync_action_name(enum sync_action action) 5139 { 5140 return action_name[action]; 5141 } 5142 5143 static ssize_t 5144 action_show(struct mddev *mddev, char *page) 5145 { 5146 enum sync_action action = md_sync_action(mddev); 5147 5148 return sprintf(page, "%s\n", md_sync_action_name(action)); 5149 } 5150 5151 /** 5152 * stop_sync_thread() - wait for sync_thread to stop if it's running. 5153 * @mddev: the array. 5154 * @locked: if set, reconfig_mutex will still be held after this function 5155 * return; if not set, reconfig_mutex will be released after this 5156 * function return. 
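 *
 * Note that reconfig_mutex is dropped while waiting for the thread to stop;
 * when @locked is set it is re-acquired with mddev_lock_nointr() before
 * returning.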
5157 */ 5158 static void stop_sync_thread(struct mddev *mddev, bool locked) 5159 { 5160 int sync_seq = atomic_read(&mddev->sync_seq); 5161 5162 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { 5163 if (!locked) 5164 mddev_unlock(mddev); 5165 return; 5166 } 5167 5168 mddev_unlock(mddev); 5169 5170 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 5171 /* 5172 * Thread might be blocked waiting for metadata update which will now 5173 * never happen 5174 */ 5175 md_wakeup_thread_directly(&mddev->sync_thread); 5176 if (work_pending(&mddev->sync_work)) 5177 flush_work(&mddev->sync_work); 5178 5179 wait_event(resync_wait, 5180 !test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 5181 (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery) && 5182 sync_seq != atomic_read(&mddev->sync_seq))); 5183 5184 if (locked) 5185 mddev_lock_nointr(mddev); 5186 } 5187 5188 void md_idle_sync_thread(struct mddev *mddev) 5189 { 5190 lockdep_assert_held(&mddev->reconfig_mutex); 5191 5192 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5193 stop_sync_thread(mddev, true); 5194 } 5195 EXPORT_SYMBOL_GPL(md_idle_sync_thread); 5196 5197 void md_frozen_sync_thread(struct mddev *mddev) 5198 { 5199 lockdep_assert_held(&mddev->reconfig_mutex); 5200 5201 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5202 stop_sync_thread(mddev, true); 5203 } 5204 EXPORT_SYMBOL_GPL(md_frozen_sync_thread); 5205 5206 void md_unfrozen_sync_thread(struct mddev *mddev) 5207 { 5208 lockdep_assert_held(&mddev->reconfig_mutex); 5209 5210 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5211 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5212 md_wakeup_thread(mddev->thread); 5213 sysfs_notify_dirent_safe(mddev->sysfs_action); 5214 } 5215 EXPORT_SYMBOL_GPL(md_unfrozen_sync_thread); 5216 5217 static int mddev_start_reshape(struct mddev *mddev) 5218 { 5219 int ret; 5220 5221 if (mddev->pers->start_reshape == NULL) 5222 return -EINVAL; 5223 5224 if (mddev->reshape_position == MaxSector || 5225 mddev->pers->check_reshape == NULL || 5226 mddev->pers->check_reshape(mddev)) { 5227 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5228 ret = mddev->pers->start_reshape(mddev); 5229 if (ret) 5230 return ret; 5231 } else { 5232 /* 5233 * If reshape is still in progress, and md_check_recovery() can 5234 * continue to reshape, don't restart reshape because data can 5235 * be corrupted for raid456. 5236 */ 5237 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5238 } 5239 5240 sysfs_notify_dirent_safe(mddev->sysfs_degraded); 5241 return 0; 5242 } 5243 5244 static ssize_t 5245 action_store(struct mddev *mddev, const char *page, size_t len) 5246 { 5247 int ret; 5248 enum sync_action action; 5249 5250 if (!mddev->pers || !mddev->pers->sync_request) 5251 return -EINVAL; 5252 5253 retry: 5254 if (work_busy(&mddev->sync_work)) 5255 flush_work(&mddev->sync_work); 5256 5257 ret = mddev_lock(mddev); 5258 if (ret) 5259 return ret; 5260 5261 if (work_busy(&mddev->sync_work)) { 5262 mddev_unlock(mddev); 5263 goto retry; 5264 } 5265 5266 action = md_sync_action_by_name(page); 5267 5268 /* TODO: mdadm rely on "idle" to start sync_thread. 
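 * Hence writing "idle" while a sync_thread is running interrupts it and then
 * sets MD_RECOVERY_NEEDED so a new one can be started, instead of failing
 * with -EBUSY like the other actions below.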
*/ 5269 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { 5270 switch (action) { 5271 case ACTION_FROZEN: 5272 md_frozen_sync_thread(mddev); 5273 ret = len; 5274 goto out; 5275 case ACTION_IDLE: 5276 md_idle_sync_thread(mddev); 5277 break; 5278 case ACTION_RESHAPE: 5279 case ACTION_RECOVER: 5280 case ACTION_CHECK: 5281 case ACTION_REPAIR: 5282 case ACTION_RESYNC: 5283 ret = -EBUSY; 5284 goto out; 5285 default: 5286 ret = -EINVAL; 5287 goto out; 5288 } 5289 } else { 5290 switch (action) { 5291 case ACTION_FROZEN: 5292 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5293 ret = len; 5294 goto out; 5295 case ACTION_RESHAPE: 5296 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5297 ret = mddev_start_reshape(mddev); 5298 if (ret) 5299 goto out; 5300 break; 5301 case ACTION_RECOVER: 5302 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5303 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 5304 break; 5305 case ACTION_CHECK: 5306 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); 5307 fallthrough; 5308 case ACTION_REPAIR: 5309 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 5310 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 5311 fallthrough; 5312 case ACTION_RESYNC: 5313 case ACTION_IDLE: 5314 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5315 break; 5316 default: 5317 ret = -EINVAL; 5318 goto out; 5319 } 5320 } 5321 5322 if (mddev->ro == MD_AUTO_READ) { 5323 /* A write to sync_action is enough to justify 5324 * canceling read-auto mode 5325 */ 5326 mddev->ro = MD_RDWR; 5327 md_wakeup_thread(mddev->sync_thread); 5328 } 5329 5330 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5331 md_wakeup_thread(mddev->thread); 5332 sysfs_notify_dirent_safe(mddev->sysfs_action); 5333 ret = len; 5334 5335 out: 5336 mddev_unlock(mddev); 5337 return ret; 5338 } 5339 5340 static struct md_sysfs_entry md_scan_mode = 5341 __ATTR_PREALLOC(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); 5342 5343 static ssize_t 5344 last_sync_action_show(struct mddev *mddev, char *page) 5345 { 5346 return sprintf(page, "%s\n", 5347 md_sync_action_name(mddev->last_sync_action)); 5348 } 5349 5350 static struct md_sysfs_entry md_last_scan_mode = __ATTR_RO(last_sync_action); 5351 5352 static ssize_t 5353 mismatch_cnt_show(struct mddev *mddev, char *page) 5354 { 5355 return sprintf(page, "%llu\n", 5356 (unsigned long long) 5357 atomic64_read(&mddev->resync_mismatches)); 5358 } 5359 5360 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt); 5361 5362 static ssize_t 5363 sync_min_show(struct mddev *mddev, char *page) 5364 { 5365 return sprintf(page, "%d (%s)\n", speed_min(mddev), 5366 mddev->sync_speed_min ? "local" : "system"); 5367 } 5368 5369 static ssize_t 5370 sync_min_store(struct mddev *mddev, const char *buf, size_t len) 5371 { 5372 unsigned int min; 5373 int rv; 5374 5375 if (strncmp(buf, "system", 6) == 0) { 5376 min = 0; 5377 } else { 5378 rv = kstrtouint(buf, 10, &min); 5379 if (rv < 0) 5380 return rv; 5381 if (min == 0) 5382 return -EINVAL; 5383 } 5384 mddev->sync_speed_min = min; 5385 return len; 5386 } 5387 5388 static struct md_sysfs_entry md_sync_min = 5389 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); 5390 5391 static ssize_t 5392 sync_max_show(struct mddev *mddev, char *page) 5393 { 5394 return sprintf(page, "%d (%s)\n", speed_max(mddev), 5395 mddev->sync_speed_max ? 
"local" : "system"); 5396 } 5397 5398 static ssize_t 5399 sync_max_store(struct mddev *mddev, const char *buf, size_t len) 5400 { 5401 unsigned int max; 5402 int rv; 5403 5404 if (strncmp(buf, "system", 6) == 0) { 5405 max = 0; 5406 } else { 5407 rv = kstrtouint(buf, 10, &max); 5408 if (rv < 0) 5409 return rv; 5410 if (max == 0) 5411 return -EINVAL; 5412 } 5413 mddev->sync_speed_max = max; 5414 return len; 5415 } 5416 5417 static struct md_sysfs_entry md_sync_max = 5418 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); 5419 5420 static ssize_t 5421 sync_io_depth_show(struct mddev *mddev, char *page) 5422 { 5423 return sprintf(page, "%d (%s)\n", sync_io_depth(mddev), 5424 mddev->sync_io_depth ? "local" : "system"); 5425 } 5426 5427 static ssize_t 5428 sync_io_depth_store(struct mddev *mddev, const char *buf, size_t len) 5429 { 5430 unsigned int max; 5431 int rv; 5432 5433 if (strncmp(buf, "system", 6) == 0) { 5434 max = 0; 5435 } else { 5436 rv = kstrtouint(buf, 10, &max); 5437 if (rv < 0) 5438 return rv; 5439 if (max == 0) 5440 return -EINVAL; 5441 } 5442 mddev->sync_io_depth = max; 5443 return len; 5444 } 5445 5446 static struct md_sysfs_entry md_sync_io_depth = 5447 __ATTR_RW(sync_io_depth); 5448 5449 static ssize_t 5450 degraded_show(struct mddev *mddev, char *page) 5451 { 5452 return sprintf(page, "%d\n", mddev->degraded); 5453 } 5454 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded); 5455 5456 static ssize_t 5457 sync_force_parallel_show(struct mddev *mddev, char *page) 5458 { 5459 return sprintf(page, "%d\n", mddev->parallel_resync); 5460 } 5461 5462 static ssize_t 5463 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len) 5464 { 5465 long n; 5466 5467 if (kstrtol(buf, 10, &n)) 5468 return -EINVAL; 5469 5470 if (n != 0 && n != 1) 5471 return -EINVAL; 5472 5473 mddev->parallel_resync = n; 5474 5475 if (mddev->sync_thread) 5476 wake_up(&resync_wait); 5477 5478 return len; 5479 } 5480 5481 /* force parallel resync, even with shared block devices */ 5482 static struct md_sysfs_entry md_sync_force_parallel = 5483 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR, 5484 sync_force_parallel_show, sync_force_parallel_store); 5485 5486 static ssize_t 5487 sync_speed_show(struct mddev *mddev, char *page) 5488 { 5489 unsigned long resync, dt, db; 5490 if (mddev->curr_resync == MD_RESYNC_NONE) 5491 return sprintf(page, "none\n"); 5492 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active); 5493 dt = (jiffies - mddev->resync_mark) / HZ; 5494 if (!dt) dt++; 5495 db = resync - mddev->resync_mark_cnt; 5496 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */ 5497 } 5498 5499 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed); 5500 5501 static ssize_t 5502 sync_completed_show(struct mddev *mddev, char *page) 5503 { 5504 unsigned long long max_sectors, resync; 5505 5506 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 5507 return sprintf(page, "none\n"); 5508 5509 if (mddev->curr_resync == MD_RESYNC_YIELDED || 5510 mddev->curr_resync == MD_RESYNC_DELAYED) 5511 return sprintf(page, "delayed\n"); 5512 5513 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 5514 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 5515 max_sectors = mddev->resync_max_sectors; 5516 else 5517 max_sectors = mddev->dev_sectors; 5518 5519 resync = mddev->curr_resync_completed; 5520 return sprintf(page, "%llu / %llu\n", resync, max_sectors); 5521 } 5522 5523 static struct md_sysfs_entry md_sync_completed = 5524 __ATTR_PREALLOC(sync_completed, 
S_IRUGO, sync_completed_show, NULL); 5525 5526 static ssize_t 5527 min_sync_show(struct mddev *mddev, char *page) 5528 { 5529 return sprintf(page, "%llu\n", 5530 (unsigned long long)mddev->resync_min); 5531 } 5532 static ssize_t 5533 min_sync_store(struct mddev *mddev, const char *buf, size_t len) 5534 { 5535 unsigned long long min; 5536 int err; 5537 5538 if (kstrtoull(buf, 10, &min)) 5539 return -EINVAL; 5540 5541 spin_lock(&mddev->lock); 5542 err = -EINVAL; 5543 if (min > mddev->resync_max) 5544 goto out_unlock; 5545 5546 err = -EBUSY; 5547 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 5548 goto out_unlock; 5549 5550 /* Round down to multiple of 4K for safety */ 5551 mddev->resync_min = round_down(min, 8); 5552 err = 0; 5553 5554 out_unlock: 5555 spin_unlock(&mddev->lock); 5556 return err ?: len; 5557 } 5558 5559 static struct md_sysfs_entry md_min_sync = 5560 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store); 5561 5562 static ssize_t 5563 max_sync_show(struct mddev *mddev, char *page) 5564 { 5565 if (mddev->resync_max == MaxSector) 5566 return sprintf(page, "max\n"); 5567 else 5568 return sprintf(page, "%llu\n", 5569 (unsigned long long)mddev->resync_max); 5570 } 5571 static ssize_t 5572 max_sync_store(struct mddev *mddev, const char *buf, size_t len) 5573 { 5574 int err; 5575 spin_lock(&mddev->lock); 5576 if (strncmp(buf, "max", 3) == 0) 5577 mddev->resync_max = MaxSector; 5578 else { 5579 unsigned long long max; 5580 int chunk; 5581 5582 err = -EINVAL; 5583 if (kstrtoull(buf, 10, &max)) 5584 goto out_unlock; 5585 if (max < mddev->resync_min) 5586 goto out_unlock; 5587 5588 err = -EBUSY; 5589 if (max < mddev->resync_max && md_is_rdwr(mddev) && 5590 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 5591 goto out_unlock; 5592 5593 /* Must be a multiple of chunk_size */ 5594 chunk = mddev->chunk_sectors; 5595 if (chunk) { 5596 sector_t temp = max; 5597 5598 err = -EINVAL; 5599 if (sector_div(temp, chunk)) 5600 goto out_unlock; 5601 } 5602 mddev->resync_max = max; 5603 } 5604 wake_up(&mddev->recovery_wait); 5605 err = 0; 5606 out_unlock: 5607 spin_unlock(&mddev->lock); 5608 return err ?: len; 5609 } 5610 5611 static struct md_sysfs_entry md_max_sync = 5612 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store); 5613 5614 static ssize_t 5615 suspend_lo_show(struct mddev *mddev, char *page) 5616 { 5617 return sprintf(page, "%llu\n", 5618 (unsigned long long)READ_ONCE(mddev->suspend_lo)); 5619 } 5620 5621 static ssize_t 5622 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len) 5623 { 5624 unsigned long long new; 5625 int err; 5626 5627 err = kstrtoull(buf, 10, &new); 5628 if (err < 0) 5629 return err; 5630 if (new != (sector_t)new) 5631 return -EINVAL; 5632 5633 err = mddev_suspend(mddev, true); 5634 if (err) 5635 return err; 5636 5637 WRITE_ONCE(mddev->suspend_lo, new); 5638 mddev_resume(mddev); 5639 5640 return len; 5641 } 5642 static struct md_sysfs_entry md_suspend_lo = 5643 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store); 5644 5645 static ssize_t 5646 suspend_hi_show(struct mddev *mddev, char *page) 5647 { 5648 return sprintf(page, "%llu\n", 5649 (unsigned long long)READ_ONCE(mddev->suspend_hi)); 5650 } 5651 5652 static ssize_t 5653 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len) 5654 { 5655 unsigned long long new; 5656 int err; 5657 5658 err = kstrtoull(buf, 10, &new); 5659 if (err < 0) 5660 return err; 5661 if (new != (sector_t)new) 5662 return -EINVAL; 5663 5664 err = mddev_suspend(mddev, true); 
5665 if (err)
5666 return err;
5667
5668 WRITE_ONCE(mddev->suspend_hi, new);
5669 mddev_resume(mddev);
5670
5671 return len;
5672 }
5673 static struct md_sysfs_entry md_suspend_hi =
5674 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
5675
5676 static ssize_t
5677 reshape_position_show(struct mddev *mddev, char *page)
5678 {
5679 if (mddev->reshape_position != MaxSector)
5680 return sprintf(page, "%llu\n",
5681 (unsigned long long)mddev->reshape_position);
5682 strcpy(page, "none\n");
5683 return 5;
5684 }
5685
5686 static ssize_t
5687 reshape_position_store(struct mddev *mddev, const char *buf, size_t len)
5688 {
5689 struct md_rdev *rdev;
5690 unsigned long long new;
5691 int err;
5692
5693 err = kstrtoull(buf, 10, &new);
5694 if (err < 0)
5695 return err;
5696 if (new != (sector_t)new)
5697 return -EINVAL;
5698 err = mddev_lock(mddev);
5699 if (err)
5700 return err;
5701 err = -EBUSY;
5702 if (mddev->pers)
5703 goto unlock;
5704 mddev->reshape_position = new;
5705 mddev->delta_disks = 0;
5706 mddev->reshape_backwards = 0;
5707 mddev->new_level = mddev->level;
5708 mddev->new_layout = mddev->layout;
5709 mddev->new_chunk_sectors = mddev->chunk_sectors;
5710 rdev_for_each(rdev, mddev)
5711 rdev->new_data_offset = rdev->data_offset;
5712 err = 0;
5713 unlock:
5714 mddev_unlock(mddev);
5715 return err ?: len;
5716 }
5717
5718 static struct md_sysfs_entry md_reshape_position =
5719 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
5720 reshape_position_store);
5721
5722 static ssize_t
5723 reshape_direction_show(struct mddev *mddev, char *page)
5724 {
5725 return sprintf(page, "%s\n",
5726 mddev->reshape_backwards ? "backwards" : "forwards");
5727 }
5728
5729 static ssize_t
5730 reshape_direction_store(struct mddev *mddev, const char *buf, size_t len)
5731 {
5732 int backwards = 0;
5733 int err;
5734
5735 if (cmd_match(buf, "forwards"))
5736 backwards = 0;
5737 else if (cmd_match(buf, "backwards"))
5738 backwards = 1;
5739 else
5740 return -EINVAL;
5741 if (mddev->reshape_backwards == backwards)
5742 return len;
5743
5744 err = mddev_lock(mddev);
5745 if (err)
5746 return err;
5747 /* check if we are allowed to change */
5748 if (mddev->delta_disks)
5749 err = -EBUSY;
5750 else if (mddev->persistent &&
5751 mddev->major_version == 0)
5752 err = -EINVAL;
5753 else
5754 mddev->reshape_backwards = backwards;
5755 mddev_unlock(mddev);
5756 return err ?: len;
5757 }
5758
5759 static struct md_sysfs_entry md_reshape_direction =
5760 __ATTR(reshape_direction, S_IRUGO|S_IWUSR, reshape_direction_show,
5761 reshape_direction_store);
5762
5763 static ssize_t
5764 array_size_show(struct mddev *mddev, char *page)
5765 {
5766 if (mddev->external_size)
5767 return sprintf(page, "%llu\n",
5768 (unsigned long long)mddev->array_sectors/2);
5769 else
5770 return sprintf(page, "default\n");
5771 }
5772
5773 static ssize_t
5774 array_size_store(struct mddev *mddev, const char *buf, size_t len)
5775 {
5776 sector_t sectors;
5777 int err;
5778
5779 err = mddev_lock(mddev);
5780 if (err)
5781 return err;
5782
5783 /* cluster raid doesn't support changing array_sectors */
5784 if (mddev_is_clustered(mddev)) {
5785 mddev_unlock(mddev);
5786 return -EINVAL;
5787 }
5788
5789 if (strncmp(buf, "default", 7) == 0) {
5790 if (mddev->pers)
5791 sectors = mddev->pers->size(mddev, 0, 0);
5792 else
5793 sectors = mddev->array_sectors;
5794
5795 mddev->external_size = 0;
5796 } else {
5797 if (strict_blocks_to_sectors(buf, &sectors) < 0)
5798 err = -EINVAL;
5799 else if (mddev->pers &&
mddev->pers->size(mddev, 0, 0) < sectors) 5800 err = -E2BIG; 5801 else 5802 mddev->external_size = 1; 5803 } 5804 5805 if (!err) { 5806 mddev->array_sectors = sectors; 5807 if (mddev->pers) 5808 set_capacity_and_notify(mddev->gendisk, 5809 mddev->array_sectors); 5810 } 5811 mddev_unlock(mddev); 5812 return err ?: len; 5813 } 5814 5815 static struct md_sysfs_entry md_array_size = 5816 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show, 5817 array_size_store); 5818 5819 static ssize_t 5820 consistency_policy_show(struct mddev *mddev, char *page) 5821 { 5822 int ret; 5823 5824 if (test_bit(MD_HAS_JOURNAL, &mddev->flags)) { 5825 ret = sprintf(page, "journal\n"); 5826 } else if (test_bit(MD_HAS_PPL, &mddev->flags)) { 5827 ret = sprintf(page, "ppl\n"); 5828 } else if (mddev->bitmap) { 5829 ret = sprintf(page, "bitmap\n"); 5830 } else if (mddev->pers) { 5831 if (mddev->pers->sync_request) 5832 ret = sprintf(page, "resync\n"); 5833 else 5834 ret = sprintf(page, "none\n"); 5835 } else { 5836 ret = sprintf(page, "unknown\n"); 5837 } 5838 5839 return ret; 5840 } 5841 5842 static ssize_t 5843 consistency_policy_store(struct mddev *mddev, const char *buf, size_t len) 5844 { 5845 int err = 0; 5846 5847 if (mddev->pers) { 5848 if (mddev->pers->change_consistency_policy) 5849 err = mddev->pers->change_consistency_policy(mddev, buf); 5850 else 5851 err = -EBUSY; 5852 } else if (mddev->external && strncmp(buf, "ppl", 3) == 0) { 5853 set_bit(MD_HAS_PPL, &mddev->flags); 5854 } else { 5855 err = -EINVAL; 5856 } 5857 5858 return err ? err : len; 5859 } 5860 5861 static struct md_sysfs_entry md_consistency_policy = 5862 __ATTR(consistency_policy, S_IRUGO | S_IWUSR, consistency_policy_show, 5863 consistency_policy_store); 5864 5865 static ssize_t fail_last_dev_show(struct mddev *mddev, char *page) 5866 { 5867 return sprintf(page, "%d\n", mddev->fail_last_dev); 5868 } 5869 5870 /* 5871 * Setting fail_last_dev to true to allow last device to be forcibly removed 5872 * from RAID1/RAID10. 5873 */ 5874 static ssize_t 5875 fail_last_dev_store(struct mddev *mddev, const char *buf, size_t len) 5876 { 5877 int ret; 5878 bool value; 5879 5880 ret = kstrtobool(buf, &value); 5881 if (ret) 5882 return ret; 5883 5884 if (value != mddev->fail_last_dev) 5885 mddev->fail_last_dev = value; 5886 5887 return len; 5888 } 5889 static struct md_sysfs_entry md_fail_last_dev = 5890 __ATTR(fail_last_dev, S_IRUGO | S_IWUSR, fail_last_dev_show, 5891 fail_last_dev_store); 5892 5893 static ssize_t serialize_policy_show(struct mddev *mddev, char *page) 5894 { 5895 if (mddev->pers == NULL || (mddev->pers->head.id != ID_RAID1)) 5896 return sprintf(page, "n/a\n"); 5897 else 5898 return sprintf(page, "%d\n", mddev->serialize_policy); 5899 } 5900 5901 /* 5902 * Setting serialize_policy to true to enforce write IO is not reordered 5903 * for raid1. 
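 * Changing the policy creates or destroys the per-rdev serial pool, so the
 * array is suspended around the switch (see serialize_policy_store() below).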
5904 */ 5905 static ssize_t 5906 serialize_policy_store(struct mddev *mddev, const char *buf, size_t len) 5907 { 5908 int err; 5909 bool value; 5910 5911 err = kstrtobool(buf, &value); 5912 if (err) 5913 return err; 5914 5915 if (value == mddev->serialize_policy) 5916 return len; 5917 5918 err = mddev_suspend_and_lock(mddev); 5919 if (err) 5920 return err; 5921 if (mddev->pers == NULL || (mddev->pers->head.id != ID_RAID1)) { 5922 pr_err("md: serialize_policy is only effective for raid1\n"); 5923 err = -EINVAL; 5924 goto unlock; 5925 } 5926 5927 if (value) 5928 mddev_create_serial_pool(mddev, NULL); 5929 else 5930 mddev_destroy_serial_pool(mddev, NULL); 5931 mddev->serialize_policy = value; 5932 unlock: 5933 mddev_unlock_and_resume(mddev); 5934 return err ?: len; 5935 } 5936 5937 static struct md_sysfs_entry md_serialize_policy = 5938 __ATTR(serialize_policy, S_IRUGO | S_IWUSR, serialize_policy_show, 5939 serialize_policy_store); 5940 5941 static int mddev_set_logical_block_size(struct mddev *mddev, 5942 unsigned int lbs) 5943 { 5944 int err = 0; 5945 struct queue_limits lim; 5946 5947 if (queue_logical_block_size(mddev->gendisk->queue) >= lbs) { 5948 pr_err("%s: Cannot set LBS smaller than mddev LBS %u\n", 5949 mdname(mddev), lbs); 5950 return -EINVAL; 5951 } 5952 5953 lim = queue_limits_start_update(mddev->gendisk->queue); 5954 lim.logical_block_size = lbs; 5955 pr_info("%s: logical_block_size is changed, data may be lost\n", 5956 mdname(mddev)); 5957 err = queue_limits_commit_update(mddev->gendisk->queue, &lim); 5958 if (err) 5959 return err; 5960 5961 mddev->logical_block_size = lbs; 5962 /* New lbs will be written to superblock after array is running */ 5963 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 5964 return 0; 5965 } 5966 5967 static ssize_t 5968 lbs_show(struct mddev *mddev, char *page) 5969 { 5970 return sprintf(page, "%u\n", mddev->logical_block_size); 5971 } 5972 5973 static ssize_t 5974 lbs_store(struct mddev *mddev, const char *buf, size_t len) 5975 { 5976 unsigned int lbs; 5977 int err = -EBUSY; 5978 5979 /* Only 1.x meta supports configurable LBS */ 5980 if (mddev->major_version == 0) 5981 return -EINVAL; 5982 5983 if (mddev->pers) 5984 return -EBUSY; 5985 5986 err = kstrtouint(buf, 10, &lbs); 5987 if (err < 0) 5988 return -EINVAL; 5989 5990 err = mddev_lock(mddev); 5991 if (err) 5992 goto unlock; 5993 5994 err = mddev_set_logical_block_size(mddev, lbs); 5995 5996 unlock: 5997 mddev_unlock(mddev); 5998 return err ?: len; 5999 } 6000 6001 static struct md_sysfs_entry md_logical_block_size = 6002 __ATTR(logical_block_size, 0644, lbs_show, lbs_store); 6003 6004 static struct attribute *md_default_attrs[] = { 6005 &md_level.attr, 6006 &md_new_level.attr, 6007 &md_bitmap_type.attr, 6008 &md_layout.attr, 6009 &md_raid_disks.attr, 6010 &md_uuid.attr, 6011 &md_chunk_size.attr, 6012 &md_size.attr, 6013 &md_resync_start.attr, 6014 &md_metadata.attr, 6015 &md_new_device.attr, 6016 &md_safe_delay.attr, 6017 &md_array_state.attr, 6018 &md_reshape_position.attr, 6019 &md_reshape_direction.attr, 6020 &md_array_size.attr, 6021 &max_corr_read_errors.attr, 6022 &md_consistency_policy.attr, 6023 &md_fail_last_dev.attr, 6024 &md_serialize_policy.attr, 6025 &md_logical_block_size.attr, 6026 NULL, 6027 }; 6028 6029 static const struct attribute_group md_default_group = { 6030 .attrs = md_default_attrs, 6031 }; 6032 6033 static struct attribute *md_redundancy_attrs[] = { 6034 &md_scan_mode.attr, 6035 &md_last_scan_mode.attr, 6036 &md_mismatches.attr, 6037 &md_sync_min.attr, 6038 
&md_sync_max.attr, 6039 &md_sync_io_depth.attr, 6040 &md_sync_speed.attr, 6041 &md_sync_force_parallel.attr, 6042 &md_sync_completed.attr, 6043 &md_min_sync.attr, 6044 &md_max_sync.attr, 6045 &md_suspend_lo.attr, 6046 &md_suspend_hi.attr, 6047 &md_bitmap.attr, 6048 &md_degraded.attr, 6049 NULL, 6050 }; 6051 static const struct attribute_group md_redundancy_group = { 6052 .name = NULL, 6053 .attrs = md_redundancy_attrs, 6054 }; 6055 6056 static const struct attribute_group *md_attr_groups[] = { 6057 &md_default_group, 6058 NULL, 6059 }; 6060 6061 static ssize_t 6062 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 6063 { 6064 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 6065 struct mddev *mddev = container_of(kobj, struct mddev, kobj); 6066 ssize_t rv; 6067 6068 if (!entry->show) 6069 return -EIO; 6070 spin_lock(&all_mddevs_lock); 6071 if (!mddev_get(mddev)) { 6072 spin_unlock(&all_mddevs_lock); 6073 return -EBUSY; 6074 } 6075 spin_unlock(&all_mddevs_lock); 6076 6077 rv = entry->show(mddev, page); 6078 mddev_put(mddev); 6079 return rv; 6080 } 6081 6082 static ssize_t 6083 md_attr_store(struct kobject *kobj, struct attribute *attr, 6084 const char *page, size_t length) 6085 { 6086 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 6087 struct mddev *mddev = container_of(kobj, struct mddev, kobj); 6088 ssize_t rv; 6089 struct kernfs_node *kn = NULL; 6090 6091 if (!entry->store) 6092 return -EIO; 6093 if (!capable(CAP_SYS_ADMIN)) 6094 return -EACCES; 6095 6096 if (entry->store == array_state_store && cmd_match(page, "clear")) 6097 kn = sysfs_break_active_protection(kobj, attr); 6098 6099 spin_lock(&all_mddevs_lock); 6100 if (!mddev_get(mddev)) { 6101 spin_unlock(&all_mddevs_lock); 6102 if (kn) 6103 sysfs_unbreak_active_protection(kn); 6104 return -EBUSY; 6105 } 6106 spin_unlock(&all_mddevs_lock); 6107 rv = entry->store(mddev, page, length); 6108 mddev_put(mddev); 6109 6110 if (kn) 6111 sysfs_unbreak_active_protection(kn); 6112 6113 return rv; 6114 } 6115 6116 static void md_kobj_release(struct kobject *ko) 6117 { 6118 struct mddev *mddev = container_of(ko, struct mddev, kobj); 6119 6120 if (legacy_async_del_gendisk) { 6121 if (mddev->sysfs_state) 6122 sysfs_put(mddev->sysfs_state); 6123 if (mddev->sysfs_level) 6124 sysfs_put(mddev->sysfs_level); 6125 del_gendisk(mddev->gendisk); 6126 } 6127 put_disk(mddev->gendisk); 6128 } 6129 6130 static const struct sysfs_ops md_sysfs_ops = { 6131 .show = md_attr_show, 6132 .store = md_attr_store, 6133 }; 6134 static const struct kobj_type md_ktype = { 6135 .release = md_kobj_release, 6136 .sysfs_ops = &md_sysfs_ops, 6137 .default_groups = md_attr_groups, 6138 }; 6139 6140 int mdp_major = 0; 6141 6142 /* stack the limit for all rdevs into lim */ 6143 int mddev_stack_rdev_limits(struct mddev *mddev, struct queue_limits *lim, 6144 unsigned int flags) 6145 { 6146 struct md_rdev *rdev; 6147 6148 rdev_for_each(rdev, mddev) { 6149 queue_limits_stack_bdev(lim, rdev->bdev, rdev->data_offset, 6150 mddev->gendisk->disk_name); 6151 if ((flags & MDDEV_STACK_INTEGRITY) && 6152 !queue_limits_stack_integrity_bdev(lim, rdev->bdev)) 6153 return -EINVAL; 6154 } 6155 6156 /* 6157 * Before RAID adding folio support, the logical_block_size 6158 * should be smaller than the page size. 
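 *
 * For example, on a system with 4 KiB pages a member reporting 4096-byte
 * logical blocks still stacks fine, while anything larger is rejected
 * with -EINVAL by the check below; on success the stacked value is cached
 * in mddev->logical_block_size.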
6159 */ 6160 if (lim->logical_block_size > PAGE_SIZE) { 6161 pr_err("%s: logical_block_size must not larger than PAGE_SIZE\n", 6162 mdname(mddev)); 6163 return -EINVAL; 6164 } 6165 mddev->logical_block_size = lim->logical_block_size; 6166 6167 return 0; 6168 } 6169 EXPORT_SYMBOL_GPL(mddev_stack_rdev_limits); 6170 6171 /* apply the extra stacking limits from a new rdev into mddev */ 6172 int mddev_stack_new_rdev(struct mddev *mddev, struct md_rdev *rdev) 6173 { 6174 struct queue_limits lim; 6175 6176 if (mddev_is_dm(mddev)) 6177 return 0; 6178 6179 if (queue_logical_block_size(rdev->bdev->bd_disk->queue) > 6180 queue_logical_block_size(mddev->gendisk->queue)) { 6181 pr_err("%s: incompatible logical_block_size, can not add\n", 6182 mdname(mddev)); 6183 return -EINVAL; 6184 } 6185 6186 lim = queue_limits_start_update(mddev->gendisk->queue); 6187 queue_limits_stack_bdev(&lim, rdev->bdev, rdev->data_offset, 6188 mddev->gendisk->disk_name); 6189 6190 if (!queue_limits_stack_integrity_bdev(&lim, rdev->bdev)) { 6191 pr_err("%s: incompatible integrity profile for %pg\n", 6192 mdname(mddev), rdev->bdev); 6193 queue_limits_cancel_update(mddev->gendisk->queue); 6194 return -ENXIO; 6195 } 6196 6197 return queue_limits_commit_update(mddev->gendisk->queue, &lim); 6198 } 6199 EXPORT_SYMBOL_GPL(mddev_stack_new_rdev); 6200 6201 /* update the optimal I/O size after a reshape */ 6202 void mddev_update_io_opt(struct mddev *mddev, unsigned int nr_stripes) 6203 { 6204 struct queue_limits lim; 6205 6206 if (mddev_is_dm(mddev)) 6207 return; 6208 6209 /* don't bother updating io_opt if we can't suspend the array */ 6210 if (mddev_suspend(mddev, false) < 0) 6211 return; 6212 lim = queue_limits_start_update(mddev->gendisk->queue); 6213 lim.io_opt = lim.io_min * nr_stripes; 6214 queue_limits_commit_update(mddev->gendisk->queue, &lim); 6215 mddev_resume(mddev); 6216 } 6217 EXPORT_SYMBOL_GPL(mddev_update_io_opt); 6218 6219 static void mddev_delayed_delete(struct work_struct *ws) 6220 { 6221 struct mddev *mddev = container_of(ws, struct mddev, del_work); 6222 6223 kobject_put(&mddev->kobj); 6224 } 6225 6226 void md_init_stacking_limits(struct queue_limits *lim) 6227 { 6228 blk_set_stacking_limits(lim); 6229 lim->features = BLK_FEAT_WRITE_CACHE | BLK_FEAT_FUA | 6230 BLK_FEAT_IO_STAT | BLK_FEAT_NOWAIT; 6231 } 6232 EXPORT_SYMBOL_GPL(md_init_stacking_limits); 6233 6234 struct mddev *md_alloc(dev_t dev, char *name) 6235 { 6236 /* 6237 * If dev is zero, name is the name of a device to allocate with 6238 * an arbitrary minor number. It will be "md_???" 6239 * If dev is non-zero it must be a device number with a MAJOR of 6240 * MD_MAJOR or mdp_major. In this case, if "name" is NULL, then 6241 * the device is being created by opening a node in /dev. 6242 * If "name" is not NULL, the device is being created by 6243 * writing to /sys/module/md_mod/parameters/new_array. 6244 */ 6245 static DEFINE_MUTEX(disks_mutex); 6246 struct mddev *mddev; 6247 struct gendisk *disk; 6248 int partitioned; 6249 int shift; 6250 int unit; 6251 int error; 6252 6253 /* 6254 * Wait for any previous instance of this device to be completely 6255 * removed (mddev_delayed_delete). 6256 */ 6257 flush_workqueue(md_misc_wq); 6258 6259 mutex_lock(&disks_mutex); 6260 mddev = mddev_alloc(dev); 6261 if (IS_ERR(mddev)) { 6262 error = PTR_ERR(mddev); 6263 goto out_unlock; 6264 } 6265 6266 partitioned = (MAJOR(mddev->unit) != MD_MAJOR); 6267 shift = partitioned ? 
MdpMinorShift : 0; 6268 unit = MINOR(mddev->unit) >> shift; 6269 6270 if (name && !dev) { 6271 /* Need to ensure that 'name' is not a duplicate. 6272 */ 6273 struct mddev *mddev2; 6274 spin_lock(&all_mddevs_lock); 6275 6276 list_for_each_entry(mddev2, &all_mddevs, all_mddevs) 6277 if (mddev2->gendisk && 6278 strcmp(mddev2->gendisk->disk_name, name) == 0) { 6279 spin_unlock(&all_mddevs_lock); 6280 error = -EEXIST; 6281 goto out_free_mddev; 6282 } 6283 spin_unlock(&all_mddevs_lock); 6284 } 6285 if (name && dev) 6286 /* 6287 * Creating /dev/mdNNN via "newarray", so adjust hold_active. 6288 */ 6289 mddev->hold_active = UNTIL_STOP; 6290 6291 disk = blk_alloc_disk(NULL, NUMA_NO_NODE); 6292 if (IS_ERR(disk)) { 6293 error = PTR_ERR(disk); 6294 goto out_free_mddev; 6295 } 6296 6297 disk->major = MAJOR(mddev->unit); 6298 disk->first_minor = unit << shift; 6299 disk->minors = 1 << shift; 6300 if (name) 6301 strcpy(disk->disk_name, name); 6302 else if (partitioned) 6303 sprintf(disk->disk_name, "md_d%d", unit); 6304 else 6305 sprintf(disk->disk_name, "md%d", unit); 6306 disk->fops = &md_fops; 6307 disk->private_data = mddev; 6308 6309 disk->events |= DISK_EVENT_MEDIA_CHANGE; 6310 mddev->gendisk = disk; 6311 error = add_disk(disk); 6312 if (error) 6313 goto out_put_disk; 6314 6315 kobject_init(&mddev->kobj, &md_ktype); 6316 error = kobject_add(&mddev->kobj, &disk_to_dev(disk)->kobj, "%s", "md"); 6317 if (error) { 6318 /* 6319 * The disk is already live at this point. Clear the hold flag 6320 * and let mddev_put take care of the deletion, as it isn't any 6321 * different from a normal close on last release now. 6322 */ 6323 mddev->hold_active = 0; 6324 mutex_unlock(&disks_mutex); 6325 mddev_put(mddev); 6326 return ERR_PTR(error); 6327 } 6328 6329 kobject_uevent(&mddev->kobj, KOBJ_ADD); 6330 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state"); 6331 mddev->sysfs_level = sysfs_get_dirent_safe(mddev->kobj.sd, "level"); 6332 mutex_unlock(&disks_mutex); 6333 return mddev; 6334 6335 out_put_disk: 6336 put_disk(disk); 6337 out_free_mddev: 6338 mddev_free(mddev); 6339 out_unlock: 6340 mutex_unlock(&disks_mutex); 6341 return ERR_PTR(error); 6342 } 6343 6344 static int md_alloc_and_put(dev_t dev, char *name) 6345 { 6346 struct mddev *mddev = md_alloc(dev, name); 6347 6348 if (legacy_async_del_gendisk) 6349 pr_warn("md: async del_gendisk mode will be removed in future, please upgrade to mdadm-4.5+\n"); 6350 6351 if (IS_ERR(mddev)) 6352 return PTR_ERR(mddev); 6353 mddev_put(mddev); 6354 return 0; 6355 } 6356 6357 static void md_probe(dev_t dev) 6358 { 6359 if (MAJOR(dev) == MD_MAJOR && MINOR(dev) >= 512) 6360 return; 6361 if (create_on_open) 6362 md_alloc_and_put(dev, NULL); 6363 } 6364 6365 static int add_named_array(const char *val, const struct kernel_param *kp) 6366 { 6367 /* 6368 * val must be "md_*" or "mdNNN". 6369 * For "md_*" we allocate an array with a large free minor number, and 6370 * set the name to val. val must not already be an active name. 6371 * For "mdNNN" we allocate an array with the minor number NNN 6372 * which must not already be in use. 
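 *
 * Illustrative usage (the names are only examples):
 *   echo md_test > /sys/module/md_mod/parameters/new_array   -> /dev/md_test
 *   echo md127 > /sys/module/md_mod/parameters/new_array     -> /dev/md127
 * A trailing newline from echo is trimmed below before the name is parsed.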
6373 */ 6374 int len = strlen(val); 6375 char buf[DISK_NAME_LEN]; 6376 unsigned long devnum; 6377 6378 while (len && val[len-1] == '\n') 6379 len--; 6380 if (len >= DISK_NAME_LEN) 6381 return -E2BIG; 6382 strscpy(buf, val, len+1); 6383 if (strncmp(buf, "md_", 3) == 0) 6384 return md_alloc_and_put(0, buf); 6385 if (strncmp(buf, "md", 2) == 0 && 6386 isdigit(buf[2]) && 6387 kstrtoul(buf+2, 10, &devnum) == 0 && 6388 devnum <= MINORMASK) 6389 return md_alloc_and_put(MKDEV(MD_MAJOR, devnum), NULL); 6390 6391 return -EINVAL; 6392 } 6393 6394 static void md_safemode_timeout(struct timer_list *t) 6395 { 6396 struct mddev *mddev = timer_container_of(mddev, t, safemode_timer); 6397 6398 mddev->safemode = 1; 6399 if (mddev->external) 6400 sysfs_notify_dirent_safe(mddev->sysfs_state); 6401 6402 md_wakeup_thread(mddev->thread); 6403 } 6404 6405 static int start_dirty_degraded; 6406 6407 static int md_bitmap_create(struct mddev *mddev) 6408 { 6409 if (mddev->bitmap_id == ID_BITMAP_NONE) 6410 return -EINVAL; 6411 6412 if (!mddev_set_bitmap_ops(mddev)) 6413 return -ENOENT; 6414 6415 return mddev->bitmap_ops->create(mddev); 6416 } 6417 6418 static void md_bitmap_destroy(struct mddev *mddev) 6419 { 6420 if (!md_bitmap_registered(mddev)) 6421 return; 6422 6423 mddev->bitmap_ops->destroy(mddev); 6424 mddev_clear_bitmap_ops(mddev); 6425 } 6426 6427 int md_run(struct mddev *mddev) 6428 { 6429 int err; 6430 struct md_rdev *rdev; 6431 struct md_personality *pers; 6432 bool nowait = true; 6433 6434 if (list_empty(&mddev->disks)) 6435 /* cannot run an array with no devices.. */ 6436 return -EINVAL; 6437 6438 if (mddev->pers) 6439 return -EBUSY; 6440 /* Cannot run until previous stop completes properly */ 6441 if (mddev->sysfs_active) 6442 return -EBUSY; 6443 6444 /* 6445 * Analyze all RAID superblock(s) 6446 */ 6447 if (!mddev->raid_disks) { 6448 if (!mddev->persistent) 6449 return -EINVAL; 6450 err = analyze_sbs(mddev); 6451 if (err) 6452 return -EINVAL; 6453 } 6454 6455 if (mddev->level != LEVEL_NONE) 6456 request_module("md-level-%d", mddev->level); 6457 else if (mddev->clevel[0]) 6458 request_module("md-%s", mddev->clevel); 6459 6460 /* 6461 * Drop all container device buffers, from now on 6462 * the only valid external interface is through the md 6463 * device. 6464 */ 6465 mddev->has_superblocks = false; 6466 rdev_for_each(rdev, mddev) { 6467 if (test_bit(Faulty, &rdev->flags)) 6468 continue; 6469 sync_blockdev(rdev->bdev); 6470 invalidate_bdev(rdev->bdev); 6471 if (mddev->ro != MD_RDONLY && rdev_read_only(rdev)) { 6472 mddev->ro = MD_RDONLY; 6473 if (!mddev_is_dm(mddev)) 6474 set_disk_ro(mddev->gendisk, 1); 6475 } 6476 6477 if (rdev->sb_page) 6478 mddev->has_superblocks = true; 6479 6480 /* perform some consistency tests on the device. 6481 * We don't want the data to overlap the metadata, 6482 * Internal Bitmap issues have been handled elsewhere. 
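 *
 * Concretely, the checks below require: when the superblock sits after
 * the data (data_offset < sb_start), the data area must end at or before
 * sb_start; otherwise the superblock itself must end at or before
 * data_offset.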
6483 */ 6484 if (rdev->meta_bdev) { 6485 /* Nothing to check */; 6486 } else if (rdev->data_offset < rdev->sb_start) { 6487 if (mddev->dev_sectors && 6488 rdev->data_offset + mddev->dev_sectors 6489 > rdev->sb_start) { 6490 pr_warn("md: %s: data overlaps metadata\n", 6491 mdname(mddev)); 6492 return -EINVAL; 6493 } 6494 } else { 6495 if (rdev->sb_start + rdev->sb_size/512 6496 > rdev->data_offset) { 6497 pr_warn("md: %s: metadata overlaps data\n", 6498 mdname(mddev)); 6499 return -EINVAL; 6500 } 6501 } 6502 sysfs_notify_dirent_safe(rdev->sysfs_state); 6503 nowait = nowait && bdev_nowait(rdev->bdev); 6504 } 6505 6506 pers = get_pers(mddev->level, mddev->clevel); 6507 if (!pers) 6508 return -EINVAL; 6509 if (mddev->level != pers->head.id) { 6510 mddev->level = pers->head.id; 6511 mddev->new_level = pers->head.id; 6512 } 6513 strscpy(mddev->clevel, pers->head.name, sizeof(mddev->clevel)); 6514 6515 if (mddev->reshape_position != MaxSector && 6516 pers->start_reshape == NULL) { 6517 /* This personality cannot handle reshaping... */ 6518 put_pers(pers); 6519 return -EINVAL; 6520 } 6521 6522 if (pers->sync_request) { 6523 /* Warn if this is a potentially silly 6524 * configuration. 6525 */ 6526 struct md_rdev *rdev2; 6527 int warned = 0; 6528 6529 rdev_for_each(rdev, mddev) 6530 rdev_for_each(rdev2, mddev) { 6531 if (rdev < rdev2 && 6532 rdev->bdev->bd_disk == 6533 rdev2->bdev->bd_disk) { 6534 pr_warn("%s: WARNING: %pg appears to be on the same physical disk as %pg.\n", 6535 mdname(mddev), 6536 rdev->bdev, 6537 rdev2->bdev); 6538 warned = 1; 6539 } 6540 } 6541 6542 if (warned) 6543 pr_warn("True protection against single-disk failure might be compromised.\n"); 6544 } 6545 6546 /* dm-raid expect sync_thread to be frozen until resume */ 6547 if (mddev->gendisk) 6548 mddev->recovery = 0; 6549 6550 /* may be over-ridden by personality */ 6551 mddev->resync_max_sectors = mddev->dev_sectors; 6552 6553 mddev->ok_start_degraded = start_dirty_degraded; 6554 6555 if (start_readonly && md_is_rdwr(mddev)) 6556 mddev->ro = MD_AUTO_READ; /* read-only, but switch on first write */ 6557 6558 err = pers->run(mddev); 6559 if (err) 6560 pr_warn("md: pers->run() failed ...\n"); 6561 else if (pers->size(mddev, 0, 0) < mddev->array_sectors) { 6562 WARN_ONCE(!mddev->external_size, 6563 "%s: default size too small, but 'external_size' not in effect?\n", 6564 __func__); 6565 pr_warn("md: invalid array_size %llu > default size %llu\n", 6566 (unsigned long long)mddev->array_sectors / 2, 6567 (unsigned long long)pers->size(mddev, 0, 0) / 2); 6568 err = -EINVAL; 6569 } 6570 if (err == 0 && pers->sync_request && 6571 (mddev->bitmap_info.file || mddev->bitmap_info.offset)) { 6572 err = md_bitmap_create(mddev); 6573 if (err) 6574 pr_warn("%s: failed to create bitmap (%d)\n", 6575 mdname(mddev), err); 6576 } 6577 if (err) 6578 goto bitmap_abort; 6579 6580 if (mddev->bitmap_info.max_write_behind > 0) { 6581 bool create_pool = false; 6582 6583 rdev_for_each(rdev, mddev) { 6584 if (test_bit(WriteMostly, &rdev->flags) && 6585 rdev_init_serial(rdev)) 6586 create_pool = true; 6587 } 6588 if (create_pool && mddev->serial_info_pool == NULL) { 6589 mddev->serial_info_pool = 6590 mempool_create_kmalloc_pool(NR_SERIAL_INFOS, 6591 sizeof(struct serial_info)); 6592 if (!mddev->serial_info_pool) { 6593 err = -ENOMEM; 6594 goto bitmap_abort; 6595 } 6596 } 6597 } 6598 6599 if (pers->sync_request) { 6600 if (mddev->kobj.sd && 6601 sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 6602 pr_warn("md: cannot register extra attributes for 
%s\n", 6603 mdname(mddev)); 6604 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action"); 6605 mddev->sysfs_completed = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_completed"); 6606 mddev->sysfs_degraded = sysfs_get_dirent_safe(mddev->kobj.sd, "degraded"); 6607 } else if (mddev->ro == MD_AUTO_READ) 6608 mddev->ro = MD_RDWR; 6609 6610 atomic_set(&mddev->max_corr_read_errors, 6611 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS); 6612 mddev->safemode = 0; 6613 if (mddev_is_clustered(mddev)) 6614 mddev->safemode_delay = 0; 6615 else 6616 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY; 6617 mddev->in_sync = 1; 6618 smp_wmb(); 6619 spin_lock(&mddev->lock); 6620 mddev->pers = pers; 6621 spin_unlock(&mddev->lock); 6622 rdev_for_each(rdev, mddev) 6623 if (rdev->raid_disk >= 0) 6624 sysfs_link_rdev(mddev, rdev); /* failure here is OK */ 6625 6626 if (mddev->degraded && md_is_rdwr(mddev)) 6627 /* This ensures that recovering status is reported immediately 6628 * via sysfs - until a lack of spares is confirmed. 6629 */ 6630 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 6631 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6632 6633 if (mddev->sb_flags) 6634 md_update_sb(mddev, 0); 6635 6636 md_new_event(); 6637 return 0; 6638 6639 bitmap_abort: 6640 mddev_detach(mddev); 6641 if (mddev->private) 6642 pers->free(mddev, mddev->private); 6643 mddev->private = NULL; 6644 put_pers(pers); 6645 md_bitmap_destroy(mddev); 6646 return err; 6647 } 6648 EXPORT_SYMBOL_GPL(md_run); 6649 6650 int do_md_run(struct mddev *mddev) 6651 { 6652 int err; 6653 6654 set_bit(MD_NOT_READY, &mddev->flags); 6655 err = md_run(mddev); 6656 if (err) 6657 goto out; 6658 6659 if (md_bitmap_registered(mddev)) { 6660 err = mddev->bitmap_ops->load(mddev); 6661 if (err) { 6662 md_bitmap_destroy(mddev); 6663 goto out; 6664 } 6665 } 6666 6667 if (mddev_is_clustered(mddev)) 6668 md_allow_write(mddev); 6669 6670 /* run start up tasks that require md_thread */ 6671 md_start(mddev); 6672 6673 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 6674 6675 set_capacity_and_notify(mddev->gendisk, mddev->array_sectors); 6676 clear_bit(MD_NOT_READY, &mddev->flags); 6677 mddev->changed = 1; 6678 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 6679 sysfs_notify_dirent_safe(mddev->sysfs_state); 6680 sysfs_notify_dirent_safe(mddev->sysfs_action); 6681 sysfs_notify_dirent_safe(mddev->sysfs_degraded); 6682 out: 6683 clear_bit(MD_NOT_READY, &mddev->flags); 6684 return err; 6685 } 6686 6687 int md_start(struct mddev *mddev) 6688 { 6689 int ret = 0; 6690 6691 if (mddev->pers->start) { 6692 set_bit(MD_RECOVERY_WAIT, &mddev->recovery); 6693 ret = mddev->pers->start(mddev); 6694 clear_bit(MD_RECOVERY_WAIT, &mddev->recovery); 6695 md_wakeup_thread(mddev->sync_thread); 6696 } 6697 return ret; 6698 } 6699 EXPORT_SYMBOL_GPL(md_start); 6700 6701 static int restart_array(struct mddev *mddev) 6702 { 6703 struct gendisk *disk = mddev->gendisk; 6704 struct md_rdev *rdev; 6705 bool has_journal = false; 6706 bool has_readonly = false; 6707 6708 /* Complain if it has no devices */ 6709 if (list_empty(&mddev->disks)) 6710 return -ENXIO; 6711 if (!mddev->pers) 6712 return -EINVAL; 6713 if (md_is_rdwr(mddev)) 6714 return -EBUSY; 6715 6716 rcu_read_lock(); 6717 rdev_for_each_rcu(rdev, mddev) { 6718 if (test_bit(Journal, &rdev->flags) && 6719 !test_bit(Faulty, &rdev->flags)) 6720 has_journal = true; 6721 if (rdev_read_only(rdev)) 6722 has_readonly = true; 6723 } 6724 rcu_read_unlock(); 6725 if (test_bit(MD_HAS_JOURNAL, &mddev->flags) 
&& !has_journal) 6726 /* Don't restart rw with journal missing/faulty */ 6727 return -EINVAL; 6728 if (has_readonly) 6729 return -EROFS; 6730 6731 mddev->safemode = 0; 6732 mddev->ro = MD_RDWR; 6733 set_disk_ro(disk, 0); 6734 pr_debug("md: %s switched to read-write mode.\n", mdname(mddev)); 6735 /* Kick recovery or resync if necessary */ 6736 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6737 md_wakeup_thread(mddev->sync_thread); 6738 sysfs_notify_dirent_safe(mddev->sysfs_state); 6739 return 0; 6740 } 6741 6742 static void md_clean(struct mddev *mddev) 6743 { 6744 mddev->array_sectors = 0; 6745 mddev->external_size = 0; 6746 mddev->dev_sectors = 0; 6747 mddev->raid_disks = 0; 6748 mddev->resync_offset = 0; 6749 mddev->resync_min = 0; 6750 mddev->resync_max = MaxSector; 6751 mddev->reshape_position = MaxSector; 6752 /* we still need mddev->external in export_rdev, do not clear it yet */ 6753 mddev->persistent = 0; 6754 mddev->level = LEVEL_NONE; 6755 mddev->clevel[0] = 0; 6756 6757 /* 6758 * For legacy_async_del_gendisk mode, it can stop the array in the 6759 * middle of assembling it, then it still can access the array. So 6760 * it needs to clear MD_CLOSING. If not legacy_async_del_gendisk, 6761 * it can't open the array again after stopping it. So it doesn't 6762 * clear MD_CLOSING. 6763 */ 6764 if (legacy_async_del_gendisk && mddev->hold_active) { 6765 clear_bit(MD_CLOSING, &mddev->flags); 6766 } else { 6767 /* if UNTIL_STOP is set, it's cleared here */ 6768 mddev->hold_active = 0; 6769 /* Don't clear MD_CLOSING, or mddev can be opened again. */ 6770 mddev->flags &= BIT_ULL_MASK(MD_CLOSING); 6771 } 6772 mddev->sb_flags = 0; 6773 mddev->ro = MD_RDWR; 6774 mddev->metadata_type[0] = 0; 6775 mddev->chunk_sectors = 0; 6776 mddev->ctime = mddev->utime = 0; 6777 mddev->layout = 0; 6778 mddev->logical_block_size = 0; 6779 mddev->max_disks = 0; 6780 mddev->events = 0; 6781 mddev->can_decrease_events = 0; 6782 mddev->delta_disks = 0; 6783 mddev->reshape_backwards = 0; 6784 mddev->new_level = LEVEL_NONE; 6785 mddev->new_layout = 0; 6786 mddev->new_chunk_sectors = 0; 6787 mddev->curr_resync = MD_RESYNC_NONE; 6788 atomic64_set(&mddev->resync_mismatches, 0); 6789 mddev->suspend_lo = mddev->suspend_hi = 0; 6790 mddev->sync_speed_min = mddev->sync_speed_max = 0; 6791 mddev->recovery = 0; 6792 mddev->in_sync = 0; 6793 mddev->changed = 0; 6794 mddev->degraded = 0; 6795 mddev->safemode = 0; 6796 mddev->private = NULL; 6797 mddev->cluster_info = NULL; 6798 mddev->bitmap_info.offset = 0; 6799 mddev->bitmap_info.default_offset = 0; 6800 mddev->bitmap_info.default_space = 0; 6801 mddev->bitmap_info.chunksize = 0; 6802 mddev->bitmap_info.daemon_sleep = 0; 6803 mddev->bitmap_info.max_write_behind = 0; 6804 mddev->bitmap_info.nodes = 0; 6805 } 6806 6807 static void __md_stop_writes(struct mddev *mddev) 6808 { 6809 timer_delete_sync(&mddev->safemode_timer); 6810 6811 if (mddev->pers && mddev->pers->quiesce) { 6812 mddev->pers->quiesce(mddev, 1); 6813 mddev->pers->quiesce(mddev, 0); 6814 } 6815 6816 if (md_bitmap_enabled(mddev, true)) 6817 mddev->bitmap_ops->flush(mddev); 6818 6819 if (md_is_rdwr(mddev) && 6820 ((!mddev->in_sync && !mddev_is_clustered(mddev)) || 6821 mddev->sb_flags)) { 6822 /* mark array as shutdown cleanly */ 6823 if (!mddev_is_clustered(mddev)) 6824 mddev->in_sync = 1; 6825 md_update_sb(mddev, 1); 6826 } 6827 /* disable policy to guarantee rdevs free resources for serialization */ 6828 mddev->serialize_policy = 0; 6829 mddev_destroy_serial_pool(mddev, NULL); 6830 } 6831 6832 void 
md_stop_writes(struct mddev *mddev) 6833 { 6834 mddev_lock_nointr(mddev); 6835 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6836 stop_sync_thread(mddev, true); 6837 __md_stop_writes(mddev); 6838 mddev_unlock(mddev); 6839 } 6840 EXPORT_SYMBOL_GPL(md_stop_writes); 6841 6842 static void mddev_detach(struct mddev *mddev) 6843 { 6844 if (md_bitmap_enabled(mddev, false)) 6845 mddev->bitmap_ops->wait_behind_writes(mddev); 6846 if (mddev->pers && mddev->pers->quiesce && !is_md_suspended(mddev)) { 6847 mddev->pers->quiesce(mddev, 1); 6848 mddev->pers->quiesce(mddev, 0); 6849 } 6850 md_unregister_thread(mddev, &mddev->thread); 6851 6852 /* the unplug fn references 'conf' */ 6853 if (!mddev_is_dm(mddev)) 6854 blk_sync_queue(mddev->gendisk->queue); 6855 } 6856 6857 static void __md_stop(struct mddev *mddev) 6858 { 6859 struct md_personality *pers = mddev->pers; 6860 6861 md_bitmap_destroy(mddev); 6862 mddev_detach(mddev); 6863 spin_lock(&mddev->lock); 6864 mddev->pers = NULL; 6865 spin_unlock(&mddev->lock); 6866 if (mddev->private) 6867 pers->free(mddev, mddev->private); 6868 mddev->private = NULL; 6869 put_pers(pers); 6870 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6871 } 6872 6873 void md_stop(struct mddev *mddev) 6874 { 6875 lockdep_assert_held(&mddev->reconfig_mutex); 6876 6877 /* stop the array and free an attached data structures. 6878 * This is called from dm-raid 6879 */ 6880 __md_stop_writes(mddev); 6881 __md_stop(mddev); 6882 } 6883 6884 EXPORT_SYMBOL_GPL(md_stop); 6885 6886 /* ensure 'mddev->pers' exist before calling md_set_readonly() */ 6887 static int md_set_readonly(struct mddev *mddev) 6888 { 6889 int err = 0; 6890 int did_freeze = 0; 6891 6892 if (mddev->external && test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)) 6893 return -EBUSY; 6894 6895 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { 6896 did_freeze = 1; 6897 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6898 } 6899 6900 stop_sync_thread(mddev, false); 6901 wait_event(mddev->sb_wait, 6902 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); 6903 mddev_lock_nointr(mddev); 6904 6905 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { 6906 pr_warn("md: %s still in use.\n",mdname(mddev)); 6907 err = -EBUSY; 6908 goto out; 6909 } 6910 6911 __md_stop_writes(mddev); 6912 6913 if (mddev->ro == MD_RDONLY) { 6914 err = -ENXIO; 6915 goto out; 6916 } 6917 6918 mddev->ro = MD_RDONLY; 6919 set_disk_ro(mddev->gendisk, 1); 6920 6921 out: 6922 if (!err || did_freeze) { 6923 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6924 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6925 sysfs_notify_dirent_safe(mddev->sysfs_state); 6926 } 6927 6928 return err; 6929 } 6930 6931 /* mode: 6932 * 0 - completely stop and dis-assemble array 6933 * 2 - stop but do not disassemble array 6934 */ 6935 static int do_md_stop(struct mddev *mddev, int mode) 6936 { 6937 struct gendisk *disk = mddev->gendisk; 6938 struct md_rdev *rdev; 6939 int did_freeze = 0; 6940 6941 if (!test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { 6942 did_freeze = 1; 6943 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6944 } 6945 6946 stop_sync_thread(mddev, true); 6947 6948 if (mddev->sysfs_active || 6949 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { 6950 pr_warn("md: %s still in use.\n",mdname(mddev)); 6951 if (did_freeze) { 6952 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 6953 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6954 } 6955 return -EBUSY; 6956 } 6957 if (mddev->pers) { 6958 if (!md_is_rdwr(mddev)) 6959 set_disk_ro(disk, 0); 6960 6961 if (mode == 2 
&& mddev->pers->sync_request && 6962 mddev->to_remove == NULL) 6963 mddev->to_remove = &md_redundancy_group; 6964 6965 __md_stop_writes(mddev); 6966 __md_stop(mddev); 6967 6968 /* tell userspace to handle 'inactive' */ 6969 sysfs_notify_dirent_safe(mddev->sysfs_state); 6970 6971 rdev_for_each(rdev, mddev) 6972 if (rdev->raid_disk >= 0) 6973 sysfs_unlink_rdev(mddev, rdev); 6974 6975 set_capacity_and_notify(disk, 0); 6976 mddev->changed = 1; 6977 6978 if (!md_is_rdwr(mddev)) 6979 mddev->ro = MD_RDWR; 6980 } 6981 /* 6982 * Free resources if final stop 6983 */ 6984 if (mode == 0) { 6985 pr_info("md: %s stopped.\n", mdname(mddev)); 6986 6987 if (mddev->bitmap_info.file) { 6988 struct file *f = mddev->bitmap_info.file; 6989 spin_lock(&mddev->lock); 6990 mddev->bitmap_info.file = NULL; 6991 spin_unlock(&mddev->lock); 6992 fput(f); 6993 } 6994 mddev->bitmap_info.offset = 0; 6995 6996 export_array(mddev); 6997 md_clean(mddev); 6998 if (!legacy_async_del_gendisk) 6999 set_bit(MD_DELETED, &mddev->flags); 7000 } 7001 md_new_event(); 7002 sysfs_notify_dirent_safe(mddev->sysfs_state); 7003 return 0; 7004 } 7005 7006 #ifndef MODULE 7007 static void autorun_array(struct mddev *mddev) 7008 { 7009 struct md_rdev *rdev; 7010 int err; 7011 7012 if (list_empty(&mddev->disks)) 7013 return; 7014 7015 pr_info("md: running: "); 7016 7017 rdev_for_each(rdev, mddev) { 7018 pr_cont("<%pg>", rdev->bdev); 7019 } 7020 pr_cont("\n"); 7021 7022 err = do_md_run(mddev); 7023 if (err) { 7024 pr_warn("md: do_md_run() returned %d\n", err); 7025 do_md_stop(mddev, 0); 7026 } 7027 } 7028 7029 /* 7030 * lets try to run arrays based on all disks that have arrived 7031 * until now. (those are in pending_raid_disks) 7032 * 7033 * the method: pick the first pending disk, collect all disks with 7034 * the same UUID, remove all from the pending list and put them into 7035 * the 'same_array' list. Then order this list based on superblock 7036 * update time (freshest comes first), kick out 'old' disks and 7037 * compare superblocks. If everything's fine then run it. 7038 * 7039 * If "unit" is allocated, then bump its reference count 7040 */ 7041 static void autorun_devices(int part) 7042 { 7043 struct md_rdev *rdev0, *rdev, *tmp; 7044 struct mddev *mddev; 7045 7046 pr_info("md: autorun ...\n"); 7047 while (!list_empty(&pending_raid_disks)) { 7048 int unit; 7049 dev_t dev; 7050 LIST_HEAD(candidates); 7051 rdev0 = list_entry(pending_raid_disks.next, 7052 struct md_rdev, same_set); 7053 7054 pr_debug("md: considering %pg ...\n", rdev0->bdev); 7055 INIT_LIST_HEAD(&candidates); 7056 rdev_for_each_list(rdev, tmp, &pending_raid_disks) 7057 if (super_90_load(rdev, rdev0, 0) >= 0) { 7058 pr_debug("md: adding %pg ...\n", 7059 rdev->bdev); 7060 list_move(&rdev->same_set, &candidates); 7061 } 7062 /* 7063 * now we have a set of devices, with all of them having 7064 * mostly sane superblocks. It's time to allocate the 7065 * mddev. 
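 *
 * The device number comes from the superblock's preferred minor:
 * MKDEV(mdp_major, minor << MdpMinorShift) for partitionable arrays,
 * MKDEV(MD_MAJOR, minor) otherwise (see below). Candidates that cannot
 * be bound to the new mddev are exported again after the attempt.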
7066 */ 7067 if (part) { 7068 dev = MKDEV(mdp_major, 7069 rdev0->preferred_minor << MdpMinorShift); 7070 unit = MINOR(dev) >> MdpMinorShift; 7071 } else { 7072 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor); 7073 unit = MINOR(dev); 7074 } 7075 if (rdev0->preferred_minor != unit) { 7076 pr_warn("md: unit number in %pg is bad: %d\n", 7077 rdev0->bdev, rdev0->preferred_minor); 7078 break; 7079 } 7080 7081 mddev = md_alloc(dev, NULL); 7082 if (IS_ERR(mddev)) 7083 break; 7084 7085 if (mddev_suspend_and_lock(mddev)) 7086 pr_warn("md: %s locked, cannot run\n", mdname(mddev)); 7087 else if (mddev->raid_disks || mddev->major_version 7088 || !list_empty(&mddev->disks)) { 7089 pr_warn("md: %s already running, cannot run %pg\n", 7090 mdname(mddev), rdev0->bdev); 7091 mddev_unlock_and_resume(mddev); 7092 } else { 7093 pr_debug("md: created %s\n", mdname(mddev)); 7094 mddev->persistent = 1; 7095 rdev_for_each_list(rdev, tmp, &candidates) { 7096 list_del_init(&rdev->same_set); 7097 if (bind_rdev_to_array(rdev, mddev)) 7098 export_rdev(rdev, mddev); 7099 } 7100 autorun_array(mddev); 7101 mddev_unlock_and_resume(mddev); 7102 } 7103 /* on success, candidates will be empty, on error 7104 * it won't... 7105 */ 7106 rdev_for_each_list(rdev, tmp, &candidates) { 7107 list_del_init(&rdev->same_set); 7108 export_rdev(rdev, mddev); 7109 } 7110 mddev_put(mddev); 7111 } 7112 pr_info("md: ... autorun DONE.\n"); 7113 } 7114 #endif /* !MODULE */ 7115 7116 static int get_version(void __user *arg) 7117 { 7118 mdu_version_t ver; 7119 7120 ver.major = MD_MAJOR_VERSION; 7121 ver.minor = MD_MINOR_VERSION; 7122 ver.patchlevel = MD_PATCHLEVEL_VERSION; 7123 7124 if (copy_to_user(arg, &ver, sizeof(ver))) 7125 return -EFAULT; 7126 7127 return 0; 7128 } 7129 7130 static int get_array_info(struct mddev *mddev, void __user *arg) 7131 { 7132 mdu_array_info_t info; 7133 int nr,working,insync,failed,spare; 7134 struct md_rdev *rdev; 7135 7136 nr = working = insync = failed = spare = 0; 7137 rcu_read_lock(); 7138 rdev_for_each_rcu(rdev, mddev) { 7139 nr++; 7140 if (test_bit(Faulty, &rdev->flags)) 7141 failed++; 7142 else { 7143 working++; 7144 if (test_bit(In_sync, &rdev->flags)) 7145 insync++; 7146 else if (test_bit(Journal, &rdev->flags)) 7147 /* TODO: add journal count to md_u.h */ 7148 ; 7149 else 7150 spare++; 7151 } 7152 } 7153 rcu_read_unlock(); 7154 7155 info.major_version = mddev->major_version; 7156 info.minor_version = mddev->minor_version; 7157 info.patch_version = MD_PATCHLEVEL_VERSION; 7158 info.ctime = clamp_t(time64_t, mddev->ctime, 0, U32_MAX); 7159 info.level = mddev->level; 7160 info.size = mddev->dev_sectors / 2; 7161 if (info.size != mddev->dev_sectors / 2) /* overflow */ 7162 info.size = -1; 7163 info.nr_disks = nr; 7164 info.raid_disks = mddev->raid_disks; 7165 info.md_minor = mddev->md_minor; 7166 info.not_persistent= !mddev->persistent; 7167 7168 info.utime = clamp_t(time64_t, mddev->utime, 0, U32_MAX); 7169 info.state = 0; 7170 if (mddev->in_sync) 7171 info.state = (1<<MD_SB_CLEAN); 7172 if (mddev->bitmap && mddev->bitmap_info.offset) 7173 info.state |= (1<<MD_SB_BITMAP_PRESENT); 7174 if (mddev_is_clustered(mddev)) 7175 info.state |= (1<<MD_SB_CLUSTERED); 7176 info.active_disks = insync; 7177 info.working_disks = working; 7178 info.failed_disks = failed; 7179 info.spare_disks = spare; 7180 7181 info.layout = mddev->layout; 7182 info.chunk_size = mddev->chunk_sectors << 9; 7183 7184 if (copy_to_user(arg, &info, sizeof(info))) 7185 return -EFAULT; 7186 7187 return 0; 7188 } 7189 7190 static int 
get_bitmap_file(struct mddev *mddev, void __user * arg) 7191 { 7192 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ 7193 char *ptr; 7194 int err; 7195 7196 file = kzalloc(sizeof(*file), GFP_NOIO); 7197 if (!file) 7198 return -ENOMEM; 7199 7200 err = 0; 7201 spin_lock(&mddev->lock); 7202 /* bitmap enabled */ 7203 if (mddev->bitmap_info.file) { 7204 ptr = file_path(mddev->bitmap_info.file, file->pathname, 7205 sizeof(file->pathname)); 7206 if (IS_ERR(ptr)) 7207 err = PTR_ERR(ptr); 7208 else 7209 memmove(file->pathname, ptr, 7210 sizeof(file->pathname)-(ptr-file->pathname)); 7211 } 7212 spin_unlock(&mddev->lock); 7213 7214 if (err == 0 && 7215 copy_to_user(arg, file, sizeof(*file))) 7216 err = -EFAULT; 7217 7218 kfree(file); 7219 return err; 7220 } 7221 7222 static int get_disk_info(struct mddev *mddev, void __user * arg) 7223 { 7224 mdu_disk_info_t info; 7225 struct md_rdev *rdev; 7226 7227 if (copy_from_user(&info, arg, sizeof(info))) 7228 return -EFAULT; 7229 7230 rcu_read_lock(); 7231 rdev = md_find_rdev_nr_rcu(mddev, info.number); 7232 if (rdev) { 7233 info.major = MAJOR(rdev->bdev->bd_dev); 7234 info.minor = MINOR(rdev->bdev->bd_dev); 7235 info.raid_disk = rdev->raid_disk; 7236 info.state = 0; 7237 if (test_bit(Faulty, &rdev->flags)) 7238 info.state |= (1<<MD_DISK_FAULTY); 7239 else if (test_bit(In_sync, &rdev->flags)) { 7240 info.state |= (1<<MD_DISK_ACTIVE); 7241 info.state |= (1<<MD_DISK_SYNC); 7242 } 7243 if (test_bit(Journal, &rdev->flags)) 7244 info.state |= (1<<MD_DISK_JOURNAL); 7245 if (test_bit(WriteMostly, &rdev->flags)) 7246 info.state |= (1<<MD_DISK_WRITEMOSTLY); 7247 if (test_bit(FailFast, &rdev->flags)) 7248 info.state |= (1<<MD_DISK_FAILFAST); 7249 } else { 7250 info.major = info.minor = 0; 7251 info.raid_disk = -1; 7252 info.state = (1<<MD_DISK_REMOVED); 7253 } 7254 rcu_read_unlock(); 7255 7256 if (copy_to_user(arg, &info, sizeof(info))) 7257 return -EFAULT; 7258 7259 return 0; 7260 } 7261 7262 int md_add_new_disk(struct mddev *mddev, struct mdu_disk_info_s *info) 7263 { 7264 struct md_rdev *rdev; 7265 dev_t dev = MKDEV(info->major,info->minor); 7266 7267 if (mddev_is_clustered(mddev) && 7268 !(info->state & ((1 << MD_DISK_CLUSTER_ADD) | (1 << MD_DISK_CANDIDATE)))) { 7269 pr_warn("%s: Cannot add to clustered mddev.\n", 7270 mdname(mddev)); 7271 return -EINVAL; 7272 } 7273 7274 if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) 7275 return -EOVERFLOW; 7276 7277 if (!mddev->raid_disks) { 7278 int err; 7279 /* expecting a device which has a superblock */ 7280 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); 7281 if (IS_ERR(rdev)) { 7282 pr_warn("md: md_import_device returned %ld\n", 7283 PTR_ERR(rdev)); 7284 return PTR_ERR(rdev); 7285 } 7286 if (!list_empty(&mddev->disks)) { 7287 struct md_rdev *rdev0 7288 = list_entry(mddev->disks.next, 7289 struct md_rdev, same_set); 7290 err = super_types[mddev->major_version] 7291 .load_super(rdev, rdev0, mddev->minor_version); 7292 if (err < 0) { 7293 pr_warn("md: %pg has different UUID to %pg\n", 7294 rdev->bdev, 7295 rdev0->bdev); 7296 export_rdev(rdev, mddev); 7297 return -EINVAL; 7298 } 7299 } 7300 err = bind_rdev_to_array(rdev, mddev); 7301 if (err) 7302 export_rdev(rdev, mddev); 7303 return err; 7304 } 7305 7306 /* 7307 * md_add_new_disk can be used once the array is assembled 7308 * to add "hot spares". 
They must already have a superblock 7309 * written 7310 */ 7311 if (mddev->pers) { 7312 int err; 7313 if (!mddev->pers->hot_add_disk) { 7314 pr_warn("%s: personality does not support diskops!\n", 7315 mdname(mddev)); 7316 return -EINVAL; 7317 } 7318 if (mddev->persistent) 7319 rdev = md_import_device(dev, mddev->major_version, 7320 mddev->minor_version); 7321 else 7322 rdev = md_import_device(dev, -1, -1); 7323 if (IS_ERR(rdev)) { 7324 pr_warn("md: md_import_device returned %ld\n", 7325 PTR_ERR(rdev)); 7326 return PTR_ERR(rdev); 7327 } 7328 /* set saved_raid_disk if appropriate */ 7329 if (!mddev->persistent) { 7330 if (info->state & (1<<MD_DISK_SYNC) && 7331 info->raid_disk < mddev->raid_disks) { 7332 rdev->raid_disk = info->raid_disk; 7333 clear_bit(Bitmap_sync, &rdev->flags); 7334 } else 7335 rdev->raid_disk = -1; 7336 rdev->saved_raid_disk = rdev->raid_disk; 7337 } else 7338 super_types[mddev->major_version]. 7339 validate_super(mddev, NULL/*freshest*/, rdev); 7340 if ((info->state & (1<<MD_DISK_SYNC)) && 7341 rdev->raid_disk != info->raid_disk) { 7342 /* This was a hot-add request, but events doesn't 7343 * match, so reject it. 7344 */ 7345 export_rdev(rdev, mddev); 7346 return -EINVAL; 7347 } 7348 7349 clear_bit(In_sync, &rdev->flags); /* just to be sure */ 7350 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 7351 set_bit(WriteMostly, &rdev->flags); 7352 else 7353 clear_bit(WriteMostly, &rdev->flags); 7354 if (info->state & (1<<MD_DISK_FAILFAST)) 7355 set_bit(FailFast, &rdev->flags); 7356 else 7357 clear_bit(FailFast, &rdev->flags); 7358 7359 if (info->state & (1<<MD_DISK_JOURNAL)) { 7360 struct md_rdev *rdev2; 7361 bool has_journal = false; 7362 7363 /* make sure no existing journal disk */ 7364 rdev_for_each(rdev2, mddev) { 7365 if (test_bit(Journal, &rdev2->flags)) { 7366 has_journal = true; 7367 break; 7368 } 7369 } 7370 if (has_journal || mddev->bitmap) { 7371 export_rdev(rdev, mddev); 7372 return -EBUSY; 7373 } 7374 set_bit(Journal, &rdev->flags); 7375 } 7376 /* 7377 * check whether the device shows up in other nodes 7378 */ 7379 if (mddev_is_clustered(mddev)) { 7380 if (info->state & (1 << MD_DISK_CANDIDATE)) 7381 set_bit(Candidate, &rdev->flags); 7382 else if (info->state & (1 << MD_DISK_CLUSTER_ADD)) { 7383 /* --add initiated by this node */ 7384 err = mddev->cluster_ops->add_new_disk(mddev, rdev); 7385 if (err) { 7386 export_rdev(rdev, mddev); 7387 return err; 7388 } 7389 } 7390 } 7391 7392 rdev->raid_disk = -1; 7393 err = bind_rdev_to_array(rdev, mddev); 7394 7395 if (err) 7396 export_rdev(rdev, mddev); 7397 7398 if (mddev_is_clustered(mddev)) { 7399 if (info->state & (1 << MD_DISK_CANDIDATE)) { 7400 if (!err) { 7401 err = mddev->cluster_ops->new_disk_ack( 7402 mddev, err == 0); 7403 if (err) 7404 md_kick_rdev_from_array(rdev); 7405 } 7406 } else { 7407 if (err) 7408 mddev->cluster_ops->add_new_disk_cancel(mddev); 7409 else 7410 err = add_bound_rdev(rdev); 7411 } 7412 7413 } else if (!err) 7414 err = add_bound_rdev(rdev); 7415 7416 return err; 7417 } 7418 7419 /* otherwise, md_add_new_disk is only allowed 7420 * for major_version==0 superblocks 7421 */ 7422 if (mddev->major_version != 0) { 7423 pr_warn("%s: ADD_NEW_DISK not supported\n", mdname(mddev)); 7424 return -EINVAL; 7425 } 7426 7427 if (!(info->state & (1<<MD_DISK_FAULTY))) { 7428 int err; 7429 rdev = md_import_device(dev, -1, 0); 7430 if (IS_ERR(rdev)) { 7431 pr_warn("md: error, md_import_device() returned %ld\n", 7432 PTR_ERR(rdev)); 7433 return PTR_ERR(rdev); 7434 } 7435 rdev->desc_nr = info->number; 7436 if 
(info->raid_disk < mddev->raid_disks) 7437 rdev->raid_disk = info->raid_disk; 7438 else 7439 rdev->raid_disk = -1; 7440 7441 if (rdev->raid_disk < mddev->raid_disks) 7442 if (info->state & (1<<MD_DISK_SYNC)) 7443 set_bit(In_sync, &rdev->flags); 7444 7445 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 7446 set_bit(WriteMostly, &rdev->flags); 7447 if (info->state & (1<<MD_DISK_FAILFAST)) 7448 set_bit(FailFast, &rdev->flags); 7449 7450 if (!mddev->persistent) { 7451 pr_debug("md: nonpersistent superblock ...\n"); 7452 rdev->sb_start = bdev_nr_sectors(rdev->bdev); 7453 } else 7454 rdev->sb_start = calc_dev_sboffset(rdev); 7455 rdev->sectors = rdev->sb_start; 7456 7457 err = bind_rdev_to_array(rdev, mddev); 7458 if (err) { 7459 export_rdev(rdev, mddev); 7460 return err; 7461 } 7462 } 7463 7464 return 0; 7465 } 7466 7467 static int hot_remove_disk(struct mddev *mddev, dev_t dev) 7468 { 7469 struct md_rdev *rdev; 7470 7471 if (!mddev->pers) 7472 return -ENODEV; 7473 7474 rdev = find_rdev(mddev, dev); 7475 if (!rdev) 7476 return -ENXIO; 7477 7478 if (rdev->raid_disk < 0) 7479 goto kick_rdev; 7480 7481 clear_bit(Blocked, &rdev->flags); 7482 remove_and_add_spares(mddev, rdev); 7483 7484 if (rdev->raid_disk >= 0) 7485 goto busy; 7486 7487 kick_rdev: 7488 if (mddev_is_clustered(mddev) && 7489 mddev->cluster_ops->remove_disk(mddev, rdev)) 7490 goto busy; 7491 7492 md_kick_rdev_from_array(rdev); 7493 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 7494 if (!mddev->thread) 7495 md_update_sb(mddev, 1); 7496 md_new_event(); 7497 7498 return 0; 7499 busy: 7500 pr_debug("md: cannot remove active disk %pg from %s ...\n", 7501 rdev->bdev, mdname(mddev)); 7502 return -EBUSY; 7503 } 7504 7505 static int hot_add_disk(struct mddev *mddev, dev_t dev) 7506 { 7507 int err; 7508 struct md_rdev *rdev; 7509 7510 if (!mddev->pers) 7511 return -ENODEV; 7512 7513 if (mddev->major_version != 0) { 7514 pr_warn("%s: HOT_ADD may only be used with version-0 superblocks.\n", 7515 mdname(mddev)); 7516 return -EINVAL; 7517 } 7518 if (!mddev->pers->hot_add_disk) { 7519 pr_warn("%s: personality does not support diskops!\n", 7520 mdname(mddev)); 7521 return -EINVAL; 7522 } 7523 7524 rdev = md_import_device(dev, -1, 0); 7525 if (IS_ERR(rdev)) { 7526 pr_warn("md: error, md_import_device() returned %ld\n", 7527 PTR_ERR(rdev)); 7528 return -EINVAL; 7529 } 7530 7531 if (mddev->persistent) 7532 rdev->sb_start = calc_dev_sboffset(rdev); 7533 else 7534 rdev->sb_start = bdev_nr_sectors(rdev->bdev); 7535 7536 rdev->sectors = rdev->sb_start; 7537 7538 if (test_bit(Faulty, &rdev->flags)) { 7539 pr_warn("md: can not hot-add faulty %pg disk to %s!\n", 7540 rdev->bdev, mdname(mddev)); 7541 err = -EINVAL; 7542 goto abort_export; 7543 } 7544 7545 clear_bit(In_sync, &rdev->flags); 7546 rdev->desc_nr = -1; 7547 rdev->saved_raid_disk = -1; 7548 err = bind_rdev_to_array(rdev, mddev); 7549 if (err) 7550 goto abort_export; 7551 7552 /* 7553 * The rest should better be atomic, we can have disk failures 7554 * noticed in interrupt contexts ... 7555 */ 7556 7557 rdev->raid_disk = -1; 7558 7559 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 7560 if (!mddev->thread) 7561 md_update_sb(mddev, 1); 7562 /* 7563 * Kick recovery, maybe this spare has to be added to the 7564 * array immediately. 
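 *
 * Setting MD_RECOVERY_NEEDED (below) is picked up the next time the
 * array's thread runs its recovery check (md_check_recovery()), which
 * uses remove_and_add_spares() to slot the new spare into a degraded
 * slot; md_new_event() additionally notifies userspace pollers.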
7565 */ 7566 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7567 md_new_event(); 7568 return 0; 7569 7570 abort_export: 7571 export_rdev(rdev, mddev); 7572 return err; 7573 } 7574 7575 static int set_bitmap_file(struct mddev *mddev, int fd) 7576 { 7577 int err = 0; 7578 7579 if (!md_bitmap_registered(mddev)) 7580 return -EINVAL; 7581 7582 if (mddev->pers) { 7583 if (!mddev->pers->quiesce || !mddev->thread) 7584 return -EBUSY; 7585 if (mddev->recovery || mddev->sync_thread) 7586 return -EBUSY; 7587 /* we should be able to change the bitmap.. */ 7588 } 7589 7590 if (fd >= 0) { 7591 struct inode *inode; 7592 struct file *f; 7593 7594 if (mddev->bitmap || mddev->bitmap_info.file) 7595 return -EEXIST; /* cannot add when bitmap is present */ 7596 7597 if (!IS_ENABLED(CONFIG_MD_BITMAP_FILE)) { 7598 pr_warn("%s: bitmap files not supported by this kernel\n", 7599 mdname(mddev)); 7600 return -EINVAL; 7601 } 7602 pr_warn("%s: using deprecated bitmap file support\n", 7603 mdname(mddev)); 7604 7605 f = fget(fd); 7606 7607 if (f == NULL) { 7608 pr_warn("%s: error: failed to get bitmap file\n", 7609 mdname(mddev)); 7610 return -EBADF; 7611 } 7612 7613 inode = f->f_mapping->host; 7614 if (!S_ISREG(inode->i_mode)) { 7615 pr_warn("%s: error: bitmap file must be a regular file\n", 7616 mdname(mddev)); 7617 err = -EBADF; 7618 } else if (!(f->f_mode & FMODE_WRITE)) { 7619 pr_warn("%s: error: bitmap file must open for write\n", 7620 mdname(mddev)); 7621 err = -EBADF; 7622 } else if (atomic_read(&inode->i_writecount) != 1) { 7623 pr_warn("%s: error: bitmap file is already in use\n", 7624 mdname(mddev)); 7625 err = -EBUSY; 7626 } 7627 if (err) { 7628 fput(f); 7629 return err; 7630 } 7631 mddev->bitmap_info.file = f; 7632 mddev->bitmap_info.offset = 0; /* file overrides offset */ 7633 } else if (mddev->bitmap == NULL) 7634 return -ENOENT; /* cannot remove what isn't there */ 7635 err = 0; 7636 if (mddev->pers) { 7637 if (fd >= 0) { 7638 err = md_bitmap_create(mddev); 7639 if (!err) 7640 err = mddev->bitmap_ops->load(mddev); 7641 7642 if (err) { 7643 md_bitmap_destroy(mddev); 7644 fd = -1; 7645 } 7646 } else if (fd < 0) { 7647 md_bitmap_destroy(mddev); 7648 } 7649 } 7650 7651 if (fd < 0) { 7652 struct file *f = mddev->bitmap_info.file; 7653 if (f) { 7654 spin_lock(&mddev->lock); 7655 mddev->bitmap_info.file = NULL; 7656 spin_unlock(&mddev->lock); 7657 fput(f); 7658 } 7659 } 7660 7661 return err; 7662 } 7663 7664 /* 7665 * md_set_array_info is used two different ways 7666 * The original usage is when creating a new array. 7667 * In this usage, raid_disks is > 0 and it together with 7668 * level, size, not_persistent,layout,chunksize determine the 7669 * shape of the array. 7670 * This will always create an array with a type-0.90.0 superblock. 7671 * The newer usage is when assembling an array. 7672 * In this case raid_disks will be 0, and the major_version field is 7673 * use to determine which style super-blocks are to be found on the devices. 7674 * The minor and patch _version numbers are also kept incase the 7675 * super_block handler wishes to interpret them. 7676 */ 7677 int md_set_array_info(struct mddev *mddev, struct mdu_array_info_s *info) 7678 { 7679 if (info->raid_disks == 0) { 7680 /* just setting version number for superblock loading */ 7681 if (info->major_version < 0 || 7682 info->major_version >= ARRAY_SIZE(super_types) || 7683 super_types[info->major_version].name == NULL) { 7684 /* maybe try to auto-load a module? 
*/ 7685 pr_warn("md: superblock version %d not known\n", 7686 info->major_version); 7687 return -EINVAL; 7688 } 7689 mddev->major_version = info->major_version; 7690 mddev->minor_version = info->minor_version; 7691 mddev->patch_version = info->patch_version; 7692 mddev->persistent = !info->not_persistent; 7693 /* ensure mddev_put doesn't delete this now that there 7694 * is some minimal configuration. 7695 */ 7696 mddev->ctime = ktime_get_real_seconds(); 7697 return 0; 7698 } 7699 mddev->major_version = MD_MAJOR_VERSION; 7700 mddev->minor_version = MD_MINOR_VERSION; 7701 mddev->patch_version = MD_PATCHLEVEL_VERSION; 7702 mddev->ctime = ktime_get_real_seconds(); 7703 7704 mddev->level = info->level; 7705 mddev->clevel[0] = 0; 7706 mddev->dev_sectors = 2 * (sector_t)info->size; 7707 mddev->raid_disks = info->raid_disks; 7708 /* don't set md_minor, it is determined by which /dev/md* was 7709 * openned 7710 */ 7711 if (info->state & (1<<MD_SB_CLEAN)) 7712 mddev->resync_offset = MaxSector; 7713 else 7714 mddev->resync_offset = 0; 7715 mddev->persistent = ! info->not_persistent; 7716 mddev->external = 0; 7717 7718 mddev->layout = info->layout; 7719 if (mddev->level == 0) 7720 /* Cannot trust RAID0 layout info here */ 7721 mddev->layout = -1; 7722 mddev->chunk_sectors = info->chunk_size >> 9; 7723 7724 if (mddev->persistent) { 7725 mddev->max_disks = MD_SB_DISKS; 7726 mddev->flags = 0; 7727 mddev->sb_flags = 0; 7728 } 7729 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 7730 7731 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; 7732 mddev->bitmap_info.default_space = 64*2 - (MD_SB_BYTES >> 9); 7733 mddev->bitmap_info.offset = 0; 7734 7735 mddev->reshape_position = MaxSector; 7736 7737 /* 7738 * Generate a 128 bit UUID 7739 */ 7740 get_random_bytes(mddev->uuid, 16); 7741 7742 mddev->new_level = mddev->level; 7743 mddev->new_chunk_sectors = mddev->chunk_sectors; 7744 mddev->new_layout = mddev->layout; 7745 mddev->delta_disks = 0; 7746 mddev->reshape_backwards = 0; 7747 7748 return 0; 7749 } 7750 7751 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors) 7752 { 7753 lockdep_assert_held(&mddev->reconfig_mutex); 7754 7755 if (mddev->external_size) 7756 return; 7757 7758 mddev->array_sectors = array_sectors; 7759 } 7760 EXPORT_SYMBOL(md_set_array_sectors); 7761 7762 static int update_size(struct mddev *mddev, sector_t num_sectors) 7763 { 7764 struct md_rdev *rdev; 7765 int rv; 7766 int fit = (num_sectors == 0); 7767 sector_t old_dev_sectors = mddev->dev_sectors; 7768 7769 if (mddev->pers->resize == NULL) 7770 return -EINVAL; 7771 /* The "num_sectors" is the number of sectors of each device that 7772 * is used. This can only make sense for arrays with redundancy. 7773 * linear and raid0 always use whatever space is available. We can only 7774 * consider changing this number if no resync or reconstruction is 7775 * happening, and if the new size is acceptable. It must fit before the 7776 * sb_start or, if that is <data_offset, it must fit before the size 7777 * of each device. If num_sectors is zero, we find the largest size 7778 * that fits. 
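 *
 * Worked example (sizes are illustrative): with members offering 1000,
 * 1200 and 1100 usable sectors, num_sectors == 0 settles on 1000 (the
 * smallest member), while an explicit request for 1100 returns -ENOSPC
 * because the 1000-sector member cannot provide it.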
7779 */ 7780 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 7781 return -EBUSY; 7782 if (!md_is_rdwr(mddev)) 7783 return -EROFS; 7784 7785 rdev_for_each(rdev, mddev) { 7786 sector_t avail = rdev->sectors; 7787 7788 if (fit && (num_sectors == 0 || num_sectors > avail)) 7789 num_sectors = avail; 7790 if (avail < num_sectors) 7791 return -ENOSPC; 7792 } 7793 rv = mddev->pers->resize(mddev, num_sectors); 7794 if (!rv) { 7795 if (mddev_is_clustered(mddev)) 7796 mddev->cluster_ops->update_size(mddev, old_dev_sectors); 7797 else if (!mddev_is_dm(mddev)) 7798 set_capacity_and_notify(mddev->gendisk, 7799 mddev->array_sectors); 7800 } 7801 return rv; 7802 } 7803 7804 static int update_raid_disks(struct mddev *mddev, int raid_disks) 7805 { 7806 int rv; 7807 struct md_rdev *rdev; 7808 /* change the number of raid disks */ 7809 if (mddev->pers->check_reshape == NULL) 7810 return -EINVAL; 7811 if (!md_is_rdwr(mddev)) 7812 return -EROFS; 7813 if (raid_disks <= 0 || 7814 (mddev->max_disks && raid_disks >= mddev->max_disks)) 7815 return -EINVAL; 7816 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 7817 test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) || 7818 mddev->reshape_position != MaxSector) 7819 return -EBUSY; 7820 7821 rdev_for_each(rdev, mddev) { 7822 if (mddev->raid_disks < raid_disks && 7823 rdev->data_offset < rdev->new_data_offset) 7824 return -EINVAL; 7825 if (mddev->raid_disks > raid_disks && 7826 rdev->data_offset > rdev->new_data_offset) 7827 return -EINVAL; 7828 } 7829 7830 mddev->delta_disks = raid_disks - mddev->raid_disks; 7831 if (mddev->delta_disks < 0) 7832 mddev->reshape_backwards = 1; 7833 else if (mddev->delta_disks > 0) 7834 mddev->reshape_backwards = 0; 7835 7836 rv = mddev->pers->check_reshape(mddev); 7837 if (rv < 0) { 7838 mddev->delta_disks = 0; 7839 mddev->reshape_backwards = 0; 7840 } 7841 return rv; 7842 } 7843 7844 static int get_cluster_ops(struct mddev *mddev) 7845 { 7846 xa_lock(&md_submodule); 7847 mddev->cluster_ops = xa_load(&md_submodule, ID_CLUSTER); 7848 if (mddev->cluster_ops && 7849 !try_module_get(mddev->cluster_ops->head.owner)) 7850 mddev->cluster_ops = NULL; 7851 xa_unlock(&md_submodule); 7852 7853 return mddev->cluster_ops == NULL ? -ENOENT : 0; 7854 } 7855 7856 static void put_cluster_ops(struct mddev *mddev) 7857 { 7858 if (!mddev->cluster_ops) 7859 return; 7860 7861 mddev->cluster_ops->leave(mddev); 7862 module_put(mddev->cluster_ops->head.owner); 7863 mddev->cluster_ops = NULL; 7864 } 7865 7866 /* 7867 * update_array_info is used to change the configuration of an 7868 * on-line array. 7869 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size 7870 * fields in the info are checked against the array. 7871 * Any differences that cannot be handled will cause an error. 7872 * Normally, only one change can be managed at a time. 
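 *
 * "One change at a time" is enforced by the cnt counter below: size,
 * raid_disks, layout and bitmap presence each count as one change, and
 * more than one difference from the running configuration yields -EINVAL.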
7873 */ 7874 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) 7875 { 7876 int rv = 0; 7877 int cnt = 0; 7878 int state = 0; 7879 7880 /* calculate expected state,ignoring low bits */ 7881 if (mddev->bitmap && mddev->bitmap_info.offset) 7882 state |= (1 << MD_SB_BITMAP_PRESENT); 7883 7884 if (mddev->major_version != info->major_version || 7885 mddev->minor_version != info->minor_version || 7886 /* mddev->patch_version != info->patch_version || */ 7887 mddev->ctime != info->ctime || 7888 mddev->level != info->level || 7889 /* mddev->layout != info->layout || */ 7890 mddev->persistent != !info->not_persistent || 7891 mddev->chunk_sectors != info->chunk_size >> 9 || 7892 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ 7893 ((state^info->state) & 0xfffffe00) 7894 ) 7895 return -EINVAL; 7896 /* Check there is only one change */ 7897 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 7898 cnt++; 7899 if (mddev->raid_disks != info->raid_disks) 7900 cnt++; 7901 if (mddev->layout != info->layout) 7902 cnt++; 7903 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) 7904 cnt++; 7905 if (cnt == 0) 7906 return 0; 7907 if (cnt > 1) 7908 return -EINVAL; 7909 7910 if (mddev->layout != info->layout) { 7911 /* Change layout 7912 * we don't need to do anything at the md level, the 7913 * personality will take care of it all. 7914 */ 7915 if (mddev->pers->check_reshape == NULL) 7916 return -EINVAL; 7917 else { 7918 mddev->new_layout = info->layout; 7919 rv = mddev->pers->check_reshape(mddev); 7920 if (rv) 7921 mddev->new_layout = mddev->layout; 7922 return rv; 7923 } 7924 } 7925 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 7926 rv = update_size(mddev, (sector_t)info->size * 2); 7927 7928 if (mddev->raid_disks != info->raid_disks) 7929 rv = update_raid_disks(mddev, info->raid_disks); 7930 7931 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { 7932 if (mddev->pers->quiesce == NULL || mddev->thread == NULL) { 7933 rv = -EINVAL; 7934 goto err; 7935 } 7936 if (mddev->recovery || mddev->sync_thread) { 7937 rv = -EBUSY; 7938 goto err; 7939 } 7940 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) { 7941 /* add the bitmap */ 7942 if (mddev->bitmap) { 7943 rv = -EEXIST; 7944 goto err; 7945 } 7946 if (mddev->bitmap_info.default_offset == 0) { 7947 rv = -EINVAL; 7948 goto err; 7949 } 7950 mddev->bitmap_info.offset = 7951 mddev->bitmap_info.default_offset; 7952 mddev->bitmap_info.space = 7953 mddev->bitmap_info.default_space; 7954 rv = md_bitmap_create(mddev); 7955 if (!rv) 7956 rv = mddev->bitmap_ops->load(mddev); 7957 7958 if (rv) 7959 md_bitmap_destroy(mddev); 7960 } else { 7961 struct md_bitmap_stats stats; 7962 7963 rv = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats); 7964 if (rv) 7965 goto err; 7966 7967 if (stats.file) { 7968 rv = -EINVAL; 7969 goto err; 7970 } 7971 7972 if (mddev->bitmap_info.nodes) { 7973 /* hold PW on all the bitmap lock */ 7974 if (mddev->cluster_ops->lock_all_bitmaps(mddev) <= 0) { 7975 pr_warn("md: can't change bitmap to none since the array is in use by more than one node\n"); 7976 rv = -EPERM; 7977 mddev->cluster_ops->unlock_all_bitmaps(mddev); 7978 goto err; 7979 } 7980 7981 mddev->bitmap_info.nodes = 0; 7982 put_cluster_ops(mddev); 7983 mddev->safemode_delay = DEFAULT_SAFEMODE_DELAY; 7984 } 7985 md_bitmap_destroy(mddev); 7986 mddev->bitmap_info.offset = 0; 7987 } 7988 } 7989 md_update_sb(mddev, 1); 7990 return rv; 7991 err: 7992 return rv; 7993 } 7994 7995 static int set_disk_faulty(struct mddev *mddev, 
dev_t dev) 7996 { 7997 struct md_rdev *rdev; 7998 int err = 0; 7999 8000 if (mddev->pers == NULL) 8001 return -ENODEV; 8002 8003 rcu_read_lock(); 8004 rdev = md_find_rdev_rcu(mddev, dev); 8005 if (!rdev) 8006 err = -ENODEV; 8007 else { 8008 md_error(mddev, rdev); 8009 if (test_bit(MD_BROKEN, &mddev->flags)) 8010 err = -EBUSY; 8011 } 8012 rcu_read_unlock(); 8013 return err; 8014 } 8015 8016 /* 8017 * We have a problem here : there is no easy way to give a CHS 8018 * virtual geometry. We currently pretend that we have a 2 heads 8019 * 4 sectors (with a BIG number of cylinders...). This drives 8020 * dosfs just mad... ;-) 8021 */ 8022 static int md_getgeo(struct gendisk *disk, struct hd_geometry *geo) 8023 { 8024 struct mddev *mddev = disk->private_data; 8025 8026 geo->heads = 2; 8027 geo->sectors = 4; 8028 geo->cylinders = mddev->array_sectors / 8; 8029 return 0; 8030 } 8031 8032 static inline int md_ioctl_valid(unsigned int cmd) 8033 { 8034 switch (cmd) { 8035 case GET_ARRAY_INFO: 8036 case GET_DISK_INFO: 8037 case RAID_VERSION: 8038 return 0; 8039 case ADD_NEW_DISK: 8040 case GET_BITMAP_FILE: 8041 case HOT_ADD_DISK: 8042 case HOT_REMOVE_DISK: 8043 case RESTART_ARRAY_RW: 8044 case RUN_ARRAY: 8045 case SET_ARRAY_INFO: 8046 case SET_BITMAP_FILE: 8047 case SET_DISK_FAULTY: 8048 case STOP_ARRAY: 8049 case STOP_ARRAY_RO: 8050 case CLUSTERED_DISK_NACK: 8051 if (!capable(CAP_SYS_ADMIN)) 8052 return -EACCES; 8053 return 0; 8054 default: 8055 return -ENOTTY; 8056 } 8057 } 8058 8059 static bool md_ioctl_need_suspend(unsigned int cmd) 8060 { 8061 switch (cmd) { 8062 case ADD_NEW_DISK: 8063 case HOT_ADD_DISK: 8064 case HOT_REMOVE_DISK: 8065 case SET_BITMAP_FILE: 8066 case SET_ARRAY_INFO: 8067 return true; 8068 default: 8069 return false; 8070 } 8071 } 8072 8073 static int __md_set_array_info(struct mddev *mddev, void __user *argp) 8074 { 8075 mdu_array_info_t info; 8076 int err; 8077 8078 if (!argp) 8079 memset(&info, 0, sizeof(info)); 8080 else if (copy_from_user(&info, argp, sizeof(info))) 8081 return -EFAULT; 8082 8083 if (mddev->pers) { 8084 err = update_array_info(mddev, &info); 8085 if (err) 8086 pr_warn("md: couldn't update array info. %d\n", err); 8087 return err; 8088 } 8089 8090 if (!list_empty(&mddev->disks)) { 8091 pr_warn("md: array %s already has disks!\n", mdname(mddev)); 8092 return -EBUSY; 8093 } 8094 8095 if (mddev->raid_disks) { 8096 pr_warn("md: array %s already initialised!\n", mdname(mddev)); 8097 return -EBUSY; 8098 } 8099 8100 err = md_set_array_info(mddev, &info); 8101 if (err) 8102 pr_warn("md: couldn't set array info. 
%d\n", err); 8103 8104 return err; 8105 } 8106 8107 static int md_ioctl(struct block_device *bdev, blk_mode_t mode, 8108 unsigned int cmd, unsigned long arg) 8109 { 8110 int err = 0; 8111 void __user *argp = (void __user *)arg; 8112 struct mddev *mddev = NULL; 8113 8114 err = md_ioctl_valid(cmd); 8115 if (err) 8116 return err; 8117 8118 /* 8119 * Commands dealing with the RAID driver but not any 8120 * particular array: 8121 */ 8122 if (cmd == RAID_VERSION) 8123 return get_version(argp); 8124 8125 /* 8126 * Commands creating/starting a new array: 8127 */ 8128 8129 mddev = bdev->bd_disk->private_data; 8130 8131 /* Some actions do not requires the mutex */ 8132 switch (cmd) { 8133 case GET_ARRAY_INFO: 8134 if (!mddev->raid_disks && !mddev->external) 8135 return -ENODEV; 8136 return get_array_info(mddev, argp); 8137 8138 case GET_DISK_INFO: 8139 if (!mddev->raid_disks && !mddev->external) 8140 return -ENODEV; 8141 return get_disk_info(mddev, argp); 8142 8143 case SET_DISK_FAULTY: 8144 return set_disk_faulty(mddev, new_decode_dev(arg)); 8145 8146 case GET_BITMAP_FILE: 8147 return get_bitmap_file(mddev, argp); 8148 } 8149 8150 if (cmd == STOP_ARRAY || cmd == STOP_ARRAY_RO) { 8151 /* Need to flush page cache, and ensure no-one else opens 8152 * and writes 8153 */ 8154 err = mddev_set_closing_and_sync_blockdev(mddev, 1); 8155 if (err) 8156 return err; 8157 } 8158 8159 if (!md_is_rdwr(mddev)) 8160 flush_work(&mddev->sync_work); 8161 8162 err = md_ioctl_need_suspend(cmd) ? mddev_suspend_and_lock(mddev) : 8163 mddev_lock(mddev); 8164 if (err) { 8165 pr_debug("md: ioctl lock interrupted, reason %d, cmd %d\n", 8166 err, cmd); 8167 goto out; 8168 } 8169 8170 if (cmd == SET_ARRAY_INFO) { 8171 err = __md_set_array_info(mddev, argp); 8172 goto unlock; 8173 } 8174 8175 /* 8176 * Commands querying/configuring an existing array: 8177 */ 8178 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY, 8179 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */ 8180 if ((!mddev->raid_disks && !mddev->external) 8181 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY 8182 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE 8183 && cmd != GET_BITMAP_FILE) { 8184 err = -ENODEV; 8185 goto unlock; 8186 } 8187 8188 /* 8189 * Commands even a read-only array can execute: 8190 */ 8191 switch (cmd) { 8192 case RESTART_ARRAY_RW: 8193 err = restart_array(mddev); 8194 goto unlock; 8195 8196 case STOP_ARRAY: 8197 err = do_md_stop(mddev, 0); 8198 goto unlock; 8199 8200 case STOP_ARRAY_RO: 8201 if (mddev->pers) 8202 err = md_set_readonly(mddev); 8203 goto unlock; 8204 8205 case HOT_REMOVE_DISK: 8206 err = hot_remove_disk(mddev, new_decode_dev(arg)); 8207 goto unlock; 8208 8209 case ADD_NEW_DISK: 8210 /* We can support ADD_NEW_DISK on read-only arrays 8211 * only if we are re-adding a preexisting device. 8212 * So require mddev->pers and MD_DISK_SYNC. 8213 */ 8214 if (mddev->pers) { 8215 mdu_disk_info_t info; 8216 if (copy_from_user(&info, argp, sizeof(info))) 8217 err = -EFAULT; 8218 else if (!(info.state & (1<<MD_DISK_SYNC))) 8219 /* Need to clear read-only for this */ 8220 break; 8221 else 8222 err = md_add_new_disk(mddev, &info); 8223 goto unlock; 8224 } 8225 break; 8226 } 8227 8228 /* 8229 * The remaining ioctls are changing the state of the 8230 * superblock, so we do not allow them on read-only arrays. 
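 *
 * (One exception is handled just below: an array in auto-read-only mode,
 * i.e. mddev->ro == MD_AUTO_READ, is switched to MD_RDWR here instead of
 * failing the ioctl with -EROFS.)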
8231 */ 8232 if (!md_is_rdwr(mddev) && mddev->pers) { 8233 if (mddev->ro != MD_AUTO_READ) { 8234 err = -EROFS; 8235 goto unlock; 8236 } 8237 mddev->ro = MD_RDWR; 8238 sysfs_notify_dirent_safe(mddev->sysfs_state); 8239 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 8240 /* mddev_unlock will wake thread */ 8241 /* If a device failed while we were read-only, we 8242 * need to make sure the metadata is updated now. 8243 */ 8244 if (test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags)) { 8245 mddev_unlock(mddev); 8246 wait_event(mddev->sb_wait, 8247 !test_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags) && 8248 !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags)); 8249 mddev_lock_nointr(mddev); 8250 } 8251 } 8252 8253 switch (cmd) { 8254 case ADD_NEW_DISK: 8255 { 8256 mdu_disk_info_t info; 8257 if (copy_from_user(&info, argp, sizeof(info))) 8258 err = -EFAULT; 8259 else 8260 err = md_add_new_disk(mddev, &info); 8261 goto unlock; 8262 } 8263 8264 case CLUSTERED_DISK_NACK: 8265 if (mddev_is_clustered(mddev)) 8266 mddev->cluster_ops->new_disk_ack(mddev, false); 8267 else 8268 err = -EINVAL; 8269 goto unlock; 8270 8271 case HOT_ADD_DISK: 8272 err = hot_add_disk(mddev, new_decode_dev(arg)); 8273 goto unlock; 8274 8275 case RUN_ARRAY: 8276 err = do_md_run(mddev); 8277 goto unlock; 8278 8279 case SET_BITMAP_FILE: 8280 err = set_bitmap_file(mddev, (int)arg); 8281 goto unlock; 8282 8283 default: 8284 err = -EINVAL; 8285 goto unlock; 8286 } 8287 8288 unlock: 8289 if (mddev->hold_active == UNTIL_IOCTL && 8290 err != -EINVAL) 8291 mddev->hold_active = 0; 8292 8293 md_ioctl_need_suspend(cmd) ? mddev_unlock_and_resume(mddev) : 8294 mddev_unlock(mddev); 8295 8296 out: 8297 if (cmd == STOP_ARRAY_RO || (err && cmd == STOP_ARRAY)) 8298 clear_bit(MD_CLOSING, &mddev->flags); 8299 return err; 8300 } 8301 #ifdef CONFIG_COMPAT 8302 static int md_compat_ioctl(struct block_device *bdev, blk_mode_t mode, 8303 unsigned int cmd, unsigned long arg) 8304 { 8305 switch (cmd) { 8306 case HOT_REMOVE_DISK: 8307 case HOT_ADD_DISK: 8308 case SET_DISK_FAULTY: 8309 case SET_BITMAP_FILE: 8310 /* These take in integer arg, do not convert */ 8311 break; 8312 default: 8313 arg = (unsigned long)compat_ptr(arg); 8314 break; 8315 } 8316 8317 return md_ioctl(bdev, mode, cmd, arg); 8318 } 8319 #endif /* CONFIG_COMPAT */ 8320 8321 static int md_set_read_only(struct block_device *bdev, bool ro) 8322 { 8323 struct mddev *mddev = bdev->bd_disk->private_data; 8324 int err; 8325 8326 err = mddev_lock(mddev); 8327 if (err) 8328 return err; 8329 8330 if (!mddev->raid_disks && !mddev->external) { 8331 err = -ENODEV; 8332 goto out_unlock; 8333 } 8334 8335 /* 8336 * Transitioning to read-auto need only happen for arrays that call 8337 * md_write_start and which are not ready for writes yet. 
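 *
 * Sketch of the two transitions involved, both visible in this file
 * (an illustrative summary, not a full state machine):
 *
 *	MD_RDONLY    --(read-only cleared via this hook)-->   MD_AUTO_READ
 *	MD_AUTO_READ --(first write, see md_write_start())--> MD_RDWR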
8338 */ 8339 if (!ro && mddev->ro == MD_RDONLY && mddev->pers) { 8340 err = restart_array(mddev); 8341 if (err) 8342 goto out_unlock; 8343 mddev->ro = MD_AUTO_READ; 8344 } 8345 8346 out_unlock: 8347 mddev_unlock(mddev); 8348 return err; 8349 } 8350 8351 static int md_open(struct gendisk *disk, blk_mode_t mode) 8352 { 8353 struct mddev *mddev; 8354 int err; 8355 8356 spin_lock(&all_mddevs_lock); 8357 mddev = mddev_get(disk->private_data); 8358 spin_unlock(&all_mddevs_lock); 8359 if (!mddev) 8360 return -ENODEV; 8361 8362 err = mutex_lock_interruptible(&mddev->open_mutex); 8363 if (err) 8364 goto out; 8365 8366 err = -ENODEV; 8367 if (test_bit(MD_CLOSING, &mddev->flags)) 8368 goto out_unlock; 8369 8370 atomic_inc(&mddev->openers); 8371 mutex_unlock(&mddev->open_mutex); 8372 8373 disk_check_media_change(disk); 8374 return 0; 8375 8376 out_unlock: 8377 mutex_unlock(&mddev->open_mutex); 8378 out: 8379 mddev_put(mddev); 8380 return err; 8381 } 8382 8383 static void md_release(struct gendisk *disk) 8384 { 8385 struct mddev *mddev = disk->private_data; 8386 8387 BUG_ON(!mddev); 8388 atomic_dec(&mddev->openers); 8389 mddev_put(mddev); 8390 } 8391 8392 static unsigned int md_check_events(struct gendisk *disk, unsigned int clearing) 8393 { 8394 struct mddev *mddev = disk->private_data; 8395 unsigned int ret = 0; 8396 8397 if (mddev->changed) 8398 ret = DISK_EVENT_MEDIA_CHANGE; 8399 mddev->changed = 0; 8400 return ret; 8401 } 8402 8403 static void md_free_disk(struct gendisk *disk) 8404 { 8405 struct mddev *mddev = disk->private_data; 8406 8407 mddev_free(mddev); 8408 } 8409 8410 const struct block_device_operations md_fops = 8411 { 8412 .owner = THIS_MODULE, 8413 .submit_bio = md_submit_bio, 8414 .open = md_open, 8415 .release = md_release, 8416 .ioctl = md_ioctl, 8417 #ifdef CONFIG_COMPAT 8418 .compat_ioctl = md_compat_ioctl, 8419 #endif 8420 .getgeo = md_getgeo, 8421 .check_events = md_check_events, 8422 .set_read_only = md_set_read_only, 8423 .free_disk = md_free_disk, 8424 }; 8425 8426 static int md_thread(void *arg) 8427 { 8428 struct md_thread *thread = arg; 8429 8430 /* 8431 * md_thread is a 'system-thread', it's priority should be very 8432 * high. We avoid resource deadlocks individually in each 8433 * raid personality. (RAID5 does preallocation) We also use RR and 8434 * the very same RT priority as kswapd, thus we will never get 8435 * into a priority inversion deadlock. 8436 * 8437 * we definitely have to have equal or higher priority than 8438 * bdflush, otherwise bdflush will deadlock if there are too 8439 * many dirty RAID5 blocks. 8440 */ 8441 8442 allow_signal(SIGKILL); 8443 while (!kthread_should_stop()) { 8444 8445 /* We need to wait INTERRUPTIBLE so that 8446 * we don't add to the load-average. 
8447 * That means we need to be sure no signals are 8448 * pending 8449 */ 8450 if (signal_pending(current)) 8451 flush_signals(current); 8452 8453 wait_event_interruptible_timeout 8454 (thread->wqueue, 8455 test_bit(THREAD_WAKEUP, &thread->flags) 8456 || kthread_should_stop() || kthread_should_park(), 8457 thread->timeout); 8458 8459 clear_bit(THREAD_WAKEUP, &thread->flags); 8460 if (kthread_should_park()) 8461 kthread_parkme(); 8462 if (!kthread_should_stop()) 8463 thread->run(thread); 8464 } 8465 8466 return 0; 8467 } 8468 8469 static void md_wakeup_thread_directly(struct md_thread __rcu **thread) 8470 { 8471 struct md_thread *t; 8472 8473 rcu_read_lock(); 8474 t = rcu_dereference(*thread); 8475 if (t) 8476 wake_up_process(t->tsk); 8477 rcu_read_unlock(); 8478 } 8479 8480 void __md_wakeup_thread(struct md_thread __rcu *thread) 8481 { 8482 struct md_thread *t; 8483 8484 t = rcu_dereference(thread); 8485 if (t) { 8486 pr_debug("md: waking up MD thread %s.\n", t->tsk->comm); 8487 set_bit(THREAD_WAKEUP, &t->flags); 8488 if (wq_has_sleeper(&t->wqueue)) 8489 wake_up(&t->wqueue); 8490 } 8491 } 8492 EXPORT_SYMBOL(__md_wakeup_thread); 8493 8494 struct md_thread *md_register_thread(void (*run) (struct md_thread *), 8495 struct mddev *mddev, const char *name) 8496 { 8497 struct md_thread *thread; 8498 8499 thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL); 8500 if (!thread) 8501 return NULL; 8502 8503 init_waitqueue_head(&thread->wqueue); 8504 8505 thread->run = run; 8506 thread->mddev = mddev; 8507 thread->timeout = MAX_SCHEDULE_TIMEOUT; 8508 thread->tsk = kthread_run(md_thread, thread, 8509 "%s_%s", 8510 mdname(thread->mddev), 8511 name); 8512 if (IS_ERR(thread->tsk)) { 8513 kfree(thread); 8514 return NULL; 8515 } 8516 return thread; 8517 } 8518 EXPORT_SYMBOL(md_register_thread); 8519 8520 void md_unregister_thread(struct mddev *mddev, struct md_thread __rcu **threadp) 8521 { 8522 struct md_thread *thread = rcu_dereference_protected(*threadp, 8523 lockdep_is_held(&mddev->reconfig_mutex)); 8524 8525 if (!thread) 8526 return; 8527 8528 rcu_assign_pointer(*threadp, NULL); 8529 synchronize_rcu(); 8530 8531 pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); 8532 kthread_stop(thread->tsk); 8533 kfree(thread); 8534 } 8535 EXPORT_SYMBOL(md_unregister_thread); 8536 8537 void md_error(struct mddev *mddev, struct md_rdev *rdev) 8538 { 8539 if (!rdev || test_bit(Faulty, &rdev->flags)) 8540 return; 8541 8542 if (!mddev->pers || !mddev->pers->error_handler) 8543 return; 8544 mddev->pers->error_handler(mddev, rdev); 8545 8546 if (mddev->pers->head.id == ID_RAID0 || 8547 mddev->pers->head.id == ID_LINEAR) 8548 return; 8549 8550 if (mddev->degraded && !test_bit(MD_BROKEN, &mddev->flags)) 8551 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 8552 sysfs_notify_dirent_safe(rdev->sysfs_state); 8553 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 8554 if (!test_bit(MD_BROKEN, &mddev->flags)) { 8555 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 8556 md_wakeup_thread(mddev->thread); 8557 } 8558 if (mddev->event_work.func) 8559 queue_work(md_misc_wq, &mddev->event_work); 8560 md_new_event(); 8561 } 8562 EXPORT_SYMBOL(md_error); 8563 8564 /* seq_file implementation /proc/mdstat */ 8565 8566 static void status_unused(struct seq_file *seq) 8567 { 8568 int i = 0; 8569 struct md_rdev *rdev; 8570 8571 seq_printf(seq, "unused devices: "); 8572 8573 list_for_each_entry(rdev, &pending_raid_disks, same_set) { 8574 i++; 8575 seq_printf(seq, "%pg ", rdev->bdev); 8576 } 8577 if (!i) 8578 seq_printf(seq, 
"<none>"); 8579 8580 seq_printf(seq, "\n"); 8581 } 8582 8583 static void status_personalities(struct seq_file *seq) 8584 { 8585 struct md_submodule_head *head; 8586 unsigned long i; 8587 8588 seq_puts(seq, "Personalities : "); 8589 8590 xa_lock(&md_submodule); 8591 xa_for_each(&md_submodule, i, head) 8592 if (head->type == MD_PERSONALITY) 8593 seq_printf(seq, "[%s] ", head->name); 8594 xa_unlock(&md_submodule); 8595 8596 seq_puts(seq, "\n"); 8597 } 8598 8599 static int status_resync(struct seq_file *seq, struct mddev *mddev) 8600 { 8601 sector_t max_sectors, resync, res; 8602 unsigned long dt, db = 0; 8603 sector_t rt, curr_mark_cnt, resync_mark_cnt; 8604 int scale, recovery_active; 8605 unsigned int per_milli; 8606 8607 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 8608 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 8609 max_sectors = mddev->resync_max_sectors; 8610 else 8611 max_sectors = mddev->dev_sectors; 8612 8613 resync = mddev->curr_resync; 8614 if (resync < MD_RESYNC_ACTIVE) { 8615 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) 8616 /* Still cleaning up */ 8617 resync = max_sectors; 8618 } else if (resync > max_sectors) { 8619 resync = max_sectors; 8620 } else { 8621 res = atomic_read(&mddev->recovery_active); 8622 /* 8623 * Resync has started, but the subtraction has overflowed or 8624 * yielded one of the special values. Force it to active to 8625 * ensure the status reports an active resync. 8626 */ 8627 if (resync < res || resync - res < MD_RESYNC_ACTIVE) 8628 resync = MD_RESYNC_ACTIVE; 8629 else 8630 resync -= res; 8631 } 8632 8633 if (resync == MD_RESYNC_NONE) { 8634 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery)) { 8635 struct md_rdev *rdev; 8636 8637 rdev_for_each(rdev, mddev) 8638 if (rdev->raid_disk >= 0 && 8639 !test_bit(Faulty, &rdev->flags) && 8640 rdev->recovery_offset != MaxSector && 8641 rdev->recovery_offset) { 8642 seq_printf(seq, "\trecover=REMOTE"); 8643 return 1; 8644 } 8645 if (mddev->reshape_position != MaxSector) 8646 seq_printf(seq, "\treshape=REMOTE"); 8647 else 8648 seq_printf(seq, "\tresync=REMOTE"); 8649 return 1; 8650 } 8651 if (mddev->resync_offset < MaxSector) { 8652 seq_printf(seq, "\tresync=PENDING"); 8653 return 1; 8654 } 8655 return 0; 8656 } 8657 if (resync < MD_RESYNC_ACTIVE) { 8658 seq_printf(seq, "\tresync=DELAYED"); 8659 return 1; 8660 } 8661 8662 WARN_ON(max_sectors == 0); 8663 /* Pick 'scale' such that (resync>>scale)*1000 will fit 8664 * in a sector_t, and (max_sectors>>scale) will fit in a 8665 * u32, as those are the requirements for sector_div. 8666 * Thus 'scale' must be at least 10 8667 */ 8668 scale = 10; 8669 if (sizeof(sector_t) > sizeof(unsigned long)) { 8670 while ( max_sectors/2 > (1ULL<<(scale+32))) 8671 scale++; 8672 } 8673 res = (resync>>scale)*1000; 8674 sector_div(res, (u32)((max_sectors>>scale)+1)); 8675 8676 per_milli = res; 8677 { 8678 int i, x = per_milli/50, y = 20-x; 8679 seq_printf(seq, "["); 8680 for (i = 0; i < x; i++) 8681 seq_printf(seq, "="); 8682 seq_printf(seq, ">"); 8683 for (i = 0; i < y; i++) 8684 seq_printf(seq, "."); 8685 seq_printf(seq, "] "); 8686 } 8687 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)", 8688 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? 8689 "reshape" : 8690 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)? 8691 "check" : 8692 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? 
8693 "resync" : "recovery"))), 8694 per_milli/10, per_milli % 10, 8695 (unsigned long long) resync/2, 8696 (unsigned long long) max_sectors/2); 8697 8698 /* 8699 * dt: time from mark until now 8700 * db: blocks written from mark until now 8701 * rt: remaining time 8702 * 8703 * rt is a sector_t, which is always 64bit now. We are keeping 8704 * the original algorithm, but it is not really necessary. 8705 * 8706 * Original algorithm: 8707 * So we divide before multiply in case it is 32bit and close 8708 * to the limit. 8709 * We scale the divisor (db) by 32 to avoid losing precision 8710 * near the end of resync when the number of remaining sectors 8711 * is close to 'db'. 8712 * We then divide rt by 32 after multiplying by db to compensate. 8713 * The '+1' avoids division by zero if db is very small. 8714 */ 8715 dt = ((jiffies - mddev->resync_mark) / HZ); 8716 if (!dt) dt++; 8717 8718 curr_mark_cnt = mddev->curr_mark_cnt; 8719 recovery_active = atomic_read(&mddev->recovery_active); 8720 resync_mark_cnt = mddev->resync_mark_cnt; 8721 8722 if (curr_mark_cnt >= (recovery_active + resync_mark_cnt)) 8723 db = curr_mark_cnt - (recovery_active + resync_mark_cnt); 8724 8725 rt = max_sectors - resync; /* number of remaining sectors */ 8726 rt = div64_u64(rt, db/32+1); 8727 rt *= dt; 8728 rt >>= 5; 8729 8730 seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60, 8731 ((unsigned long)rt % 60)/6); 8732 8733 seq_printf(seq, " speed=%ldK/sec", db/2/dt); 8734 return 1; 8735 } 8736 8737 static void *md_seq_start(struct seq_file *seq, loff_t *pos) 8738 __acquires(&all_mddevs_lock) 8739 { 8740 seq->poll_event = atomic_read(&md_event_count); 8741 spin_lock(&all_mddevs_lock); 8742 8743 return seq_list_start_head(&all_mddevs, *pos); 8744 } 8745 8746 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) 8747 { 8748 return seq_list_next(v, &all_mddevs, pos); 8749 } 8750 8751 static void md_seq_stop(struct seq_file *seq, void *v) 8752 __releases(&all_mddevs_lock) 8753 { 8754 spin_unlock(&all_mddevs_lock); 8755 } 8756 8757 static void md_bitmap_status(struct seq_file *seq, struct mddev *mddev) 8758 { 8759 struct md_bitmap_stats stats; 8760 unsigned long used_pages; 8761 unsigned long chunk_kb; 8762 int err; 8763 8764 if (!md_bitmap_enabled(mddev, false)) 8765 return; 8766 8767 err = mddev->bitmap_ops->get_stats(mddev->bitmap, &stats); 8768 if (err) 8769 return; 8770 8771 chunk_kb = mddev->bitmap_info.chunksize >> 10; 8772 used_pages = stats.pages - stats.missing_pages; 8773 8774 seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], %lu%s chunk", 8775 used_pages, stats.pages, used_pages << (PAGE_SHIFT - 10), 8776 chunk_kb ? chunk_kb : mddev->bitmap_info.chunksize, 8777 chunk_kb ? 
"KB" : "B"); 8778 8779 if (stats.file) { 8780 seq_puts(seq, ", file: "); 8781 seq_file_path(seq, stats.file, " \t\n"); 8782 } 8783 8784 seq_putc(seq, '\n'); 8785 } 8786 8787 static int md_seq_show(struct seq_file *seq, void *v) 8788 { 8789 struct mddev *mddev; 8790 sector_t sectors; 8791 struct md_rdev *rdev; 8792 8793 if (v == &all_mddevs) { 8794 status_personalities(seq); 8795 if (list_empty(&all_mddevs)) 8796 status_unused(seq); 8797 return 0; 8798 } 8799 8800 mddev = list_entry(v, struct mddev, all_mddevs); 8801 if (!mddev_get(mddev)) 8802 return 0; 8803 8804 spin_unlock(&all_mddevs_lock); 8805 8806 /* prevent bitmap to be freed after checking */ 8807 mutex_lock(&mddev->bitmap_info.mutex); 8808 8809 spin_lock(&mddev->lock); 8810 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { 8811 seq_printf(seq, "%s : ", mdname(mddev)); 8812 if (mddev->pers) { 8813 if (test_bit(MD_BROKEN, &mddev->flags)) 8814 seq_printf(seq, "broken"); 8815 else 8816 seq_printf(seq, "active"); 8817 if (mddev->ro == MD_RDONLY) 8818 seq_printf(seq, " (read-only)"); 8819 if (mddev->ro == MD_AUTO_READ) 8820 seq_printf(seq, " (auto-read-only)"); 8821 seq_printf(seq, " %s", mddev->pers->head.name); 8822 } else { 8823 seq_printf(seq, "inactive"); 8824 } 8825 8826 sectors = 0; 8827 rcu_read_lock(); 8828 rdev_for_each_rcu(rdev, mddev) { 8829 seq_printf(seq, " %pg[%d]", rdev->bdev, rdev->desc_nr); 8830 8831 if (test_bit(WriteMostly, &rdev->flags)) 8832 seq_printf(seq, "(W)"); 8833 if (test_bit(Journal, &rdev->flags)) 8834 seq_printf(seq, "(J)"); 8835 if (test_bit(Faulty, &rdev->flags)) { 8836 seq_printf(seq, "(F)"); 8837 continue; 8838 } 8839 if (rdev->raid_disk < 0) 8840 seq_printf(seq, "(S)"); /* spare */ 8841 if (test_bit(Replacement, &rdev->flags)) 8842 seq_printf(seq, "(R)"); 8843 sectors += rdev->sectors; 8844 } 8845 rcu_read_unlock(); 8846 8847 if (!list_empty(&mddev->disks)) { 8848 if (mddev->pers) 8849 seq_printf(seq, "\n %llu blocks", 8850 (unsigned long long) 8851 mddev->array_sectors / 2); 8852 else 8853 seq_printf(seq, "\n %llu blocks", 8854 (unsigned long long)sectors / 2); 8855 } 8856 if (mddev->persistent) { 8857 if (mddev->major_version != 0 || 8858 mddev->minor_version != 90) { 8859 seq_printf(seq," super %d.%d", 8860 mddev->major_version, 8861 mddev->minor_version); 8862 } 8863 } else if (mddev->external) 8864 seq_printf(seq, " super external:%s", 8865 mddev->metadata_type); 8866 else 8867 seq_printf(seq, " super non-persistent"); 8868 8869 if (mddev->pers) { 8870 mddev->pers->status(seq, mddev); 8871 seq_printf(seq, "\n "); 8872 if (mddev->pers->sync_request) { 8873 if (status_resync(seq, mddev)) 8874 seq_printf(seq, "\n "); 8875 } 8876 } else 8877 seq_printf(seq, "\n "); 8878 8879 md_bitmap_status(seq, mddev); 8880 8881 seq_printf(seq, "\n"); 8882 } 8883 spin_unlock(&mddev->lock); 8884 mutex_unlock(&mddev->bitmap_info.mutex); 8885 spin_lock(&all_mddevs_lock); 8886 8887 if (mddev == list_last_entry(&all_mddevs, struct mddev, all_mddevs)) 8888 status_unused(seq); 8889 8890 mddev_put_locked(mddev); 8891 return 0; 8892 } 8893 8894 static const struct seq_operations md_seq_ops = { 8895 .start = md_seq_start, 8896 .next = md_seq_next, 8897 .stop = md_seq_stop, 8898 .show = md_seq_show, 8899 }; 8900 8901 static int md_seq_open(struct inode *inode, struct file *file) 8902 { 8903 struct seq_file *seq; 8904 int error; 8905 8906 error = seq_open(file, &md_seq_ops); 8907 if (error) 8908 return error; 8909 8910 seq = file->private_data; 8911 seq->poll_event = atomic_read(&md_event_count); 8912 
	return error;
8913 }
8914 
8915 static int md_unloading;
8916 static __poll_t mdstat_poll(struct file *filp, poll_table *wait)
8917 {
8918 	struct seq_file *seq = filp->private_data;
8919 	__poll_t mask;
8920 
8921 	if (md_unloading)
8922 		return EPOLLIN|EPOLLRDNORM|EPOLLERR|EPOLLPRI;
8923 	poll_wait(filp, &md_event_waiters, wait);
8924 
8925 	/* always allow read */
8926 	mask = EPOLLIN | EPOLLRDNORM;
8927 
8928 	if (seq->poll_event != atomic_read(&md_event_count))
8929 		mask |= EPOLLERR | EPOLLPRI;
8930 	return mask;
8931 }
8932 
8933 static const struct proc_ops mdstat_proc_ops = {
8934 	.proc_open = md_seq_open,
8935 	.proc_read = seq_read,
8936 	.proc_lseek = seq_lseek,
8937 	.proc_release = seq_release,
8938 	.proc_poll = mdstat_poll,
8939 };
8940 
8941 int register_md_submodule(struct md_submodule_head *msh)
8942 {
8943 	return xa_insert(&md_submodule, msh->id, msh, GFP_KERNEL);
8944 }
8945 EXPORT_SYMBOL_GPL(register_md_submodule);
8946 
8947 void unregister_md_submodule(struct md_submodule_head *msh)
8948 {
8949 	xa_erase(&md_submodule, msh->id);
8950 }
8951 EXPORT_SYMBOL_GPL(unregister_md_submodule);
8952 
8953 int md_setup_cluster(struct mddev *mddev, int nodes)
8954 {
8955 	int ret = get_cluster_ops(mddev);
8956 
8957 	if (ret) {
8958 		request_module("md-cluster");
8959 		ret = get_cluster_ops(mddev);
8960 	}
8961 
8962 	/* ensure module won't be unloaded */
8963 	if (ret) {
8964 		pr_warn("can't find md-cluster module or get its reference.\n");
8965 		return ret;
8966 	}
8967 
8968 	ret = mddev->cluster_ops->join(mddev, nodes);
8969 	if (!ret)
8970 		mddev->safemode_delay = 0;
8971 	return ret;
8972 }
8973 
8974 void md_cluster_stop(struct mddev *mddev)
8975 {
8976 	put_cluster_ops(mddev);
8977 }
8978 
8979 static bool is_rdev_holder_idle(struct md_rdev *rdev, bool init)
8980 {
8981 	unsigned long last_events = rdev->last_events;
8982 
8983 	if (!bdev_is_partition(rdev->bdev))
8984 		return true;
8985 
8986 	/*
8987 	 * If rdev is a partition, and the user doesn't issue IO to the array,
8988 	 * the array is still not idle if IO is issued to other partitions.
8989 	 */
8990 	rdev->last_events = part_stat_read_accum(rdev->bdev->bd_disk->part0,
8991 						 sectors) -
8992 			    part_stat_read_accum(rdev->bdev, sectors);
8993 
8994 	return init || rdev->last_events <= last_events;
8995 }
8996 
8997 /*
8998  * mddev is idle if the following conditions are met since the last check:
8999  * 1) no normal IO has completed on mddev;
9000  * 2) mddev has no inflight normal IO;
9001  * 3) if any member disk is a partition, no IO has completed on the other
9002  *    partitions;
9003  *
9004  * Note that this check relies on IO accounting being enabled.
9005  */
9006 static bool is_mddev_idle(struct mddev *mddev, int init)
9007 {
9008 	unsigned long last_events = mddev->normal_io_events;
9009 	struct gendisk *disk;
9010 	struct md_rdev *rdev;
9011 	bool idle = true;
9012 
9013 	disk = mddev_is_dm(mddev) ? mddev->dm_gendisk : mddev->gendisk;
9014 	if (!disk)
9015 		return true;
9016 
9017 	mddev->normal_io_events = part_stat_read_accum(disk->part0, sectors);
9018 	if (!init && (mddev->normal_io_events > last_events ||
9019 		      bdev_count_inflight(disk->part0)))
9020 		idle = false;
9021 
9022 	rcu_read_lock();
9023 	rdev_for_each_rcu(rdev, mddev)
9024 		if (!is_rdev_holder_idle(rdev, init))
9025 			idle = false;
9026 	rcu_read_unlock();
9027 
9028 	return idle;
9029 }
9030 
9031 void md_done_sync(struct mddev *mddev, int blocks, int ok)
9032 {
9033 	/* another "blocks" (512 byte) blocks have been synced */
9034 	atomic_sub(blocks, &mddev->recovery_active);
9035 	wake_up(&mddev->recovery_wait);
9036 	if (!ok) {
9037 		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
9038 		set_bit(MD_RECOVERY_ERROR, &mddev->recovery);
9039 		md_wakeup_thread(mddev->thread);
9040 		// stop recovery, signal do_sync ....
9041 	}
9042 }
9043 EXPORT_SYMBOL(md_done_sync);
9044 
9045 /* md_write_start(mddev, bi)
9046  * If we need to update some array metadata (e.g. 'active' flag
9047  * in superblock) before writing, schedule a superblock update
9048  * and wait for it to complete.
9049  * If the array is in auto-read-only mode (MD_AUTO_READ), it is
9050  * switched to read-write (MD_RDWR) before the write proceeds.
9051  */
9052 void md_write_start(struct mddev *mddev, struct bio *bi)
9053 {
9054 	int did_change = 0;
9055 
9056 	if (bio_data_dir(bi) != WRITE)
9057 		return;
9058 
9059 	BUG_ON(mddev->ro == MD_RDONLY);
9060 	if (mddev->ro == MD_AUTO_READ) {
9061 		/* need to switch to read/write */
9062 		mddev->ro = MD_RDWR;
9063 		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
9064 		md_wakeup_thread(mddev->thread);
9065 		md_wakeup_thread(mddev->sync_thread);
9066 		did_change = 1;
9067 	}
9068 	rcu_read_lock();
9069 	percpu_ref_get(&mddev->writes_pending);
9070 	smp_mb(); /* Match smp_mb in set_in_sync() */
9071 	if (mddev->safemode == 1)
9072 		mddev->safemode = 0;
9073 	/* sync_checkers is always 0 when writes_pending is in per-cpu mode */
9074 	if (mddev->in_sync || mddev->sync_checkers) {
9075 		spin_lock(&mddev->lock);
9076 		if (mddev->in_sync) {
9077 			mddev->in_sync = 0;
9078 			set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
9079 			set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
9080 			md_wakeup_thread(mddev->thread);
9081 			did_change = 1;
9082 		}
9083 		spin_unlock(&mddev->lock);
9084 	}
9085 	rcu_read_unlock();
9086 	if (did_change)
9087 		sysfs_notify_dirent_safe(mddev->sysfs_state);
9088 	if (!mddev->has_superblocks)
9089 		return;
9090 	wait_event(mddev->sb_wait,
9091 		   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
9092 }
9093 EXPORT_SYMBOL(md_write_start);
9094 
9095 /* md_write_inc can only be called when md_write_start() has
9096  * already been called at least once for the current request.
9097  * It increments the counter and is useful when a single request
9098  * is split into several parts. Each part causes an increment and
9099  * so needs a matching md_write_end().
9100  * Unlike md_write_start(), it is safe to call md_write_inc() inside
9101  * a spinlocked region.
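 *
 * Illustrative flow for a write bio that is split into several parts
 * (a sketch only, not lifted from any particular personality):
 *
 *	md_write_start(mddev, bio);	// once, for the original bio
 *	md_write_inc(mddev, bio);	// once per additional part;
 *					// may be called under a spinlock
 *	...
 *	md_write_end(mddev);		// once per part, when it completes,
 *					// matching the start plus the incs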
9102 */ 9103 void md_write_inc(struct mddev *mddev, struct bio *bi) 9104 { 9105 if (bio_data_dir(bi) != WRITE) 9106 return; 9107 WARN_ON_ONCE(mddev->in_sync || !md_is_rdwr(mddev)); 9108 percpu_ref_get(&mddev->writes_pending); 9109 } 9110 EXPORT_SYMBOL(md_write_inc); 9111 9112 void md_write_end(struct mddev *mddev) 9113 { 9114 percpu_ref_put(&mddev->writes_pending); 9115 9116 if (mddev->safemode == 2) 9117 md_wakeup_thread(mddev->thread); 9118 else if (mddev->safemode_delay) 9119 /* The roundup() ensures this only performs locking once 9120 * every ->safemode_delay jiffies 9121 */ 9122 mod_timer(&mddev->safemode_timer, 9123 roundup(jiffies, mddev->safemode_delay) + 9124 mddev->safemode_delay); 9125 } 9126 9127 EXPORT_SYMBOL(md_write_end); 9128 9129 /* This is used by raid0 and raid10 */ 9130 void md_submit_discard_bio(struct mddev *mddev, struct md_rdev *rdev, 9131 struct bio *bio, sector_t start, sector_t size) 9132 { 9133 struct bio *discard_bio = NULL; 9134 9135 if (__blkdev_issue_discard(rdev->bdev, start, size, GFP_NOIO, 9136 &discard_bio) || !discard_bio) 9137 return; 9138 9139 bio_chain(discard_bio, bio); 9140 bio_clone_blkg_association(discard_bio, bio); 9141 mddev_trace_remap(mddev, discard_bio, bio->bi_iter.bi_sector); 9142 submit_bio_noacct(discard_bio); 9143 } 9144 EXPORT_SYMBOL_GPL(md_submit_discard_bio); 9145 9146 static void md_bitmap_start(struct mddev *mddev, 9147 struct md_io_clone *md_io_clone) 9148 { 9149 md_bitmap_fn *fn = unlikely(md_io_clone->rw == STAT_DISCARD) ? 9150 mddev->bitmap_ops->start_discard : 9151 mddev->bitmap_ops->start_write; 9152 9153 if (mddev->pers->bitmap_sector) 9154 mddev->pers->bitmap_sector(mddev, &md_io_clone->offset, 9155 &md_io_clone->sectors); 9156 9157 fn(mddev, md_io_clone->offset, md_io_clone->sectors); 9158 } 9159 9160 static void md_bitmap_end(struct mddev *mddev, struct md_io_clone *md_io_clone) 9161 { 9162 md_bitmap_fn *fn = unlikely(md_io_clone->rw == STAT_DISCARD) ? 
9163 mddev->bitmap_ops->end_discard : 9164 mddev->bitmap_ops->end_write; 9165 9166 fn(mddev, md_io_clone->offset, md_io_clone->sectors); 9167 } 9168 9169 static void md_end_clone_io(struct bio *bio) 9170 { 9171 struct md_io_clone *md_io_clone = bio->bi_private; 9172 struct bio *orig_bio = md_io_clone->orig_bio; 9173 struct mddev *mddev = md_io_clone->mddev; 9174 9175 if (bio_data_dir(orig_bio) == WRITE && md_bitmap_enabled(mddev, false)) 9176 md_bitmap_end(mddev, md_io_clone); 9177 9178 if (bio->bi_status && !orig_bio->bi_status) 9179 orig_bio->bi_status = bio->bi_status; 9180 9181 if (md_io_clone->start_time) 9182 bio_end_io_acct(orig_bio, md_io_clone->start_time); 9183 9184 bio_put(bio); 9185 bio_endio(orig_bio); 9186 percpu_ref_put(&mddev->active_io); 9187 } 9188 9189 static void md_clone_bio(struct mddev *mddev, struct bio **bio) 9190 { 9191 struct block_device *bdev = (*bio)->bi_bdev; 9192 struct md_io_clone *md_io_clone; 9193 struct bio *clone = 9194 bio_alloc_clone(bdev, *bio, GFP_NOIO, &mddev->io_clone_set); 9195 9196 md_io_clone = container_of(clone, struct md_io_clone, bio_clone); 9197 md_io_clone->orig_bio = *bio; 9198 md_io_clone->mddev = mddev; 9199 if (blk_queue_io_stat(bdev->bd_disk->queue)) 9200 md_io_clone->start_time = bio_start_io_acct(*bio); 9201 9202 if (bio_data_dir(*bio) == WRITE && md_bitmap_enabled(mddev, false)) { 9203 md_io_clone->offset = (*bio)->bi_iter.bi_sector; 9204 md_io_clone->sectors = bio_sectors(*bio); 9205 md_io_clone->rw = op_stat_group(bio_op(*bio)); 9206 md_bitmap_start(mddev, md_io_clone); 9207 } 9208 9209 clone->bi_end_io = md_end_clone_io; 9210 clone->bi_private = md_io_clone; 9211 *bio = clone; 9212 } 9213 9214 void md_account_bio(struct mddev *mddev, struct bio **bio) 9215 { 9216 percpu_ref_get(&mddev->active_io); 9217 md_clone_bio(mddev, bio); 9218 } 9219 EXPORT_SYMBOL_GPL(md_account_bio); 9220 9221 void md_free_cloned_bio(struct bio *bio) 9222 { 9223 struct md_io_clone *md_io_clone = bio->bi_private; 9224 struct bio *orig_bio = md_io_clone->orig_bio; 9225 struct mddev *mddev = md_io_clone->mddev; 9226 9227 if (bio_data_dir(orig_bio) == WRITE && md_bitmap_enabled(mddev, false)) 9228 md_bitmap_end(mddev, md_io_clone); 9229 9230 if (bio->bi_status && !orig_bio->bi_status) 9231 orig_bio->bi_status = bio->bi_status; 9232 9233 if (md_io_clone->start_time) 9234 bio_end_io_acct(orig_bio, md_io_clone->start_time); 9235 9236 bio_put(bio); 9237 percpu_ref_put(&mddev->active_io); 9238 } 9239 EXPORT_SYMBOL_GPL(md_free_cloned_bio); 9240 9241 /* md_allow_write(mddev) 9242 * Calling this ensures that the array is marked 'active' so that writes 9243 * may proceed without blocking. It is important to call this before 9244 * attempting a GFP_KERNEL allocation while holding the mddev lock. 9245 * Must be called with mddev_lock held. 
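 *
 * Typical use (an illustrative sketch only; 'new' and 'sz' are made up):
 *
 *	mddev_lock(mddev);
 *	md_allow_write(mddev);
 *	new = kmalloc(sz, GFP_KERNEL);	// may trigger writeback to the
 *					// array, which is now marked active
 *	...
 *	mddev_unlock(mddev);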
9246  */
9247 void md_allow_write(struct mddev *mddev)
9248 {
9249 	if (!mddev->pers)
9250 		return;
9251 	if (!md_is_rdwr(mddev))
9252 		return;
9253 	if (!mddev->pers->sync_request)
9254 		return;
9255 
9256 	spin_lock(&mddev->lock);
9257 	if (mddev->in_sync) {
9258 		mddev->in_sync = 0;
9259 		set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags);
9260 		set_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags);
9261 		if (mddev->safemode_delay &&
9262 		    mddev->safemode == 0)
9263 			mddev->safemode = 1;
9264 		spin_unlock(&mddev->lock);
9265 		md_update_sb(mddev, 0);
9266 		sysfs_notify_dirent_safe(mddev->sysfs_state);
9267 		/* wait for the dirty state to be recorded in the metadata */
9268 		wait_event(mddev->sb_wait,
9269 			   !test_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags));
9270 	} else
9271 		spin_unlock(&mddev->lock);
9272 }
9273 EXPORT_SYMBOL_GPL(md_allow_write);
9274 
9275 static sector_t md_sync_max_sectors(struct mddev *mddev,
9276 				    enum sync_action action)
9277 {
9278 	switch (action) {
9279 	case ACTION_RESYNC:
9280 	case ACTION_CHECK:
9281 	case ACTION_REPAIR:
9282 		atomic64_set(&mddev->resync_mismatches, 0);
9283 		fallthrough;
9284 	case ACTION_RESHAPE:
9285 		return mddev->resync_max_sectors;
9286 	case ACTION_RECOVER:
9287 		return mddev->dev_sectors;
9288 	default:
9289 		return 0;
9290 	}
9291 }
9292 
9293 /*
9294  * If lazy recovery is requested and all rdevs are in sync, select the rdev with
9295  * the highest index to perform recovery to build the initial xor data; this is
9296  * the same behaviour as the old bitmap.
9297  */
9298 static bool mddev_select_lazy_recover_rdev(struct mddev *mddev)
9299 {
9300 	struct md_rdev *recover_rdev = NULL;
9301 	struct md_rdev *rdev;
9302 	bool ret = false;
9303 
9304 	rcu_read_lock();
9305 	rdev_for_each_rcu(rdev, mddev) {
9306 		if (rdev->raid_disk < 0)
9307 			continue;
9308 
9309 		if (test_bit(Faulty, &rdev->flags) ||
9310 		    !test_bit(In_sync, &rdev->flags))
9311 			break;
9312 
9313 		if (!recover_rdev || recover_rdev->raid_disk < rdev->raid_disk)
9314 			recover_rdev = rdev;
9315 	}
9316 
9317 	if (recover_rdev) {
9318 		clear_bit(In_sync, &recover_rdev->flags);
9319 		ret = true;
9320 	}
9321 
9322 	rcu_read_unlock();
9323 	return ret;
9324 }
9325 
9326 static sector_t md_sync_position(struct mddev *mddev, enum sync_action action)
9327 {
9328 	sector_t start = 0;
9329 	struct md_rdev *rdev;
9330 
9331 	switch (action) {
9332 	case ACTION_CHECK:
9333 	case ACTION_REPAIR:
9334 		return mddev->resync_min;
9335 	case ACTION_RESYNC:
9336 		if (!mddev->bitmap)
9337 			return mddev->resync_offset;
9338 		return 0;
9339 	case ACTION_RESHAPE:
9340 		/*
9341 		 * If the original node aborts reshaping then we continue the
9342 		 * reshaping, so set the position again to avoid restarting the
9343 		 * reshape from the very beginning.
9344 		 */
9345 		if (mddev_is_clustered(mddev) &&
9346 		    mddev->reshape_position != MaxSector)
9347 			return mddev->reshape_position;
9348 		return 0;
9349 	case ACTION_RECOVER:
9350 		start = MaxSector;
9351 		rcu_read_lock();
9352 		rdev_for_each_rcu(rdev, mddev)
9353 			if (rdev_needs_recovery(rdev, start))
9354 				start = rdev->recovery_offset;
9355 		rcu_read_unlock();
9356 
9357 		/*
9358 		 * If there are no spares and raid456 lazy initial recovery is
9359 		 * requested, start the recovery from the beginning.
9360 		 */
9361 		if (test_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery) &&
9362 		    start == MaxSector && mddev_select_lazy_recover_rdev(mddev))
9363 			start = 0;
9364 
9365 		/* If there is a bitmap, we need to make sure all
9366 		 * writes that started before we added a spare
9367 		 * complete before we start doing a recovery.
9368 * Otherwise the write might complete and (via 9369 * bitmap_endwrite) set a bit in the bitmap after the 9370 * recovery has checked that bit and skipped that 9371 * region. 9372 */ 9373 if (mddev->bitmap) { 9374 mddev->pers->quiesce(mddev, 1); 9375 mddev->pers->quiesce(mddev, 0); 9376 } 9377 return start; 9378 default: 9379 return MaxSector; 9380 } 9381 } 9382 9383 static bool sync_io_within_limit(struct mddev *mddev) 9384 { 9385 /* 9386 * For raid456, sync IO is stripe(4k) per IO, for other levels, it's 9387 * RESYNC_PAGES(64k) per IO. 9388 */ 9389 return atomic_read(&mddev->recovery_active) < 9390 (raid_is_456(mddev) ? 8 : 128) * sync_io_depth(mddev); 9391 } 9392 9393 #define SYNC_MARKS 10 9394 #define SYNC_MARK_STEP (3*HZ) 9395 #define UPDATE_FREQUENCY (5*60*HZ) 9396 void md_do_sync(struct md_thread *thread) 9397 { 9398 struct mddev *mddev = thread->mddev; 9399 struct mddev *mddev2; 9400 unsigned int currspeed = 0, window; 9401 sector_t max_sectors,j, io_sectors, recovery_done; 9402 unsigned long mark[SYNC_MARKS]; 9403 unsigned long update_time; 9404 sector_t mark_cnt[SYNC_MARKS]; 9405 int last_mark,m; 9406 sector_t last_check; 9407 int skipped = 0; 9408 struct md_rdev *rdev; 9409 enum sync_action action; 9410 const char *desc; 9411 struct blk_plug plug; 9412 int ret; 9413 9414 /* just incase thread restarts... */ 9415 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) 9416 return; 9417 9418 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 9419 goto skip; 9420 9421 if (test_bit(MD_RECOVERY_WAIT, &mddev->recovery) || 9422 !md_is_rdwr(mddev)) {/* never try to sync a read-only array */ 9423 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 9424 goto skip; 9425 } 9426 9427 if (mddev_is_clustered(mddev)) { 9428 ret = mddev->cluster_ops->resync_start(mddev); 9429 if (ret) 9430 goto skip; 9431 9432 set_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags); 9433 if (!(test_bit(MD_RECOVERY_SYNC, &mddev->recovery) || 9434 test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) || 9435 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) 9436 && ((unsigned long long)mddev->curr_resync_completed 9437 < (unsigned long long)mddev->resync_max_sectors)) 9438 goto skip; 9439 } 9440 9441 action = md_sync_action(mddev); 9442 if (action == ACTION_FROZEN || action == ACTION_IDLE) { 9443 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 9444 goto skip; 9445 } 9446 9447 desc = md_sync_action_name(action); 9448 mddev->last_sync_action = action; 9449 9450 /* 9451 * Before starting a resync we must have set curr_resync to 9452 * 2, and then checked that every "conflicting" array has curr_resync 9453 * less than ours. When we find one that is the same or higher 9454 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync 9455 * to 1 if we choose to yield (based arbitrarily on address of mddev structure). 9456 * This will mean we have to start checking from the beginning again. 
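 * (In the code below, the values 2 and 1 mentioned above appear as the
 * symbolic states MD_RESYNC_DELAYED and MD_RESYNC_YIELDED respectively.)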
9457 * 9458 */ 9459 if (mddev_is_clustered(mddev)) 9460 mddev->cluster_ops->resync_start_notify(mddev); 9461 do { 9462 int mddev2_minor = -1; 9463 mddev->curr_resync = MD_RESYNC_DELAYED; 9464 9465 try_again: 9466 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 9467 goto skip; 9468 spin_lock(&all_mddevs_lock); 9469 list_for_each_entry(mddev2, &all_mddevs, all_mddevs) { 9470 if (test_bit(MD_DELETED, &mddev2->flags)) 9471 continue; 9472 if (mddev2 == mddev) 9473 continue; 9474 if (!mddev->parallel_resync 9475 && mddev2->curr_resync 9476 && match_mddev_units(mddev, mddev2)) { 9477 DEFINE_WAIT(wq); 9478 if (mddev < mddev2 && 9479 mddev->curr_resync == MD_RESYNC_DELAYED) { 9480 /* arbitrarily yield */ 9481 mddev->curr_resync = MD_RESYNC_YIELDED; 9482 wake_up(&resync_wait); 9483 } 9484 if (mddev > mddev2 && 9485 mddev->curr_resync == MD_RESYNC_YIELDED) 9486 /* no need to wait here, we can wait the next 9487 * time 'round when curr_resync == 2 9488 */ 9489 continue; 9490 /* We need to wait 'interruptible' so as not to 9491 * contribute to the load average, and not to 9492 * be caught by 'softlockup' 9493 */ 9494 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE); 9495 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 9496 mddev2->curr_resync >= mddev->curr_resync) { 9497 if (mddev2_minor != mddev2->md_minor) { 9498 mddev2_minor = mddev2->md_minor; 9499 pr_info("md: delaying %s of %s until %s has finished (they share one or more physical units)\n", 9500 desc, mdname(mddev), 9501 mdname(mddev2)); 9502 } 9503 spin_unlock(&all_mddevs_lock); 9504 9505 if (signal_pending(current)) 9506 flush_signals(current); 9507 schedule(); 9508 finish_wait(&resync_wait, &wq); 9509 goto try_again; 9510 } 9511 finish_wait(&resync_wait, &wq); 9512 } 9513 } 9514 spin_unlock(&all_mddevs_lock); 9515 } while (mddev->curr_resync < MD_RESYNC_DELAYED); 9516 9517 max_sectors = md_sync_max_sectors(mddev, action); 9518 j = md_sync_position(mddev, action); 9519 9520 pr_info("md: %s of RAID array %s\n", desc, mdname(mddev)); 9521 pr_debug("md: minimum _guaranteed_ speed: %d KB/sec/disk.\n", speed_min(mddev)); 9522 pr_debug("md: using maximum available idle IO bandwidth (but not more than %d KB/sec) for %s.\n", 9523 speed_max(mddev), desc); 9524 9525 is_mddev_idle(mddev, 1); /* this initializes IO event counters */ 9526 9527 io_sectors = 0; 9528 for (m = 0; m < SYNC_MARKS; m++) { 9529 mark[m] = jiffies; 9530 mark_cnt[m] = io_sectors; 9531 } 9532 last_mark = 0; 9533 mddev->resync_mark = mark[last_mark]; 9534 mddev->resync_mark_cnt = mark_cnt[last_mark]; 9535 9536 /* 9537 * Tune reconstruction: 9538 */ 9539 window = 32 * (PAGE_SIZE / 512); 9540 pr_debug("md: using %dk window, over a total of %lluk.\n", 9541 window/2, (unsigned long long)max_sectors/2); 9542 9543 atomic_set(&mddev->recovery_active, 0); 9544 last_check = 0; 9545 9546 if (j >= MD_RESYNC_ACTIVE) { 9547 pr_debug("md: resuming %s of %s from checkpoint.\n", 9548 desc, mdname(mddev)); 9549 mddev->curr_resync = j; 9550 } else 9551 mddev->curr_resync = MD_RESYNC_ACTIVE; /* no longer delayed */ 9552 mddev->curr_resync_completed = j; 9553 sysfs_notify_dirent_safe(mddev->sysfs_completed); 9554 md_new_event(); 9555 update_time = jiffies; 9556 9557 blk_start_plug(&plug); 9558 while (j < max_sectors) { 9559 sector_t sectors; 9560 9561 skipped = 0; 9562 9563 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 9564 ((mddev->curr_resync > mddev->curr_resync_completed && 9565 (mddev->curr_resync - mddev->curr_resync_completed) 9566 > (max_sectors >> 4)) || 9567 
time_after_eq(jiffies, update_time + UPDATE_FREQUENCY) || 9568 (j - mddev->curr_resync_completed)*2 9569 >= mddev->resync_max - mddev->curr_resync_completed || 9570 mddev->curr_resync_completed > mddev->resync_max 9571 )) { 9572 /* time to update curr_resync_completed */ 9573 wait_event(mddev->recovery_wait, 9574 atomic_read(&mddev->recovery_active) == 0); 9575 mddev->curr_resync_completed = j; 9576 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && 9577 j > mddev->resync_offset) 9578 mddev->resync_offset = j; 9579 update_time = jiffies; 9580 set_bit(MD_SB_CHANGE_CLEAN, &mddev->sb_flags); 9581 sysfs_notify_dirent_safe(mddev->sysfs_completed); 9582 } 9583 9584 while (j >= mddev->resync_max && 9585 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 9586 /* As this condition is controlled by user-space, 9587 * we can block indefinitely, so use '_interruptible' 9588 * to avoid triggering warnings. 9589 */ 9590 flush_signals(current); /* just in case */ 9591 wait_event_interruptible(mddev->recovery_wait, 9592 mddev->resync_max > j 9593 || test_bit(MD_RECOVERY_INTR, 9594 &mddev->recovery)); 9595 } 9596 9597 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 9598 break; 9599 9600 if (mddev->bitmap_ops && mddev->bitmap_ops->skip_sync_blocks) { 9601 sectors = mddev->bitmap_ops->skip_sync_blocks(mddev, j); 9602 if (sectors) 9603 goto update; 9604 } 9605 9606 sectors = mddev->pers->sync_request(mddev, j, max_sectors, 9607 &skipped); 9608 if (sectors == 0) { 9609 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 9610 break; 9611 } 9612 9613 if (!skipped) { /* actual IO requested */ 9614 io_sectors += sectors; 9615 atomic_add(sectors, &mddev->recovery_active); 9616 } 9617 9618 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 9619 break; 9620 9621 update: 9622 j += sectors; 9623 if (j > max_sectors) 9624 /* when skipping, extra large numbers can be returned. */ 9625 j = max_sectors; 9626 if (j >= MD_RESYNC_ACTIVE) 9627 mddev->curr_resync = j; 9628 mddev->curr_mark_cnt = io_sectors; 9629 if (last_check == 0) 9630 /* this is the earliest that rebuild will be 9631 * visible in /proc/mdstat 9632 */ 9633 md_new_event(); 9634 9635 if (last_check + window > io_sectors || j == max_sectors) 9636 continue; 9637 9638 last_check = io_sectors; 9639 repeat: 9640 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) { 9641 /* step marks */ 9642 int next = (last_mark+1) % SYNC_MARKS; 9643 9644 mddev->resync_mark = mark[next]; 9645 mddev->resync_mark_cnt = mark_cnt[next]; 9646 mark[next] = jiffies; 9647 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active); 9648 last_mark = next; 9649 } 9650 9651 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 9652 break; 9653 9654 /* 9655 * this loop exits only if either when we are slower than 9656 * the 'hard' speed limit, or the system was IO-idle for 9657 * a jiffy. 9658 * the system might be non-idle CPU-wise, but we only care 9659 * about not overloading the IO subsystem. (things like an 9660 * e2fsck being done on the RAID array should execute fast) 9661 */ 9662 cond_resched(); 9663 9664 recovery_done = io_sectors - atomic_read(&mddev->recovery_active); 9665 currspeed = ((unsigned long)(recovery_done - mddev->resync_mark_cnt))/2 9666 /((jiffies-mddev->resync_mark)/HZ +1) +1; 9667 9668 if (currspeed > speed_min(mddev)) { 9669 if (currspeed > speed_max(mddev)) { 9670 msleep(500); 9671 goto repeat; 9672 } 9673 if (!sync_io_within_limit(mddev) && 9674 !is_mddev_idle(mddev, 0)) { 9675 /* 9676 * Give other IO more of a chance. 
9677 * The faster the devices, the less we wait. 9678 */ 9679 wait_event(mddev->recovery_wait, 9680 !atomic_read(&mddev->recovery_active)); 9681 } 9682 } 9683 } 9684 pr_info("md: %s: %s %s.\n",mdname(mddev), desc, 9685 test_bit(MD_RECOVERY_INTR, &mddev->recovery) 9686 ? "interrupted" : "done"); 9687 /* 9688 * this also signals 'finished resyncing' to md_stop 9689 */ 9690 blk_finish_plug(&plug); 9691 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); 9692 9693 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 9694 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 9695 mddev->curr_resync >= MD_RESYNC_ACTIVE) { 9696 mddev->curr_resync_completed = mddev->curr_resync; 9697 sysfs_notify_dirent_safe(mddev->sysfs_completed); 9698 } 9699 mddev->pers->sync_request(mddev, max_sectors, max_sectors, &skipped); 9700 9701 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && 9702 mddev->curr_resync > MD_RESYNC_ACTIVE) { 9703 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 9704 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 9705 if (mddev->curr_resync >= mddev->resync_offset) { 9706 pr_debug("md: checkpointing %s of %s.\n", 9707 desc, mdname(mddev)); 9708 if (test_bit(MD_RECOVERY_ERROR, 9709 &mddev->recovery)) 9710 mddev->resync_offset = 9711 mddev->curr_resync_completed; 9712 else 9713 mddev->resync_offset = 9714 mddev->curr_resync; 9715 } 9716 } else 9717 mddev->resync_offset = MaxSector; 9718 } else { 9719 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 9720 mddev->curr_resync = MaxSector; 9721 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 9722 test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) { 9723 rcu_read_lock(); 9724 rdev_for_each_rcu(rdev, mddev) 9725 if (mddev->delta_disks >= 0 && 9726 rdev_needs_recovery(rdev, mddev->curr_resync)) 9727 rdev->recovery_offset = mddev->curr_resync; 9728 rcu_read_unlock(); 9729 } 9730 } 9731 } 9732 skip: 9733 /* set CHANGE_PENDING here since maybe another update is needed, 9734 * so other nodes are informed. It should be harmless for normal 9735 * raid */ 9736 set_mask_bits(&mddev->sb_flags, 0, 9737 BIT(MD_SB_CHANGE_PENDING) | BIT(MD_SB_CHANGE_DEVS)); 9738 9739 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 9740 !test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 9741 mddev->delta_disks > 0 && 9742 mddev->pers->finish_reshape && 9743 mddev->pers->size && 9744 !mddev_is_dm(mddev)) { 9745 mddev_lock_nointr(mddev); 9746 md_set_array_sectors(mddev, mddev->pers->size(mddev, 0, 0)); 9747 mddev_unlock(mddev); 9748 if (!mddev_is_clustered(mddev)) 9749 set_capacity_and_notify(mddev->gendisk, 9750 mddev->array_sectors); 9751 } 9752 9753 spin_lock(&mddev->lock); 9754 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 9755 /* We completed so min/max setting can be forgotten if used. */ 9756 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 9757 mddev->resync_min = 0; 9758 mddev->resync_max = MaxSector; 9759 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 9760 mddev->resync_min = mddev->curr_resync_completed; 9761 set_bit(MD_RECOVERY_DONE, &mddev->recovery); 9762 mddev->curr_resync = MD_RESYNC_NONE; 9763 spin_unlock(&mddev->lock); 9764 9765 wake_up(&resync_wait); 9766 md_wakeup_thread(mddev->thread); 9767 return; 9768 } 9769 EXPORT_SYMBOL_GPL(md_do_sync); 9770 9771 static bool rdev_removeable(struct md_rdev *rdev) 9772 { 9773 /* rdev is not used. */ 9774 if (rdev->raid_disk < 0) 9775 return false; 9776 9777 /* There are still inflight io, don't remove this rdev. 
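 * (nr_pending counts the requests the personality still has outstanding
 * against this rdev.)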
 */
9778 	if (atomic_read(&rdev->nr_pending))
9779 		return false;
9780 
9781 	/*
9782 	 * An error occurred but has not yet been acknowledged by the metadata
9783 	 * handler, don't remove this rdev.
9784 	 */
9785 	if (test_bit(Blocked, &rdev->flags))
9786 		return false;
9787 
9788 	/* Faulty rdev is not used, it's safe to remove it. */
9789 	if (test_bit(Faulty, &rdev->flags))
9790 		return true;
9791 
9792 	/* Journal disk can only be removed if it's faulty. */
9793 	if (test_bit(Journal, &rdev->flags))
9794 		return false;
9795 
9796 	/*
9797 	 * 'In_sync' is cleared while 'raid_disk' is valid, which means the
9798 	 * replacement has just become active via pers->spare_active(), and
9799 	 * pers->hot_remove_disk() will then replace this rdev with the replacement.
9800 	 */
9801 	if (!test_bit(In_sync, &rdev->flags))
9802 		return true;
9803 
9804 	return false;
9805 }
9806 
9807 static bool rdev_is_spare(struct md_rdev *rdev)
9808 {
9809 	return !test_bit(Candidate, &rdev->flags) && rdev->raid_disk >= 0 &&
9810 	       !test_bit(In_sync, &rdev->flags) &&
9811 	       !test_bit(Journal, &rdev->flags) &&
9812 	       !test_bit(Faulty, &rdev->flags);
9813 }
9814 
9815 static bool rdev_addable(struct md_rdev *rdev)
9816 {
9817 	struct mddev *mddev;
9818 
9819 	mddev = READ_ONCE(rdev->mddev);
9820 	if (!mddev)
9821 		return false;
9822 
9823 	/* rdev is already used, don't add it again. */
9824 	if (test_bit(Candidate, &rdev->flags) || rdev->raid_disk >= 0 ||
9825 	    test_bit(Faulty, &rdev->flags))
9826 		return false;
9827 
9828 	/* Allow adding a journal disk. */
9829 	if (test_bit(Journal, &rdev->flags))
9830 		return true;
9831 
9832 	/* Allow adding if the array is read-write. */
9833 	if (md_is_rdwr(mddev))
9834 		return true;
9835 
9836 	/*
9837 	 * For a read-only array, only allow re-adding an rdev. And if a bitmap
9838 	 * is used, don't allow re-adding an rdev that is too old.
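	 *
	 * Illustrative summary of the checks in this function:
	 *	already used (Candidate or raid_disk >= 0) or Faulty -> not addable
	 *	Journal rdev                                         -> addable
	 *	array is read-write                                  -> addable
	 *	read-only, saved_raid_disk >= 0 and !Bitmap_sync     -> addable (re-add)
	 *	anything else                                        -> not addable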
9839 */ 9840 if (rdev->saved_raid_disk >= 0 && !test_bit(Bitmap_sync, &rdev->flags)) 9841 return true; 9842 9843 return false; 9844 } 9845 9846 static bool md_spares_need_change(struct mddev *mddev) 9847 { 9848 struct md_rdev *rdev; 9849 9850 rcu_read_lock(); 9851 rdev_for_each_rcu(rdev, mddev) { 9852 if (rdev_removeable(rdev) || rdev_addable(rdev)) { 9853 rcu_read_unlock(); 9854 return true; 9855 } 9856 } 9857 rcu_read_unlock(); 9858 return false; 9859 } 9860 9861 static int remove_spares(struct mddev *mddev, struct md_rdev *this) 9862 { 9863 struct md_rdev *rdev; 9864 int removed = 0; 9865 9866 rdev_for_each(rdev, mddev) { 9867 if ((this == NULL || rdev == this) && rdev_removeable(rdev) && 9868 !mddev->pers->hot_remove_disk(mddev, rdev)) { 9869 sysfs_unlink_rdev(mddev, rdev); 9870 rdev->saved_raid_disk = rdev->raid_disk; 9871 rdev->raid_disk = -1; 9872 removed++; 9873 } 9874 } 9875 9876 if (removed && mddev->kobj.sd) 9877 sysfs_notify_dirent_safe(mddev->sysfs_degraded); 9878 9879 return removed; 9880 } 9881 9882 static int remove_and_add_spares(struct mddev *mddev, 9883 struct md_rdev *this) 9884 { 9885 struct md_rdev *rdev; 9886 int spares = 0; 9887 int removed = 0; 9888 9889 if (this && test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 9890 /* Mustn't remove devices when resync thread is running */ 9891 return 0; 9892 9893 removed = remove_spares(mddev, this); 9894 if (this && removed) 9895 goto no_add; 9896 9897 rdev_for_each(rdev, mddev) { 9898 if (this && this != rdev) 9899 continue; 9900 if (rdev_is_spare(rdev)) 9901 spares++; 9902 if (!rdev_addable(rdev)) 9903 continue; 9904 if (!test_bit(Journal, &rdev->flags)) 9905 rdev->recovery_offset = 0; 9906 if (mddev->pers->hot_add_disk(mddev, rdev) == 0) { 9907 /* failure here is OK */ 9908 sysfs_link_rdev(mddev, rdev); 9909 if (!test_bit(Journal, &rdev->flags)) 9910 spares++; 9911 md_new_event(); 9912 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 9913 } 9914 } 9915 no_add: 9916 if (removed) 9917 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 9918 return spares; 9919 } 9920 9921 static bool md_choose_sync_action(struct mddev *mddev, int *spares) 9922 { 9923 /* Check if reshape is in progress first. */ 9924 if (mddev->reshape_position != MaxSector) { 9925 if (mddev->pers->check_reshape == NULL || 9926 mddev->pers->check_reshape(mddev) != 0) 9927 return false; 9928 9929 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 9930 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 9931 clear_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery); 9932 return true; 9933 } 9934 9935 /* Check if resync is in progress. */ 9936 if (mddev->resync_offset < MaxSector) { 9937 remove_spares(mddev, NULL); 9938 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 9939 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 9940 clear_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery); 9941 return true; 9942 } 9943 9944 /* 9945 * Remove any failed drives, then add spares if possible. Spares are 9946 * also removed and re-added, to allow the personality to fail the 9947 * re-add. 9948 */ 9949 *spares = remove_and_add_spares(mddev, NULL); 9950 if (*spares || test_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery)) { 9951 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 9952 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 9953 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 9954 9955 /* Start new recovery. */ 9956 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 9957 return true; 9958 } 9959 9960 /* Delay to choose resync/check/repair in md_do_sync(). 
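 * (Whether this becomes a plain resync, a check or a repair is decided
 * later from MD_RECOVERY_REQUESTED/MD_RECOVERY_CHECK; see md_sync_action()
 * and md_do_sync().)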
*/ 9961 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 9962 return true; 9963 9964 /* Nothing to be done */ 9965 return false; 9966 } 9967 9968 static void md_start_sync(struct work_struct *ws) 9969 { 9970 struct mddev *mddev = container_of(ws, struct mddev, sync_work); 9971 int spares = 0; 9972 bool suspend = false; 9973 char *name; 9974 9975 /* 9976 * If reshape is still in progress, spares won't be added or removed 9977 * from conf until reshape is done. 9978 */ 9979 if (mddev->reshape_position == MaxSector && 9980 md_spares_need_change(mddev)) { 9981 suspend = true; 9982 mddev_suspend(mddev, false); 9983 } 9984 9985 mddev_lock_nointr(mddev); 9986 if (!md_is_rdwr(mddev)) { 9987 /* 9988 * On a read-only array we can: 9989 * - remove failed devices 9990 * - add already-in_sync devices if the array itself is in-sync. 9991 * As we only add devices that are already in-sync, we can 9992 * activate the spares immediately. 9993 */ 9994 remove_and_add_spares(mddev, NULL); 9995 goto not_running; 9996 } 9997 9998 if (!md_choose_sync_action(mddev, &spares)) 9999 goto not_running; 10000 10001 if (!mddev->pers->sync_request) 10002 goto not_running; 10003 10004 /* 10005 * We are adding a device or devices to an array which has the bitmap 10006 * stored on all devices. So make sure all bitmap pages get written. 10007 */ 10008 if (spares && md_bitmap_enabled(mddev, true)) 10009 mddev->bitmap_ops->write_all(mddev); 10010 10011 name = test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) ? 10012 "reshape" : "resync"; 10013 rcu_assign_pointer(mddev->sync_thread, 10014 md_register_thread(md_do_sync, mddev, name)); 10015 if (!mddev->sync_thread) { 10016 pr_warn("%s: could not start resync thread...\n", 10017 mdname(mddev)); 10018 /* leave the spares where they are, it shouldn't hurt */ 10019 goto not_running; 10020 } 10021 10022 mddev_unlock(mddev); 10023 /* 10024 * md_start_sync was triggered by MD_RECOVERY_NEEDED, so we should 10025 * not set it again. Otherwise, we may cause issue like this one: 10026 * https://bugzilla.kernel.org/show_bug.cgi?id=218200 10027 * Therefore, use __mddev_resume(mddev, false). 10028 */ 10029 if (suspend) 10030 __mddev_resume(mddev, false); 10031 md_wakeup_thread(mddev->sync_thread); 10032 sysfs_notify_dirent_safe(mddev->sysfs_action); 10033 md_new_event(); 10034 return; 10035 10036 not_running: 10037 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 10038 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 10039 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 10040 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 10041 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 10042 mddev_unlock(mddev); 10043 /* 10044 * md_start_sync was triggered by MD_RECOVERY_NEEDED, so we should 10045 * not set it again. Otherwise, we may cause issue like this one: 10046 * https://bugzilla.kernel.org/show_bug.cgi?id=218200 10047 * Therefore, use __mddev_resume(mddev, false). 
10048 */
10049 	if (suspend)
10050 		__mddev_resume(mddev, false);
10051 
10052 	wake_up(&resync_wait);
10053 	if (test_and_clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery) &&
10054 	    mddev->sysfs_action)
10055 		sysfs_notify_dirent_safe(mddev->sysfs_action);
10056 }
10057 
10058 static void unregister_sync_thread(struct mddev *mddev)
10059 {
10060 	if (!test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
10061 		/* resync/recovery still happening */
10062 		clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
10063 		return;
10064 	}
10065 
10066 	if (WARN_ON_ONCE(!mddev->sync_thread))
10067 		return;
10068 
10069 	md_reap_sync_thread(mddev);
10070 }
10071 
10072 static bool md_should_do_recovery(struct mddev *mddev)
10073 {
10074 	/*
10075 	 * As long as one of the following flags is set, there is recovery
10076 	 * work to do or clean up.
10077 	 */
10078 	if (test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
10079 	    test_bit(MD_RECOVERY_DONE, &mddev->recovery))
10080 		return true;
10081 
10082 	/*
10083 	 * If no flags are set and the array is read-only,
10084 	 * there is nothing to do.
10085 	 */
10086 	if (!md_is_rdwr(mddev))
10087 		return false;
10088 
10089 	/*
10090 	 * MD_SB_CHANGE_PENDING indicates that the array is switching from clean to
10091 	 * active, and no action is needed for now.
10092 	 * All other MD_SB_* flags require a superblock update.
10093 	 */
10094 	if (mddev->sb_flags & ~ (1<<MD_SB_CHANGE_PENDING))
10095 		return true;
10096 
10097 	/*
10098 	 * If the array is not using external metadata and there has been no data
10099 	 * written for some time, then the array's status needs to be set to
10100 	 * in_sync.
10101 	 */
10102 	if (mddev->external == 0 && mddev->safemode == 1)
10103 		return true;
10104 
10105 	/*
10106 	 * When the system is about to restart or the process receives a signal,
10107 	 * the array needs to be synchronized as soon as possible.
10108 	 * Once the data synchronization is completed, the array status needs
10109 	 * to be changed to in_sync.
10110 	 */
10111 	if (mddev->safemode == 2 && !mddev->in_sync &&
10112 	    mddev->resync_offset == MaxSector)
10113 		return true;
10114 
10115 	return false;
10116 }
10117 
10118 /*
10119  * This routine is regularly called by all per-raid-array threads to
10120  * deal with generic issues like resync and super-block update.
10121  * Raid personalities that don't have a thread (linear/raid0) do not
10122  * need this as they never do any recovery or update the superblock.
10123  *
10124  * It does not do any resync itself, but rather "forks" off other threads
10125  * to do that as needed.
10126  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
10127  * "->recovery" and create a thread at ->sync_thread.
10128  * When the thread finishes it sets MD_RECOVERY_DONE
10129  * and wakes up this thread which will reap the thread and finish up.
10130  * This thread also removes any faulty devices (with nr_pending == 0).
10131  *
10132  * The overall approach is:
10133  *  1/ If the superblock needs updating, update it.
10134  *  2/ If a recovery thread is running, don't do anything else.
10135  *  3/ If recovery has finished, clean up, possibly marking spares active.
10136  *  4/ If there are any faulty devices, remove them.
10137  *  5/ If the array is degraded, try to add spare devices.
10138  *  6/ If the array has spares or is not in-sync, start a resync thread.
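 *
 * (Added note, describing the usual caller rather than anything in the
 * original comment: every personality daemon thread is expected to call this
 * periodically, e.g. raid1d() begins each pass with
 *
 *	md_check_recovery(mddev);
 *
 * before handling its queued I/O.)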
10139 */ 10140 void md_check_recovery(struct mddev *mddev) 10141 { 10142 if (md_bitmap_enabled(mddev, false) && mddev->bitmap_ops->daemon_work) 10143 mddev->bitmap_ops->daemon_work(mddev); 10144 10145 if (signal_pending(current)) { 10146 if (mddev->pers->sync_request && !mddev->external) { 10147 pr_debug("md: %s in immediate safe mode\n", 10148 mdname(mddev)); 10149 mddev->safemode = 2; 10150 } 10151 flush_signals(current); 10152 } 10153 10154 if (!md_should_do_recovery(mddev)) 10155 return; 10156 10157 if (mddev_trylock(mddev)) { 10158 bool try_set_sync = mddev->safemode != 0; 10159 10160 if (!mddev->external && mddev->safemode == 1) 10161 mddev->safemode = 0; 10162 10163 if (!md_is_rdwr(mddev)) { 10164 struct md_rdev *rdev; 10165 10166 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { 10167 unregister_sync_thread(mddev); 10168 goto unlock; 10169 } 10170 10171 if (!mddev->external && mddev->in_sync) 10172 /* 10173 * 'Blocked' flag not needed as failed devices 10174 * will be recorded if array switched to read/write. 10175 * Leaving it set will prevent the device 10176 * from being removed. 10177 */ 10178 rdev_for_each(rdev, mddev) 10179 clear_bit(Blocked, &rdev->flags); 10180 10181 /* 10182 * There is no thread, but we need to call 10183 * ->spare_active and clear saved_raid_disk 10184 */ 10185 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 10186 md_reap_sync_thread(mddev); 10187 10188 /* 10189 * Let md_start_sync() to remove and add rdevs to the 10190 * array. 10191 */ 10192 if (md_spares_need_change(mddev)) { 10193 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 10194 queue_work(md_misc_wq, &mddev->sync_work); 10195 } 10196 10197 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 10198 clear_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery); 10199 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 10200 clear_bit(MD_SB_CHANGE_PENDING, &mddev->sb_flags); 10201 10202 goto unlock; 10203 } 10204 10205 if (mddev_is_clustered(mddev)) { 10206 struct md_rdev *rdev, *tmp; 10207 /* kick the device if another node issued a 10208 * remove disk. 10209 */ 10210 rdev_for_each_safe(rdev, tmp, mddev) { 10211 if (rdev->raid_disk < 0 && 10212 test_and_clear_bit(ClusterRemove, &rdev->flags)) 10213 md_kick_rdev_from_array(rdev); 10214 } 10215 } 10216 10217 if (try_set_sync && !mddev->external && !mddev->in_sync) { 10218 spin_lock(&mddev->lock); 10219 set_in_sync(mddev); 10220 spin_unlock(&mddev->lock); 10221 } 10222 10223 if (mddev->sb_flags) 10224 md_update_sb(mddev, 0); 10225 10226 /* 10227 * Never start a new sync thread if MD_RECOVERY_RUNNING is 10228 * still set. 10229 */ 10230 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) { 10231 unregister_sync_thread(mddev); 10232 goto unlock; 10233 } 10234 10235 /* Set RUNNING before clearing NEEDED to avoid 10236 * any transients in the value of "sync_action". 
10237 */ 10238 mddev->curr_resync_completed = 0; 10239 spin_lock(&mddev->lock); 10240 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 10241 spin_unlock(&mddev->lock); 10242 /* Clear some bits that don't mean anything, but 10243 * might be left set 10244 */ 10245 clear_bit(MD_RECOVERY_INTR, &mddev->recovery); 10246 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 10247 10248 if (test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) && 10249 !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) { 10250 queue_work(md_misc_wq, &mddev->sync_work); 10251 } else { 10252 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 10253 wake_up(&resync_wait); 10254 } 10255 10256 unlock: 10257 wake_up(&mddev->sb_wait); 10258 mddev_unlock(mddev); 10259 } 10260 } 10261 EXPORT_SYMBOL(md_check_recovery); 10262 10263 void md_reap_sync_thread(struct mddev *mddev) 10264 { 10265 struct md_rdev *rdev; 10266 sector_t old_dev_sectors = mddev->dev_sectors; 10267 bool is_reshaped = false; 10268 10269 /* resync has finished, collect result */ 10270 md_unregister_thread(mddev, &mddev->sync_thread); 10271 atomic_inc(&mddev->sync_seq); 10272 10273 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 10274 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 10275 mddev->degraded != mddev->raid_disks) { 10276 /* success...*/ 10277 /* activate any spares */ 10278 if (mddev->pers->spare_active(mddev)) { 10279 sysfs_notify_dirent_safe(mddev->sysfs_degraded); 10280 set_bit(MD_SB_CHANGE_DEVS, &mddev->sb_flags); 10281 } 10282 } 10283 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 10284 mddev->pers->finish_reshape) { 10285 mddev->pers->finish_reshape(mddev); 10286 if (mddev_is_clustered(mddev)) 10287 is_reshaped = true; 10288 } 10289 10290 /* If array is no-longer degraded, then any saved_raid_disk 10291 * information must be scrapped. 10292 */ 10293 if (!mddev->degraded) 10294 rdev_for_each(rdev, mddev) 10295 rdev->saved_raid_disk = -1; 10296 10297 md_update_sb(mddev, 1); 10298 /* MD_SB_CHANGE_PENDING should be cleared by md_update_sb, so we can 10299 * call resync_finish here if MD_CLUSTER_RESYNC_LOCKED is set by 10300 * clustered raid */ 10301 if (test_and_clear_bit(MD_CLUSTER_RESYNC_LOCKED, &mddev->flags)) 10302 mddev->cluster_ops->resync_finish(mddev); 10303 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 10304 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 10305 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 10306 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 10307 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 10308 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 10309 clear_bit(MD_RECOVERY_LAZY_RECOVER, &mddev->recovery); 10310 /* 10311 * We call mddev->cluster_ops->update_size here because sync_size could 10312 * be changed by md_update_sb, and MD_RECOVERY_RESHAPE is cleared, 10313 * so it is time to update size across cluster. 
10314 */ 10315 if (mddev_is_clustered(mddev) && is_reshaped 10316 && !test_bit(MD_CLOSING, &mddev->flags)) 10317 mddev->cluster_ops->update_size(mddev, old_dev_sectors); 10318 /* flag recovery needed just to double check */ 10319 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 10320 sysfs_notify_dirent_safe(mddev->sysfs_completed); 10321 sysfs_notify_dirent_safe(mddev->sysfs_action); 10322 md_new_event(); 10323 if (mddev->event_work.func) 10324 queue_work(md_misc_wq, &mddev->event_work); 10325 wake_up(&resync_wait); 10326 } 10327 EXPORT_SYMBOL(md_reap_sync_thread); 10328 10329 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev) 10330 { 10331 sysfs_notify_dirent_safe(rdev->sysfs_state); 10332 wait_event_timeout(rdev->blocked_wait, !rdev_blocked(rdev), 10333 msecs_to_jiffies(5000)); 10334 rdev_dec_pending(rdev, mddev); 10335 } 10336 EXPORT_SYMBOL(md_wait_for_blocked_rdev); 10337 10338 void md_finish_reshape(struct mddev *mddev) 10339 { 10340 /* called be personality module when reshape completes. */ 10341 struct md_rdev *rdev; 10342 10343 rdev_for_each(rdev, mddev) { 10344 if (rdev->data_offset > rdev->new_data_offset) 10345 rdev->sectors += rdev->data_offset - rdev->new_data_offset; 10346 else 10347 rdev->sectors -= rdev->new_data_offset - rdev->data_offset; 10348 rdev->data_offset = rdev->new_data_offset; 10349 } 10350 } 10351 EXPORT_SYMBOL(md_finish_reshape); 10352 10353 /* Bad block management */ 10354 10355 /* Returns true on success, false on failure */ 10356 bool rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors, 10357 int is_new) 10358 { 10359 struct mddev *mddev = rdev->mddev; 10360 10361 /* 10362 * Recording new badblocks for faulty rdev will force unnecessary 10363 * super block updating. This is fragile for external management because 10364 * userspace daemon may trying to remove this device and deadlock may 10365 * occur. This will be probably solved in the mdadm, but it is safer to 10366 * avoid it. 
10367 */ 10368 if (test_bit(Faulty, &rdev->flags)) 10369 return true; 10370 10371 if (is_new) 10372 s += rdev->new_data_offset; 10373 else 10374 s += rdev->data_offset; 10375 10376 if (!badblocks_set(&rdev->badblocks, s, sectors, 0)) 10377 return false; 10378 10379 /* Make sure they get written out promptly */ 10380 if (test_bit(ExternalBbl, &rdev->flags)) 10381 sysfs_notify_dirent_safe(rdev->sysfs_unack_badblocks); 10382 sysfs_notify_dirent_safe(rdev->sysfs_state); 10383 set_mask_bits(&mddev->sb_flags, 0, 10384 BIT(MD_SB_CHANGE_CLEAN) | BIT(MD_SB_CHANGE_PENDING)); 10385 md_wakeup_thread(rdev->mddev->thread); 10386 return true; 10387 } 10388 EXPORT_SYMBOL_GPL(rdev_set_badblocks); 10389 10390 void rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors, 10391 int is_new) 10392 { 10393 if (is_new) 10394 s += rdev->new_data_offset; 10395 else 10396 s += rdev->data_offset; 10397 10398 if (!badblocks_clear(&rdev->badblocks, s, sectors)) 10399 return; 10400 10401 if (test_bit(ExternalBbl, &rdev->flags)) 10402 sysfs_notify_dirent_safe(rdev->sysfs_badblocks); 10403 } 10404 EXPORT_SYMBOL_GPL(rdev_clear_badblocks); 10405 10406 static int md_notify_reboot(struct notifier_block *this, 10407 unsigned long code, void *x) 10408 { 10409 struct mddev *mddev; 10410 int need_delay = 0; 10411 10412 spin_lock(&all_mddevs_lock); 10413 list_for_each_entry(mddev, &all_mddevs, all_mddevs) { 10414 if (!mddev_get(mddev)) 10415 continue; 10416 spin_unlock(&all_mddevs_lock); 10417 if (mddev_trylock(mddev)) { 10418 if (mddev->pers) 10419 __md_stop_writes(mddev); 10420 if (mddev->persistent) 10421 mddev->safemode = 2; 10422 mddev_unlock(mddev); 10423 } 10424 need_delay = 1; 10425 spin_lock(&all_mddevs_lock); 10426 mddev_put_locked(mddev); 10427 } 10428 spin_unlock(&all_mddevs_lock); 10429 10430 /* 10431 * certain more exotic SCSI devices are known to be 10432 * volatile wrt too early system reboots. While the 10433 * right place to handle this issue is the given 10434 * driver, we do want to have a safe RAID driver ... 
10435 */ 10436 if (need_delay) 10437 msleep(1000); 10438 10439 return NOTIFY_DONE; 10440 } 10441 10442 static struct notifier_block md_notifier = { 10443 .notifier_call = md_notify_reboot, 10444 .next = NULL, 10445 .priority = INT_MAX, /* before any real devices */ 10446 }; 10447 10448 static void md_geninit(void) 10449 { 10450 pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t)); 10451 10452 proc_create("mdstat", S_IRUGO, NULL, &mdstat_proc_ops); 10453 } 10454 10455 static int __init md_init(void) 10456 { 10457 int ret = md_bitmap_init(); 10458 10459 if (ret) 10460 return ret; 10461 10462 ret = md_llbitmap_init(); 10463 if (ret) 10464 goto err_bitmap; 10465 10466 ret = -ENOMEM; 10467 md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0); 10468 if (!md_wq) 10469 goto err_wq; 10470 10471 md_misc_wq = alloc_workqueue("md_misc", 0, 0); 10472 if (!md_misc_wq) 10473 goto err_misc_wq; 10474 10475 ret = __register_blkdev(MD_MAJOR, "md", md_probe); 10476 if (ret < 0) 10477 goto err_md; 10478 10479 ret = __register_blkdev(0, "mdp", md_probe); 10480 if (ret < 0) 10481 goto err_mdp; 10482 mdp_major = ret; 10483 10484 register_reboot_notifier(&md_notifier); 10485 raid_table_header = register_sysctl("dev/raid", raid_table); 10486 10487 md_geninit(); 10488 return 0; 10489 10490 err_mdp: 10491 unregister_blkdev(MD_MAJOR, "md"); 10492 err_md: 10493 destroy_workqueue(md_misc_wq); 10494 err_misc_wq: 10495 destroy_workqueue(md_wq); 10496 err_wq: 10497 md_llbitmap_exit(); 10498 err_bitmap: 10499 md_bitmap_exit(); 10500 return ret; 10501 } 10502 10503 static void check_sb_changes(struct mddev *mddev, struct md_rdev *rdev) 10504 { 10505 struct mdp_superblock_1 *sb = page_address(rdev->sb_page); 10506 struct md_rdev *rdev2, *tmp; 10507 int role, ret; 10508 10509 /* 10510 * If size is changed in another node then we need to 10511 * do resize as well. 10512 */ 10513 if (mddev->dev_sectors != le64_to_cpu(sb->size)) { 10514 ret = mddev->pers->resize(mddev, le64_to_cpu(sb->size)); 10515 if (ret) 10516 pr_info("md-cluster: resize failed\n"); 10517 else if (md_bitmap_enabled(mddev, false)) 10518 mddev->bitmap_ops->update_sb(mddev->bitmap); 10519 } 10520 10521 /* Check for change of roles in the active devices */ 10522 rdev_for_each_safe(rdev2, tmp, mddev) { 10523 if (test_bit(Faulty, &rdev2->flags)) { 10524 if (test_bit(ClusterRemove, &rdev2->flags)) 10525 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 10526 continue; 10527 } 10528 10529 /* Check if the roles changed */ 10530 role = le16_to_cpu(sb->dev_roles[rdev2->desc_nr]); 10531 10532 if (test_bit(Candidate, &rdev2->flags)) { 10533 if (role == MD_DISK_ROLE_FAULTY) { 10534 pr_info("md: Removing Candidate device %pg because add failed\n", 10535 rdev2->bdev); 10536 md_kick_rdev_from_array(rdev2); 10537 continue; 10538 } 10539 else 10540 clear_bit(Candidate, &rdev2->flags); 10541 } 10542 10543 if (role != rdev2->raid_disk) { 10544 /* 10545 * got activated except reshape is happening. 10546 */ 10547 if (rdev2->raid_disk == -1 && role != MD_DISK_ROLE_SPARE && 10548 !(le32_to_cpu(sb->feature_map) & 10549 MD_FEATURE_RESHAPE_ACTIVE) && 10550 !mddev->cluster_ops->resync_status_get(mddev)) { 10551 /* 10552 * -1 to make raid1_add_disk() set conf->fullsync 10553 * to 1. This could avoid skipping sync when the 10554 * remote node is down during resyncing. 
10555 */ 10556 if ((le32_to_cpu(sb->feature_map) 10557 & MD_FEATURE_RECOVERY_OFFSET)) 10558 rdev2->saved_raid_disk = -1; 10559 else 10560 rdev2->saved_raid_disk = role; 10561 ret = remove_and_add_spares(mddev, rdev2); 10562 pr_info("Activated spare: %pg\n", 10563 rdev2->bdev); 10564 /* wakeup mddev->thread here, so array could 10565 * perform resync with the new activated disk */ 10566 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 10567 md_wakeup_thread(mddev->thread); 10568 } 10569 /* device faulty 10570 * We just want to do the minimum to mark the disk 10571 * as faulty. The recovery is performed by the 10572 * one who initiated the error. 10573 */ 10574 if (role == MD_DISK_ROLE_FAULTY || 10575 role == MD_DISK_ROLE_JOURNAL) { 10576 md_error(mddev, rdev2); 10577 clear_bit(Blocked, &rdev2->flags); 10578 } 10579 } 10580 } 10581 10582 if (mddev->raid_disks != le32_to_cpu(sb->raid_disks)) { 10583 ret = update_raid_disks(mddev, le32_to_cpu(sb->raid_disks)); 10584 if (ret) 10585 pr_warn("md: updating array disks failed. %d\n", ret); 10586 } 10587 10588 /* 10589 * Since mddev->delta_disks has already updated in update_raid_disks, 10590 * so it is time to check reshape. 10591 */ 10592 if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) && 10593 (le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) { 10594 /* 10595 * reshape is happening in the remote node, we need to 10596 * update reshape_position and call start_reshape. 10597 */ 10598 mddev->reshape_position = le64_to_cpu(sb->reshape_position); 10599 if (mddev->pers->update_reshape_pos) 10600 mddev->pers->update_reshape_pos(mddev); 10601 if (mddev->pers->start_reshape) 10602 mddev->pers->start_reshape(mddev); 10603 } else if (test_bit(MD_RESYNCING_REMOTE, &mddev->recovery) && 10604 mddev->reshape_position != MaxSector && 10605 !(le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) { 10606 /* reshape is just done in another node. */ 10607 mddev->reshape_position = MaxSector; 10608 if (mddev->pers->update_reshape_pos) 10609 mddev->pers->update_reshape_pos(mddev); 10610 } 10611 10612 /* Finally set the event to be up to date */ 10613 mddev->events = le64_to_cpu(sb->events); 10614 } 10615 10616 static int read_rdev(struct mddev *mddev, struct md_rdev *rdev) 10617 { 10618 int err; 10619 struct page *swapout = rdev->sb_page; 10620 struct mdp_superblock_1 *sb; 10621 10622 /* Store the sb page of the rdev in the swapout temporary 10623 * variable in case we err in the future 10624 */ 10625 rdev->sb_page = NULL; 10626 err = alloc_disk_sb(rdev); 10627 if (err == 0) { 10628 ClearPageUptodate(rdev->sb_page); 10629 rdev->sb_loaded = 0; 10630 err = super_types[mddev->major_version]. 10631 load_super(rdev, NULL, mddev->minor_version); 10632 } 10633 if (err < 0) { 10634 pr_warn("%s: %d Could not reload rdev(%d) err: %d. 
Restoring old values\n", 10635 __func__, __LINE__, rdev->desc_nr, err); 10636 if (rdev->sb_page) 10637 put_page(rdev->sb_page); 10638 rdev->sb_page = swapout; 10639 rdev->sb_loaded = 1; 10640 return err; 10641 } 10642 10643 sb = page_address(rdev->sb_page); 10644 /* Read the offset unconditionally, even if MD_FEATURE_RECOVERY_OFFSET 10645 * is not set 10646 */ 10647 10648 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RECOVERY_OFFSET)) 10649 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset); 10650 10651 /* The other node finished recovery, call spare_active to set 10652 * device In_sync and mddev->degraded 10653 */ 10654 if (rdev->recovery_offset == MaxSector && 10655 !test_bit(In_sync, &rdev->flags) && 10656 mddev->pers->spare_active(mddev)) 10657 sysfs_notify_dirent_safe(mddev->sysfs_degraded); 10658 10659 put_page(swapout); 10660 return 0; 10661 } 10662 10663 void md_reload_sb(struct mddev *mddev, int nr) 10664 { 10665 struct md_rdev *rdev = NULL, *iter; 10666 int err; 10667 10668 /* Find the rdev */ 10669 rdev_for_each_rcu(iter, mddev) { 10670 if (iter->desc_nr == nr) { 10671 rdev = iter; 10672 break; 10673 } 10674 } 10675 10676 if (!rdev) { 10677 pr_warn("%s: %d Could not find rdev with nr %d\n", __func__, __LINE__, nr); 10678 return; 10679 } 10680 10681 err = read_rdev(mddev, rdev); 10682 if (err < 0) 10683 return; 10684 10685 check_sb_changes(mddev, rdev); 10686 10687 /* Read all rdev's to update recovery_offset */ 10688 rdev_for_each_rcu(rdev, mddev) { 10689 if (!test_bit(Faulty, &rdev->flags)) 10690 read_rdev(mddev, rdev); 10691 } 10692 } 10693 EXPORT_SYMBOL(md_reload_sb); 10694 10695 #ifndef MODULE 10696 10697 /* 10698 * Searches all registered partitions for autorun RAID arrays 10699 * at boot time. 10700 */ 10701 10702 static DEFINE_MUTEX(detected_devices_mutex); 10703 static LIST_HEAD(all_detected_devices); 10704 struct detected_devices_node { 10705 struct list_head list; 10706 dev_t dev; 10707 }; 10708 10709 void md_autodetect_dev(dev_t dev) 10710 { 10711 struct detected_devices_node *node_detected_dev; 10712 10713 node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL); 10714 if (node_detected_dev) { 10715 node_detected_dev->dev = dev; 10716 mutex_lock(&detected_devices_mutex); 10717 list_add_tail(&node_detected_dev->list, &all_detected_devices); 10718 mutex_unlock(&detected_devices_mutex); 10719 } 10720 } 10721 10722 void md_autostart_arrays(int part) 10723 { 10724 struct md_rdev *rdev; 10725 struct detected_devices_node *node_detected_dev; 10726 dev_t dev; 10727 int i_scanned, i_passed; 10728 10729 i_scanned = 0; 10730 i_passed = 0; 10731 10732 pr_info("md: Autodetecting RAID arrays.\n"); 10733 10734 mutex_lock(&detected_devices_mutex); 10735 while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) { 10736 i_scanned++; 10737 node_detected_dev = list_entry(all_detected_devices.next, 10738 struct detected_devices_node, list); 10739 list_del(&node_detected_dev->list); 10740 dev = node_detected_dev->dev; 10741 kfree(node_detected_dev); 10742 mutex_unlock(&detected_devices_mutex); 10743 rdev = md_import_device(dev,0, 90); 10744 mutex_lock(&detected_devices_mutex); 10745 if (IS_ERR(rdev)) 10746 continue; 10747 10748 if (test_bit(Faulty, &rdev->flags)) 10749 continue; 10750 10751 set_bit(AutoDetected, &rdev->flags); 10752 list_add(&rdev->same_set, &pending_raid_disks); 10753 i_passed++; 10754 } 10755 mutex_unlock(&detected_devices_mutex); 10756 10757 pr_debug("md: Scanned %d and added %d devices.\n", i_scanned, i_passed); 10758 10759 
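	/*
	 * (Descriptive comment added for clarity, not in the original source:
	 * autorun_devices(), defined earlier in this file, now takes the
	 * devices collected on pending_raid_disks above and tries to
	 * assemble them into arrays.)
	 */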
autorun_devices(part); 10760 } 10761 10762 #endif /* !MODULE */ 10763 10764 static __exit void md_exit(void) 10765 { 10766 struct mddev *mddev; 10767 int delay = 1; 10768 10769 unregister_blkdev(MD_MAJOR,"md"); 10770 unregister_blkdev(mdp_major, "mdp"); 10771 unregister_reboot_notifier(&md_notifier); 10772 unregister_sysctl_table(raid_table_header); 10773 10774 /* We cannot unload the modules while some process is 10775 * waiting for us in select() or poll() - wake them up 10776 */ 10777 md_unloading = 1; 10778 while (waitqueue_active(&md_event_waiters)) { 10779 /* not safe to leave yet */ 10780 wake_up(&md_event_waiters); 10781 msleep(delay); 10782 delay += delay; 10783 } 10784 remove_proc_entry("mdstat", NULL); 10785 10786 spin_lock(&all_mddevs_lock); 10787 list_for_each_entry(mddev, &all_mddevs, all_mddevs) { 10788 if (!mddev_get(mddev)) 10789 continue; 10790 spin_unlock(&all_mddevs_lock); 10791 export_array(mddev); 10792 mddev->ctime = 0; 10793 mddev->hold_active = 0; 10794 /* 10795 * As the mddev is now fully clear, mddev_put will schedule 10796 * the mddev for destruction by a workqueue, and the 10797 * destroy_workqueue() below will wait for that to complete. 10798 */ 10799 spin_lock(&all_mddevs_lock); 10800 mddev_put_locked(mddev); 10801 } 10802 spin_unlock(&all_mddevs_lock); 10803 10804 destroy_workqueue(md_misc_wq); 10805 destroy_workqueue(md_wq); 10806 md_bitmap_exit(); 10807 } 10808 10809 subsys_initcall(md_init); 10810 module_exit(md_exit) 10811 10812 static int get_ro(char *buffer, const struct kernel_param *kp) 10813 { 10814 return sprintf(buffer, "%d\n", start_readonly); 10815 } 10816 static int set_ro(const char *val, const struct kernel_param *kp) 10817 { 10818 return kstrtouint(val, 10, (unsigned int *)&start_readonly); 10819 } 10820 10821 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR); 10822 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR); 10823 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR); 10824 module_param(create_on_open, bool, S_IRUSR|S_IWUSR); 10825 module_param(legacy_async_del_gendisk, bool, 0600); 10826 module_param(check_new_feature, bool, 0600); 10827 10828 MODULE_LICENSE("GPL"); 10829 MODULE_DESCRIPTION("MD RAID framework"); 10830 MODULE_ALIAS("md"); 10831 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR); 10832
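
/*
 * (Illustrative usage note appended here, not part of the original source:
 * the module parameters declared above can be set when the module is loaded
 * or, where writable, changed later through sysfs, for example:
 *
 *	modprobe md_mod start_ro=1 start_dirty_degraded=0
 *	echo 1 > /sys/module/md_mod/parameters/start_ro
 *
 * "new_array" is write-only and is the hook mdadm uses to pre-create an
 * array device, e.g. by writing a name such as "md_test" to
 * /sys/module/md_mod/parameters/new_array.)
 */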