/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include "md.h"
#include "bitmap.h"

#define DEBUG 0
#define dprintk(x...) ((void)(DEBUG && printk(x)))


#ifndef MODULE
static void autostart_arrays(int part);
#endif

static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static void md_print_devices(void);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);

#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }

/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_{min,max}
 * or via /sys/block/mdX/md/sync_speed_{min,max}.
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(mddev_t *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(mddev_t *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}
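
/*
 * Example (illustrative only): tuning the resync speed from user space
 * through the interfaces named above; "md0" is an assumed array name.
 *
 *	echo 50000  > /proc/sys/dev/raid/speed_limit_min
 *	echo 100000 > /sys/block/md0/md/sync_speed_min
 *
 * A non-zero per-array sysfs value overrides the global sysctl, which
 * is exactly what speed_min()/speed_max() implement.
 */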

static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MIN,
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MAX,
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_dir_table[] = {
	{
		.ctl_name	= DEV_RAID,
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_root_table[] = {
	{
		.ctl_name	= CTL_DEV,
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ .ctl_name = 0 }
};

static struct block_device_operations md_fops;

static int start_readonly;

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);


/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while holding a reference
 * to the current mddev must mddev_put it.
 */
#define for_each_mddev(mddev,tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		tmp = all_mddevs.next;					\
		mddev = NULL;});					\
	     ({ if (tmp != &all_mddevs)					\
			mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (mddev) mddev_put(mddev);				\
		mddev = list_entry(tmp, mddev_t, all_mddevs);		\
		tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		tmp = tmp->next;})					\
		)
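
/*
 * Typical use of for_each_mddev() (sketch; it mirrors md_print_devices()
 * later in this file):
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp) {
 *		... mddev is pinned by a reference here; code that
 *		... breaks out early must mddev_put(mddev) itself
 *	}
 */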

/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device
 * is being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request.  By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static int md_make_request(struct request_queue *q, struct bio *bio)
{
	mddev_t *mddev = q->queuedata;
	int rv;
	if (mddev == NULL || mddev->pers == NULL) {
		bio_io_error(bio);
		return 0;
	}
	rcu_read_lock();
	if (mddev->suspended) {
		DEFINE_WAIT(__wait);
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!mddev->suspended)
				break;
			rcu_read_unlock();
			schedule();
			rcu_read_lock();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);
	rcu_read_unlock();
	rv = mddev->pers->make_request(q, bio);
	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);

	return rv;
}

static void mddev_suspend(mddev_t *mddev)
{
	BUG_ON(mddev->suspended);
	mddev->suspended = 1;
	synchronize_rcu();
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);
	md_unregister_thread(mddev->thread);
	mddev->thread = NULL;
	/* we now know that no code is executing in the personality module,
	 * except possibly the tail end of a ->bi_end_io function, but that
	 * is certain to complete before the module has a chance to get
	 * unloaded
	 */
}

static void mddev_resume(mddev_t *mddev)
{
	mddev->suspended = 0;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);
}
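
/*
 * Sketch of the intended pairing (illustrative): a reconfiguration path
 * would bracket its critical section like
 *
 *	mddev_suspend(mddev);	... drains active_io, quiesces personality
 *	... modify structures that ->make_request() looks at ...
 *	mddev_resume(mddev);	... unquiesces, wakes md_make_request() waiters
 */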

static inline mddev_t *mddev_get(mddev_t *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(mddev_t *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    !mddev->hold_active) {
		list_del(&mddev->all_mddevs);
		if (mddev->gendisk) {
			/* we did a probe so need to clean up.
			 * Call schedule_work inside the spinlock
			 * so that flush_scheduled_work() after
			 * mddev_find will succeed in waiting for the
			 * work to be done.
			 */
			INIT_WORK(&mddev->del_work, mddev_delayed_delete);
			schedule_work(&mddev->del_work);
		} else
			kfree(mddev);
	}
	spin_unlock(&all_mddevs_lock);
}

static mddev_t * mddev_find(dev_t unit)
{
	mddev_t *mddev, *new = NULL;

 retry:
	spin_lock(&all_mddevs_lock);

	if (unit) {
		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
			if (mddev->unit == unit) {
				mddev_get(mddev);
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return mddev;
			}

		if (new) {
			list_add(&new->all_mddevs, &all_mddevs);
			spin_unlock(&all_mddevs_lock);
			new->hold_active = UNTIL_IOCTL;
			return new;
		}
	} else if (new) {
		/* find an unused unit number */
		static int next_minor = 512;
		int start = next_minor;
		int is_free = 0;
		int dev = 0;
		while (!is_free) {
			dev = MKDEV(MD_MAJOR, next_minor);
			next_minor++;
			if (next_minor > MINORMASK)
				next_minor = 0;
			if (next_minor == start) {
				/* Oh dear, all in use. */
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return NULL;
			}

			is_free = 1;
			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
				if (mddev->unit == dev) {
					is_free = 0;
					break;
				}
		}
		new->unit = dev;
		new->md_minor = MINOR(dev);
		new->hold_active = UNTIL_STOP;
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mutex_init(&new->open_mutex);
	mutex_init(&new->reconfig_mutex);
	INIT_LIST_HEAD(&new->disks);
	INIT_LIST_HEAD(&new->all_mddevs);
	init_timer(&new->safemode_timer);
	atomic_set(&new->active, 1);
	atomic_set(&new->openers, 0);
	atomic_set(&new->active_io, 0);
	spin_lock_init(&new->write_lock);
	init_waitqueue_head(&new->sb_wait);
	init_waitqueue_head(&new->recovery_wait);
	new->reshape_position = MaxSector;
	new->resync_min = 0;
	new->resync_max = MaxSector;
	new->level = LEVEL_NONE;

	goto retry;
}

static inline int mddev_lock(mddev_t * mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

static inline int mddev_is_locked(mddev_t *mddev)
{
	return mutex_is_locked(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(mddev_t * mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}

static inline void mddev_unlock(mddev_t * mddev)
{
	mutex_unlock(&mddev->reconfig_mutex);

	md_wakeup_thread(mddev->thread);
}

static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}

static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

static struct mdk_personality *find_pers(int level, char *clevel)
{
	struct mdk_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}

/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
	sector_t num_sectors = bdev->bd_inode->i_size / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}
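
/*
 * Worked example (assuming MD_NEW_SIZE_SECTORS() in md_p.h reserves the
 * last 64K, i.e. 128 sectors, and rounds down to a 64K boundary): for a
 * 1000000-sector device,
 *
 *	1000000 & ~127 = 999936,  999936 - 128 = 999808
 *
 * so the 0.90 superblock starts at sector 999808, 64K-aligned and
 * within the last 128K of the device.
 */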

static int alloc_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page)
		MD_BUG();

	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
		return -ENOMEM;
	}

	return 0;
}

static void free_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_start = 0;
		rdev->sectors = 0;
	}
}


static void super_written(struct bio *bio, int error)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk("md: super_written gets error=%d, uptodate=%d\n",
		       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
		WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
		md_error(mddev, rdev);
	}

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	bio_put(bio);
}

static void super_written_barrier(struct bio *bio, int error)
{
	struct bio *bio2 = bio->bi_private;
	mdk_rdev_t *rdev = bio2->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
	    error == -EOPNOTSUPP) {
		unsigned long flags;
		/* barriers don't appear to be supported :-( */
		set_bit(BarriersNotsupp, &rdev->flags);
		mddev->barriers_work = 0;
		spin_lock_irqsave(&mddev->write_lock, flags);
		bio2->bi_next = mddev->biolist;
		mddev->biolist = bio2;
		spin_unlock_irqrestore(&mddev->write_lock, flags);
		wake_up(&mddev->sb_wait);
		bio_put(bio);
	} else {
		bio_put(bio2);
		bio->bi_private = rdev;
		super_written(bio, error);
	}
}

void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
		   sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 *
	 * As we might need to resubmit the request if BIO_RW_BARRIER
	 * causes ENOTSUPP, we allocate a spare bio...
	 */
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG);

	bio->bi_bdev = rdev->bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;
	bio->bi_rw = rw;

	atomic_inc(&mddev->pending_writes);
	if (!test_bit(BarriersNotsupp, &rdev->flags)) {
		struct bio *rbio;
		rw |= (1<<BIO_RW_BARRIER);
		rbio = bio_clone(bio, GFP_NOIO);
		rbio->bi_private = bio;
		rbio->bi_end_io = super_written_barrier;
		submit_bio(rw, rbio);
	} else
		submit_bio(rw, bio);
}

void md_super_wait(mddev_t *mddev)
{
	/* wait for all superblock writes that were scheduled to complete.
	 * if any had to be retried (due to BARRIER problems), retry them
	 */
	DEFINE_WAIT(wq);
	for(;;) {
		prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&mddev->pending_writes)==0)
			break;
		while (mddev->biolist) {
			struct bio *bio;
			spin_lock_irq(&mddev->write_lock);
			bio = mddev->biolist;
			mddev->biolist = bio->bi_next ;
			bio->bi_next = NULL;
			spin_unlock_irq(&mddev->write_lock);
			submit_bio(bio->bi_rw, bio);
		}
		schedule();
	}
	finish_wait(&mddev->sb_wait, &wq);
}
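
/*
 * md_super_write()/md_super_wait() form an asynchronous submit/drain
 * pair; see e.g. super_90_rdev_size_change() below:
 *
 *	md_super_write(mddev, rdev, rdev->sb_start, rdev->sb_size,
 *		       rdev->sb_page);
 *	md_super_wait(mddev);	... also resubmits barrier-failed writes
 */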

static void bi_complete(struct bio *bio, int error)
{
	complete((struct completion*)bio->bi_private);
}

int sync_page_io(struct block_device *bdev, sector_t sector, int size,
		   struct page *page, int rw)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	struct completion event;
	int ret;

	rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG);

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);

static int read_disk_sb(mdk_rdev_t * rdev, int size)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->sb_page) {
		MD_BUG();
		return -EINVAL;
	}
	if (rdev->sb_loaded)
		return 0;


	if (!sync_page_io(rdev->bdev, rdev->sb_start, size, rdev->sb_page, READ))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
		bdevname(rdev->bdev,b));
	return -EINVAL;
}

static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}


static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}
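
/*
 * Example: md_csum_fold(0x0001ffff)
 *	first fold:  0xffff + 0x0001 = 0x10000
 *	second fold: 0x0000 + 0x0001 = 0x0001
 * The second addition is needed because the first can itself carry
 * into bit 16; two folds always leave a 16-bit value.
 */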

static unsigned int calc_sb_csum(mdp_super_t * sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);


#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures.  It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences).  However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}


/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Update the superblock for rdev with data in mddev
 *      This does not write to disc.
 *
 */

struct super_type  {
	char		    *name;
	struct module	    *owner;
	int		    (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev,
					  int minor_version);
	int		    (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	void		    (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	unsigned long long  (*rdev_size_change)(mdk_rdev_t *rdev,
						sector_t num_sectors);
};
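
/*
 * Sketch of how an assembler might drive this interface (illustrative,
 * following the load_super() return convention documented above):
 *
 *	switch (super_types[ver].load_super(rdev, refdev, minor)) {
 *	case 1:			... newer superblock than refdev
 *		refdev = rdev;	... so rdev becomes the new reference
 *	case 0:			... compatible: keep rdev as a candidate
 *		break;
 *	default:		... -EINVAL or other error: reject rdev
 *		break;
 *	}
 */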

/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 *
 */
int md_check_no_bitmap(mddev_t *mddev)
{
	if (!mddev->bitmap_file && !mddev->bitmap_offset)
		return 0;
	printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
		mdname(mddev), mddev->pers->name);
	return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);

/*
 * load_super for 0.90.0
 */
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;

	/*
	 * Calculate the position of the superblock (in 512-byte sectors);
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	rdev->sb_start = calc_dev_sboffset(rdev->bdev);

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret) return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = (mdp_super_t*)page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version,
			b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
			b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	rdev->sectors = rdev->sb_start;

	if (rdev->sectors < sb->size * 2 && sb->level > 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

 abort:
	return ret;
}
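
/*
 * Example of the event-count comparison above: if rdev's superblock
 * records event count 42 and the current refdev records 41, load_super
 * returns 1 and the caller should adopt rdev as the new refdev; an
 * equal or older count returns 0.
 */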

/*
 * validate_super for 0.90.0
 */
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);
	clear_bit(BarriersNotsupp, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->external = 0;
		mddev->chunk_sectors = sb->chunk_size >> 9;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->dev_sectors = sb->size * 2;
		mddev->events = ev1;
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = MD_SB_BYTES >> 9;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk_sectors = sb->new_chunk >> 9;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
				sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_file == NULL)
			mddev->bitmap_offset = mddev->default_bitmap_offset;

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling */
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}

/*
 * sync_super for 0.90.0
 */
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_super_t *sb;
	mdk_rdev_t *rdev2;
	int next_spare = mddev->raid_disks;


	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = (mdp_super_t*)page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size = mddev->dev_sectors / 2;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk_sectors << 9;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync)
	{
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_sectors << 9;

	if (mddev->bitmap && mddev->bitmap_file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		mdp_disk_t *d;
		int desc_nr;
		if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
		    && !test_bit(Faulty, &rdev2->flags))
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
		    && !test_bit(Faulty, &rdev2->flags))
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (test_bit(In_sync, &rdev2->flags)) {
			d->state = (1<<MD_DISK_ACTIVE);
			d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}

/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->mddev->bitmap_offset)
		return 0; /* can't move bitmap */
	rdev->sb_start = calc_dev_sboffset(rdev->bdev);
	if (!num_sectors || num_sectors > rdev->sb_start)
		num_sectors = rdev->sb_start;
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors / 2; /* kB for sysfs */
}


/*
 * version 1 superblock
 */

static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
	__le32 disk_csum;
	u32 csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32*)sb;
	int i;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (i=0; size>=4; size -= 4 )
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(__le16*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}

static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_start;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock in 512byte sectors.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
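	/*
	 * Worked example for minor_version 0 (computed by the code just
	 * below): a device of 1000005 sectors gives
	 *
	 *	1000005 - 16 = 999989,  999989 & ~7 = 999984
	 *
	 * i.e. the superblock sits 21 sectors (10.5K) from the end,
	 * 4K-aligned and inside the documented 8K..12K window.
	 */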
	switch(minor_version) {
	case 0:
		sb_start = rdev->bdev->bd_inode->i_size >> 9;
		sb_start -= 8*2;
		sb_start &= ~(sector_t)(4*2-1);
		break;
	case 1:
		sb_start = 0;
		break;
	case 2:
		sb_start = 8;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_start = sb_start;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;


	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb =
			(struct mdp_superblock_1*)page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
				" superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version)
		rdev->sectors = (rdev->bdev->bd_inode->i_size >> 9) -
			le64_to_cpu(sb->data_offset);
	else
		rdev->sectors = rdev->sb_start;
	if (rdev->sectors < le64_to_cpu(sb->data_size))
		return -EINVAL;
	rdev->sectors = le64_to_cpu(sb->data_size);
	if (le64_to_cpu(sb->size) > rdev->sectors)
		return -EINVAL;
	return ret;
}

static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);
	clear_bit(BarriersNotsupp, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->external = 0;
		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->dev_sectors = le64_to_cpu(sb->size);
		mddev->events = ev1;
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = 1024 >> 9;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks =  (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_file == NULL )
			mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling */
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		if (rdev->desc_nr < 0 ||
		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
			role = 0xffff;
			rdev->desc_nr = -1;
		} else
			role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET))
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
			else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}

static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb;
	mdk_rdev_t *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad1, 0, sizeof(sb->pad1));
	memset(sb->pad2, 0, sizeof(sb->pad2));
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->dev_sectors);
	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);

	if (mddev->bitmap && mddev->bitmap_file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	if (rdev->raid_disk >= 0 &&
	    !test_bit(In_sync, &rdev->flags)) {
		if (mddev->curr_resync_completed > rdev->recovery_offset)
			rdev->recovery_offset = mddev->curr_resync_completed;
		if (rdev->recovery_offset > 0) {
			sb->feature_map |=
				cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
			sb->recovery_offset =
				cpu_to_le64(rdev->recovery_offset);
		}
	}

	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
		sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
	}

	max_dev = 0;
	list_for_each_entry(rdev2, &mddev->disks, same_set)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	if (max_dev > le32_to_cpu(sb->max_dev)) {
		int bmask;
		sb->max_dev = cpu_to_le32(max_dev);
		rdev->sb_size = max_dev * 2 + 256;
		bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
		if (rdev->sb_size & bmask)
			rdev->sb_size = (rdev->sb_size | bmask) + 1;
	}
	for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->sb_csum = calc_sb_1_csum(sb);
}
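
/*
 * Illustrative dev_roles[] contents produced by the loop above for a
 * three-slot array with one failed member and one spare (values shown
 * before cpu_to_le16 conversion):
 *
 *	dev_roles[0] = 0x0000	active in slot 0
 *	dev_roles[1] = 0xfffe	faulty
 *	dev_roles[2] = 0x0002	active in slot 2
 *	dev_roles[3] = 0xffff	spare
 */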

static unsigned long long
super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	struct mdp_superblock_1 *sb;
	sector_t max_sectors;
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->sb_start < rdev->data_offset) {
		/* minor versions 1 and 2; superblock before data */
		max_sectors = rdev->bdev->bd_inode->i_size >> 9;
		max_sectors -= rdev->data_offset;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
	} else if (rdev->mddev->bitmap_offset) {
		/* minor version 0 with a bitmap: we can't move it */
		return 0;
	} else {
		/* minor version 0; superblock after data */
		sector_t sb_start;
		sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2;
		sb_start &= ~(sector_t)(4*2 - 1);
		max_sectors = rdev->sectors + sb_start - rdev->sb_start;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
		rdev->sb_start = sb_start;
	}
	sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page);
	sb->data_size = cpu_to_le64(num_sectors);
	sb->super_offset = rdev->sb_start;
	sb->sb_csum = calc_sb_1_csum(sb);
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors / 2; /* kB for sysfs */
}

static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	    = super_90_load,
		.validate_super	    = super_90_validate,
		.sync_super	    = super_90_sync,
		.rdev_size_change   = super_90_rdev_size_change,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	    = super_1_load,
		.validate_super	    = super_1_validate,
		.sync_super	    = super_1_sync,
		.rdev_size_change   = super_1_rdev_size_change,
	},
};

static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
	mdk_rdev_t *rdev, *rdev2;

	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev1)
		rdev_for_each_rcu(rdev2, mddev2)
			if (rdev->bdev->bd_contains ==
			    rdev2->bdev->bd_contains) {
				rcu_read_unlock();
				return 1;
			}
	rcu_read_unlock();
	return 0;
}

static LIST_HEAD(pending_raid_disks);

/*
 * Try to register data integrity profile for an mddev
 *
 * This is called when an array is started and after a disk has been kicked
 * from the array. It only succeeds if all working and active component devices
 * are integrity capable with matching profiles.
 */
int md_integrity_register(mddev_t *mddev)
{
	mdk_rdev_t *rdev, *reference = NULL;

	if (list_empty(&mddev->disks))
		return 0; /* nothing to do */
	if (blk_get_integrity(mddev->gendisk))
		return 0; /* already registered */
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		/* skip spares and non-functional disks */
		if (test_bit(Faulty, &rdev->flags))
			continue;
		if (rdev->raid_disk < 0)
			continue;
		/*
		 * If at least one rdev is not integrity capable, we cannot
		 * enable data integrity for the md device.
		 */
		if (!bdev_get_integrity(rdev->bdev))
			return -EINVAL;
		if (!reference) {
			/* Use the first rdev as the reference */
			reference = rdev;
			continue;
		}
		/* does this rdev's profile match the reference profile? */
		if (blk_integrity_compare(reference->bdev->bd_disk,
				rdev->bdev->bd_disk) < 0)
			return -EINVAL;
	}
1542 */ 1543 if (blk_integrity_register(mddev->gendisk, 1544 bdev_get_integrity(reference->bdev)) != 0) { 1545 printk(KERN_ERR "md: failed to register integrity for %s\n", 1546 mdname(mddev)); 1547 return -EINVAL; 1548 } 1549 printk(KERN_NOTICE "md: data integrity on %s enabled\n", 1550 mdname(mddev)); 1551 return 0; 1552 } 1553 EXPORT_SYMBOL(md_integrity_register); 1554 1555 /* Disable data integrity if non-capable/non-matching disk is being added */ 1556 void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev) 1557 { 1558 struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev); 1559 struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk); 1560 1561 if (!bi_mddev) /* nothing to do */ 1562 return; 1563 if (rdev->raid_disk < 0) /* skip spares */ 1564 return; 1565 if (bi_rdev && blk_integrity_compare(mddev->gendisk, 1566 rdev->bdev->bd_disk) >= 0) 1567 return; 1568 printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev)); 1569 blk_integrity_unregister(mddev->gendisk); 1570 } 1571 EXPORT_SYMBOL(md_integrity_add_rdev); 1572 1573 static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) 1574 { 1575 char b[BDEVNAME_SIZE]; 1576 struct kobject *ko; 1577 char *s; 1578 int err; 1579 1580 if (rdev->mddev) { 1581 MD_BUG(); 1582 return -EINVAL; 1583 } 1584 1585 /* prevent duplicates */ 1586 if (find_rdev(mddev, rdev->bdev->bd_dev)) 1587 return -EEXIST; 1588 1589 /* make sure rdev->sectors exceeds mddev->dev_sectors */ 1590 if (rdev->sectors && (mddev->dev_sectors == 0 || 1591 rdev->sectors < mddev->dev_sectors)) { 1592 if (mddev->pers) { 1593 /* Cannot change size, so fail 1594 * If mddev->level <= 0, then we don't care 1595 * about aligning sizes (e.g. linear) 1596 */ 1597 if (mddev->level > 0) 1598 return -ENOSPC; 1599 } else 1600 mddev->dev_sectors = rdev->sectors; 1601 } 1602 1603 /* Verify rdev->desc_nr is unique. 
	/* Verify rdev->desc_nr is unique.
	 * If it is -1, assign a free number, else
	 * check number is not in use
	 */
	if (rdev->desc_nr < 0) {
		int choice = 0;
		if (mddev->pers) choice = mddev->raid_disks;
		while (find_rdev_nr(mddev, choice))
			choice++;
		rdev->desc_nr = choice;
	} else {
		if (find_rdev_nr(mddev, rdev->desc_nr))
			return -EBUSY;
	}
	if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
		printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
		       mdname(mddev), mddev->max_disks);
		return -EBUSY;
	}
	bdevname(rdev->bdev,b);
	while ( (s=strchr(b, '/')) != NULL)
		*s = '!';

	rdev->mddev = mddev;
	printk(KERN_INFO "md: bind<%s>\n", b);

	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
		goto fail;

	ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
	if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
		kobject_del(&rdev->kobj);
		goto fail;
	}
	rdev->sysfs_state = sysfs_get_dirent(rdev->kobj.sd, "state");

	list_add_rcu(&rdev->same_set, &mddev->disks);
	bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);

	/* May as well allow recovery to be retried once */
	mddev->recovery_disabled = 0;

	return 0;

 fail:
	printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
	       b, mdname(mddev));
	return err;
}

static void md_delayed_delete(struct work_struct *ws)
{
	mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
	kobject_del(&rdev->kobj);
	kobject_put(&rdev->kobj);
}

static void unbind_rdev_from_array(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->mddev) {
		MD_BUG();
		return;
	}
	bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
	list_del_rcu(&rdev->same_set);
	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
	rdev->mddev = NULL;
	sysfs_remove_link(&rdev->kobj, "block");
	sysfs_put(rdev->sysfs_state);
	rdev->sysfs_state = NULL;
	/* We need to delay this, otherwise we can deadlock when
	 * writing to 'remove' to "dev/state".  We also need
	 * to delay it due to rcu usage.
	 */
	synchronize_rcu();
	INIT_WORK(&rdev->del_work, md_delayed_delete);
	kobject_get(&rdev->kobj);
	schedule_work(&rdev->del_work);
}

/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
 */
static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
{
	int err = 0;
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "md: could not open %s.\n",
			__bdevname(dev, b));
		return PTR_ERR(bdev);
	}
	err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev);
	if (err) {
		printk(KERN_ERR "md: could not bd_claim %s.\n",
			bdevname(bdev, b));
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
		return err;
	}
	if (!shared)
		set_bit(AllReserved, &rdev->flags);
	rdev->bdev = bdev;
	return err;
}

static void unlock_rdev(mdk_rdev_t *rdev)
{
	struct block_device *bdev = rdev->bdev;
	rdev->bdev = NULL;
	if (!bdev)
		MD_BUG();
	bd_release(bdev);
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
}

void md_autodetect_dev(dev_t dev);

static void export_rdev(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: export_rdev(%s)\n",
		bdevname(rdev->bdev,b));
	if (rdev->mddev)
		MD_BUG();
	free_disk_sb(rdev);
#ifndef MODULE
	if (test_bit(AutoDetected, &rdev->flags))
		md_autodetect_dev(rdev->bdev->bd_dev);
#endif
	unlock_rdev(rdev);
	kobject_put(&rdev->kobj);
}

static void kick_rdev_from_array(mdk_rdev_t * rdev)
{
	unbind_rdev_from_array(rdev);
	export_rdev(rdev);
}

static void export_array(mddev_t *mddev)
{
	mdk_rdev_t *rdev, *tmp;

	rdev_for_each(rdev, tmp, mddev) {
		if (!rdev->mddev) {
			MD_BUG();
			continue;
		}
		kick_rdev_from_array(rdev);
	}
	if (!list_empty(&mddev->disks))
		MD_BUG();
	mddev->raid_disks = 0;
	mddev->major_version = 0;
}

static void print_desc(mdp_disk_t *desc)
{
	printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
		desc->major,desc->minor,desc->raid_disk,desc->state);
}

static void print_sb_90(mdp_super_t *sb)
{
	int i;

	printk(KERN_INFO
		"md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
		sb->major_version, sb->minor_version, sb->patch_version,
		sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
		sb->ctime);
	printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
		sb->level, sb->size, sb->nr_disks, sb->raid_disks,
		sb->md_minor, sb->layout, sb->chunk_size);
	printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
		" FD:%d SD:%d CSUM:%08x E:%08lx\n",
		sb->utime, sb->state, sb->active_disks, sb->working_disks,
		sb->failed_disks, sb->spare_disks,
		sb->sb_csum, (unsigned long)sb->events_lo);

	printk(KERN_INFO);
	for (i = 0; i < MD_SB_DISKS; i++) {
		mdp_disk_t *desc;

		desc = sb->disks + i;
		if (desc->number || desc->major || desc->minor ||
		    desc->raid_disk || (desc->state && (desc->state != 4))) {
			printk("     D %2d: ", i);
			print_desc(desc);
		}
	}
	printk(KERN_INFO "md:     THIS: ");
	print_desc(&sb->this_disk);
}

static void print_sb_1(struct mdp_superblock_1 *sb)
{
	__u8 *uuid;

	uuid = sb->set_uuid;
	printk(KERN_INFO
	       "md:  SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x"
	       ":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n"
	       "md:    Name: \"%s\" CT:%llu\n",
		le32_to_cpu(sb->major_version),
		le32_to_cpu(sb->feature_map),
		uuid[0], uuid[1], uuid[2], uuid[3],
		uuid[4], uuid[5], uuid[6], uuid[7],
		uuid[8], uuid[9], uuid[10], uuid[11],
		uuid[12], uuid[13], uuid[14], uuid[15],
		sb->set_name,
		(unsigned long long)le64_to_cpu(sb->ctime)
		       & MD_SUPERBLOCK_1_TIME_SEC_MASK);

	uuid = sb->device_uuid;
	printk(KERN_INFO
	       "md:       L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
			" RO:%llu\n"
	       "md:     Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x"
			":%02x%02x%02x%02x%02x%02x\n"
	       "md:       (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
	       "md:         (MaxDev:%u) \n",
		le32_to_cpu(sb->level),
		(unsigned long long)le64_to_cpu(sb->size),
		le32_to_cpu(sb->raid_disks),
		le32_to_cpu(sb->layout),
		le32_to_cpu(sb->chunksize),
		(unsigned long long)le64_to_cpu(sb->data_offset),
		(unsigned long long)le64_to_cpu(sb->data_size),
		(unsigned long long)le64_to_cpu(sb->super_offset),
		(unsigned long long)le64_to_cpu(sb->recovery_offset),
		le32_to_cpu(sb->dev_number),
		uuid[0], uuid[1], uuid[2], uuid[3],
		uuid[4], uuid[5], uuid[6], uuid[7],
		uuid[8], uuid[9], uuid[10], uuid[11],
		uuid[12], uuid[13], uuid[14], uuid[15],
		sb->devflags,
		(unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
		(unsigned long long)le64_to_cpu(sb->events),
		(unsigned long long)le64_to_cpu(sb->resync_offset),
		le32_to_cpu(sb->sb_csum),
		le32_to_cpu(sb->max_dev)
		);
}

static void print_rdev(mdk_rdev_t *rdev, int major_version)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n",
		bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors,
		test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
		rdev->desc_nr);
	if (rdev->sb_loaded) {
		printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
		switch (major_version) {
		case 0:
			print_sb_90((mdp_super_t*)page_address(rdev->sb_page));
			break;
		case 1:
			print_sb_1((struct mdp_superblock_1 *)page_address(rdev->sb_page));
			break;
		}
	} else
		printk(KERN_INFO "md: no rdev superblock!\n");
}

static void md_print_devices(void)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;
	mddev_t *mddev;
	char b[BDEVNAME_SIZE];

	printk("\n");
	printk("md:	**********************************\n");
	printk("md:	* <COMPLETE RAID STATE PRINTOUT> *\n");
	printk("md:	**********************************\n");
	for_each_mddev(mddev, tmp) {

		if (mddev->bitmap)
			bitmap_print_sb(mddev->bitmap);
		else
			printk("%s: ", mdname(mddev));
		list_for_each_entry(rdev, &mddev->disks, same_set)
			printk("<%s>", bdevname(rdev->bdev,b));
		printk("\n");

		list_for_each_entry(rdev, &mddev->disks, same_set)
			print_rdev(rdev, mddev->major_version);
	}
	printk("md:	**********************************\n");
	printk("\n");
}


static void sync_sbs(mddev_t * mddev, int nospares)
{
	/* Update each superblock (in-memory image), but
	 * if we are allowed to, skip spares which already
	 * have the right event counter, or have one earlier
	 * (which would mean they aren't being marked as dirty
	 * with the rest of the array)
	 */
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (rdev->sb_events == mddev->events ||
		    (nospares &&
		     rdev->raid_disk < 0 &&
		     (rdev->sb_events&1)==0 &&
		     rdev->sb_events+1 == mddev->events)) {
			/* Don't update this superblock */
			rdev->sb_loaded = 2;
		} else {
			super_types[mddev->major_version].
				sync_super(mddev, rdev);
			rdev->sb_loaded = 1;
		}
	}
}
1924 sync_super(mddev, rdev);
1925 rdev->sb_loaded = 1;
1926 }
1927 }
1928 }
1929 
1930 static void md_update_sb(mddev_t * mddev, int force_change)
1931 {
1932 mdk_rdev_t *rdev;
1933 int sync_req;
1934 int nospares = 0;
1935 
1936 mddev->utime = get_seconds();
1937 if (mddev->external)
1938 return;
1939 repeat:
1940 spin_lock_irq(&mddev->write_lock);
1941 
1942 set_bit(MD_CHANGE_PENDING, &mddev->flags);
1943 if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
1944 force_change = 1;
1945 if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
1946 /* just a clean <-> dirty transition, possibly leave spares alone,
1947 * though if events isn't the right even/odd, we will have to do
1948 * spares after all
1949 */
1950 nospares = 1;
1951 if (force_change)
1952 nospares = 0;
1953 if (mddev->degraded)
1954 /* If the array is degraded, then skipping spares is both
1955 * dangerous and fairly pointless.
1956 * Dangerous because a device that was removed from the array
1957 * might have an event_count that still looks up-to-date,
1958 * so it can be re-added without a resync.
1959 * Pointless because if there are any spares to skip,
1960 * then a recovery will happen and soon that array won't
1961 * be degraded any more and the spare can go back to sleep then.
1962 */
1963 nospares = 0;
1964 
1965 sync_req = mddev->in_sync;
1966 
1967 /* If this is just a dirty<->clean transition, and the array is clean
1968 * and 'events' is odd, we can roll back to the previous clean state */
1969 if (nospares
1970 && (mddev->in_sync && mddev->recovery_cp == MaxSector)
1971 && (mddev->events & 1)
1972 && mddev->events != 1)
1973 mddev->events--;
1974 else {
1975 /* otherwise we have to go forward and ... */
1976 mddev->events ++;
1977 if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
1978 /* .. if the array isn't clean, an 'even' event must also go
1979 * to spares. */
1980 if ((mddev->events&1)==0)
1981 nospares = 0;
1982 } else {
1983 /* otherwise an 'odd' event must go to spares */
1984 if ((mddev->events&1))
1985 nospares = 0;
1986 }
1987 }
1988 
1989 if (!mddev->events) {
1990 /*
1991 * oops, this 64-bit counter should never wrap.
1992 * Either we are in around ~1 trillion A.C., assuming
1993 * 1 reboot per second, or we have a bug:
1994 */
1995 MD_BUG();
1996 mddev->events --;
1997 }
1998 
1999 /*
2000 * do not write anything to disk if using
2001 * nonpersistent superblocks
2002 */
2003 if (!mddev->persistent) {
2004 if (!mddev->external)
2005 clear_bit(MD_CHANGE_PENDING, &mddev->flags);
2006 
2007 spin_unlock_irq(&mddev->write_lock);
2008 wake_up(&mddev->sb_wait);
2009 return;
2010 }
2011 sync_sbs(mddev, nospares);
2012 spin_unlock_irq(&mddev->write_lock);
2013 
2014 dprintk(KERN_INFO
2015 "md: updating %s RAID superblock on device (in sync %d)\n",
2016 mdname(mddev),mddev->in_sync);
2017 
2018 bitmap_update_sb(mddev->bitmap);
2019 list_for_each_entry(rdev, &mddev->disks, same_set) {
2020 char b[BDEVNAME_SIZE];
2021 dprintk(KERN_INFO "md: ");
2022 if (rdev->sb_loaded != 1)
2023 continue; /* no noise on spare devices */
2024 if (test_bit(Faulty, &rdev->flags))
2025 dprintk("(skipping faulty ");
2026 
2027 dprintk("%s ", bdevname(rdev->bdev,b));
2028 if (!test_bit(Faulty, &rdev->flags)) {
2029 md_super_write(mddev,rdev,
2030 rdev->sb_start, rdev->sb_size,
2031 rdev->sb_page);
2032 dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
2033 bdevname(rdev->bdev,b),
2034 (unsigned long long)rdev->sb_start);
2035 rdev->sb_events = mddev->events;
2036 
2037 } else
2038 dprintk(")\n");
2039 if (mddev->level == LEVEL_MULTIPATH)
2040 /* only need to write one superblock... */
2041 break;
2042 }
2043 md_super_wait(mddev);
2044 /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */
2045 
2046 spin_lock_irq(&mddev->write_lock);
2047 if (mddev->in_sync != sync_req ||
2048 test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
2049 /* have to write it out again */
2050 spin_unlock_irq(&mddev->write_lock);
2051 goto repeat;
2052 }
2053 clear_bit(MD_CHANGE_PENDING, &mddev->flags);
2054 spin_unlock_irq(&mddev->write_lock);
2055 wake_up(&mddev->sb_wait);
2056 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
2057 sysfs_notify(&mddev->kobj, NULL, "sync_completed");
2058 
2059 }
2060 
2061 /* words written to sysfs files may, or may not, be \n terminated.
2062 * We want to accept either form. For this we use cmd_match.
2063 */
2064 static int cmd_match(const char *cmd, const char *str)
2065 {
2066 /* See if cmd, written into a sysfs file, matches
2067 * str.
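* For example, a write of "frozen\n" as produced by
* 'echo frozen > sync_action' matches the string "frozen".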
* They must either be the same, or cmd can
2068 * have a trailing newline
2069 */
2070 while (*cmd && *str && *cmd == *str) {
2071 cmd++;
2072 str++;
2073 }
2074 if (*cmd == '\n')
2075 cmd++;
2076 if (*str || *cmd)
2077 return 0;
2078 return 1;
2079 }
2080 
2081 struct rdev_sysfs_entry {
2082 struct attribute attr;
2083 ssize_t (*show)(mdk_rdev_t *, char *);
2084 ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
2085 };
2086 
2087 static ssize_t
2088 state_show(mdk_rdev_t *rdev, char *page)
2089 {
2090 char *sep = "";
2091 size_t len = 0;
2092 
2093 if (test_bit(Faulty, &rdev->flags)) {
2094 len += sprintf(page+len, "%sfaulty",sep);
2095 sep = ",";
2096 }
2097 if (test_bit(In_sync, &rdev->flags)) {
2098 len += sprintf(page+len, "%sin_sync",sep);
2099 sep = ",";
2100 }
2101 if (test_bit(WriteMostly, &rdev->flags)) {
2102 len += sprintf(page+len, "%swrite_mostly",sep);
2103 sep = ",";
2104 }
2105 if (test_bit(Blocked, &rdev->flags)) {
2106 len += sprintf(page+len, "%sblocked", sep);
2107 sep = ",";
2108 }
2109 if (!test_bit(Faulty, &rdev->flags) &&
2110 !test_bit(In_sync, &rdev->flags)) {
2111 len += sprintf(page+len, "%sspare", sep);
2112 sep = ",";
2113 }
2114 return len+sprintf(page+len, "\n");
2115 }
2116 
2117 static ssize_t
2118 state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2119 {
2120 /* can write
2121 * faulty - simulates an error
2122 * remove - disconnects the device
2123 * writemostly - sets write_mostly
2124 * -writemostly - clears write_mostly
2125 * blocked - sets the Blocked flag
2126 * -blocked - clears the Blocked flag
2127 * insync - sets In_sync providing device isn't active
2128 */
2129 int err = -EINVAL;
2130 if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
2131 md_error(rdev->mddev, rdev);
2132 err = 0;
2133 } else if (cmd_match(buf, "remove")) {
2134 if (rdev->raid_disk >= 0)
2135 err = -EBUSY;
2136 else {
2137 mddev_t *mddev = rdev->mddev;
2138 kick_rdev_from_array(rdev);
2139 if (mddev->pers)
2140 md_update_sb(mddev, 1);
2141 md_new_event(mddev);
2142 err = 0;
2143 }
2144 } else if (cmd_match(buf, "writemostly")) {
2145 set_bit(WriteMostly, &rdev->flags);
2146 err = 0;
2147 } else if (cmd_match(buf, "-writemostly")) {
2148 clear_bit(WriteMostly, &rdev->flags);
2149 err = 0;
2150 } else if (cmd_match(buf, "blocked")) {
2151 set_bit(Blocked, &rdev->flags);
2152 err = 0;
2153 } else if (cmd_match(buf, "-blocked")) {
2154 clear_bit(Blocked, &rdev->flags);
2155 wake_up(&rdev->blocked_wait);
2156 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
2157 md_wakeup_thread(rdev->mddev->thread);
2158 
2159 err = 0;
2160 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
2161 set_bit(In_sync, &rdev->flags);
2162 err = 0;
2163 }
2164 if (!err && rdev->sysfs_state)
2165 sysfs_notify_dirent(rdev->sysfs_state);
2166 return err ?
err : len; 2167 } 2168 static struct rdev_sysfs_entry rdev_state = 2169 __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store); 2170 2171 static ssize_t 2172 errors_show(mdk_rdev_t *rdev, char *page) 2173 { 2174 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); 2175 } 2176 2177 static ssize_t 2178 errors_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2179 { 2180 char *e; 2181 unsigned long n = simple_strtoul(buf, &e, 10); 2182 if (*buf && (*e == 0 || *e == '\n')) { 2183 atomic_set(&rdev->corrected_errors, n); 2184 return len; 2185 } 2186 return -EINVAL; 2187 } 2188 static struct rdev_sysfs_entry rdev_errors = 2189 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store); 2190 2191 static ssize_t 2192 slot_show(mdk_rdev_t *rdev, char *page) 2193 { 2194 if (rdev->raid_disk < 0) 2195 return sprintf(page, "none\n"); 2196 else 2197 return sprintf(page, "%d\n", rdev->raid_disk); 2198 } 2199 2200 static ssize_t 2201 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2202 { 2203 char *e; 2204 int err; 2205 char nm[20]; 2206 int slot = simple_strtoul(buf, &e, 10); 2207 if (strncmp(buf, "none", 4)==0) 2208 slot = -1; 2209 else if (e==buf || (*e && *e!= '\n')) 2210 return -EINVAL; 2211 if (rdev->mddev->pers && slot == -1) { 2212 /* Setting 'slot' on an active array requires also 2213 * updating the 'rd%d' link, and communicating 2214 * with the personality with ->hot_*_disk. 2215 * For now we only support removing 2216 * failed/spare devices. This normally happens automatically, 2217 * but not when the metadata is externally managed. 2218 */ 2219 if (rdev->raid_disk == -1) 2220 return -EEXIST; 2221 /* personality does all needed checks */ 2222 if (rdev->mddev->pers->hot_add_disk == NULL) 2223 return -EINVAL; 2224 err = rdev->mddev->pers-> 2225 hot_remove_disk(rdev->mddev, rdev->raid_disk); 2226 if (err) 2227 return err; 2228 sprintf(nm, "rd%d", rdev->raid_disk); 2229 sysfs_remove_link(&rdev->mddev->kobj, nm); 2230 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2231 md_wakeup_thread(rdev->mddev->thread); 2232 } else if (rdev->mddev->pers) { 2233 mdk_rdev_t *rdev2; 2234 /* Activating a spare .. or possibly reactivating 2235 * if we ever get bitmaps working here. 2236 */ 2237 2238 if (rdev->raid_disk != -1) 2239 return -EBUSY; 2240 2241 if (rdev->mddev->pers->hot_add_disk == NULL) 2242 return -EINVAL; 2243 2244 list_for_each_entry(rdev2, &rdev->mddev->disks, same_set) 2245 if (rdev2->raid_disk == slot) 2246 return -EEXIST; 2247 2248 rdev->raid_disk = slot; 2249 if (test_bit(In_sync, &rdev->flags)) 2250 rdev->saved_raid_disk = slot; 2251 else 2252 rdev->saved_raid_disk = -1; 2253 err = rdev->mddev->pers-> 2254 hot_add_disk(rdev->mddev, rdev); 2255 if (err) { 2256 rdev->raid_disk = -1; 2257 return err; 2258 } else 2259 sysfs_notify_dirent(rdev->sysfs_state); 2260 sprintf(nm, "rd%d", rdev->raid_disk); 2261 if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm)) 2262 printk(KERN_WARNING 2263 "md: cannot register " 2264 "%s for %s\n", 2265 nm, mdname(rdev->mddev)); 2266 2267 /* don't wakeup anyone, leave that to userspace. 
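* A hypothetical sequence for an array with externally managed
* metadata, assuming a member device sdc exposed as
* /sys/block/md0/md/dev-sdc:
* echo 2 > /sys/block/md0/md/dev-sdc/slot
* binds sdc into slot 2 via ->hot_add_disk and creates the rd2 link.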
*/
2268 } else {
2269 if (slot >= rdev->mddev->raid_disks)
2270 return -ENOSPC;
2271 rdev->raid_disk = slot;
2272 /* assume it is working */
2273 clear_bit(Faulty, &rdev->flags);
2274 clear_bit(WriteMostly, &rdev->flags);
2275 set_bit(In_sync, &rdev->flags);
2276 sysfs_notify_dirent(rdev->sysfs_state);
2277 }
2278 return len;
2279 }
2280 
2281 
2282 static struct rdev_sysfs_entry rdev_slot =
2283 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
2284 
2285 static ssize_t
2286 offset_show(mdk_rdev_t *rdev, char *page)
2287 {
2288 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
2289 }
2290 
2291 static ssize_t
2292 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2293 {
2294 char *e;
2295 unsigned long long offset = simple_strtoull(buf, &e, 10);
2296 if (e==buf || (*e && *e != '\n'))
2297 return -EINVAL;
2298 if (rdev->mddev->pers && rdev->raid_disk >= 0)
2299 return -EBUSY;
2300 if (rdev->sectors && rdev->mddev->external)
2301 /* Must set offset before size, so overlap checks
2302 * can be sane */
2303 return -EBUSY;
2304 rdev->data_offset = offset;
2305 return len;
2306 }
2307 
2308 static struct rdev_sysfs_entry rdev_offset =
2309 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);
2310 
2311 static ssize_t
2312 rdev_size_show(mdk_rdev_t *rdev, char *page)
2313 {
2314 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
2315 }
2316 
2317 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
2318 {
2319 /* check if two start/length pairs overlap */
2320 if (s1+l1 <= s2)
2321 return 0;
2322 if (s2+l2 <= s1)
2323 return 0;
2324 return 1;
2325 }
2326 
2327 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
2328 {
2329 unsigned long long blocks;
2330 sector_t new;
2331 
2332 if (strict_strtoull(buf, 10, &blocks) < 0)
2333 return -EINVAL;
2334 
2335 if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
2336 return -EINVAL; /* sector conversion overflow */
2337 
2338 new = blocks * 2;
2339 if (new != blocks * 2)
2340 return -EINVAL; /* unsigned long long to sector_t overflow */
2341 
2342 *sectors = new;
2343 return 0;
2344 }
2345 
2346 static ssize_t
2347 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
2348 {
2349 mddev_t *my_mddev = rdev->mddev;
2350 sector_t oldsectors = rdev->sectors;
2351 sector_t sectors;
2352 
2353 if (strict_blocks_to_sectors(buf, &sectors) < 0)
2354 return -EINVAL;
2355 if (my_mddev->pers && rdev->raid_disk >= 0) {
2356 if (my_mddev->persistent) {
2357 sectors = super_types[my_mddev->major_version].
2358 rdev_size_change(rdev, sectors);
2359 if (!sectors)
2360 return -EBUSY;
2361 } else if (!sectors)
2362 sectors = (rdev->bdev->bd_inode->i_size >> 9) -
2363 rdev->data_offset;
2364 }
2365 if (sectors < my_mddev->dev_sectors)
2366 return -EINVAL; /* component must fit device */
2367 
2368 rdev->sectors = sectors;
2369 if (sectors > oldsectors && my_mddev->external) {
2370 /* need to check that all other rdevs with the same ->bdev
2371 * do not overlap. We need to unlock the mddev to avoid
2372 * a deadlock. We have already changed rdev->sectors, and if
2373 * we have to change it back, we will have the lock again.
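* overlaps() above treats the two ranges as half-open intervals:
* [s1, s1+l1) and [s2, s2+l2) intersect iff s1 < s2+l2 && s2 < s1+l1.
* Worked example: rdevs at data_offset 0, length 1000 and at
* data_offset 1000, length 1000 do not overlap, but growing the
* first to 1001 sectors would collide and must be rejected here.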
2374 */ 2375 mddev_t *mddev; 2376 int overlap = 0; 2377 struct list_head *tmp; 2378 2379 mddev_unlock(my_mddev); 2380 for_each_mddev(mddev, tmp) { 2381 mdk_rdev_t *rdev2; 2382 2383 mddev_lock(mddev); 2384 list_for_each_entry(rdev2, &mddev->disks, same_set) 2385 if (test_bit(AllReserved, &rdev2->flags) || 2386 (rdev->bdev == rdev2->bdev && 2387 rdev != rdev2 && 2388 overlaps(rdev->data_offset, rdev->sectors, 2389 rdev2->data_offset, 2390 rdev2->sectors))) { 2391 overlap = 1; 2392 break; 2393 } 2394 mddev_unlock(mddev); 2395 if (overlap) { 2396 mddev_put(mddev); 2397 break; 2398 } 2399 } 2400 mddev_lock(my_mddev); 2401 if (overlap) { 2402 /* Someone else could have slipped in a size 2403 * change here, but doing so is just silly. 2404 * We put oldsectors back because we *know* it is 2405 * safe, and trust userspace not to race with 2406 * itself 2407 */ 2408 rdev->sectors = oldsectors; 2409 return -EBUSY; 2410 } 2411 } 2412 return len; 2413 } 2414 2415 static struct rdev_sysfs_entry rdev_size = 2416 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store); 2417 2418 static struct attribute *rdev_default_attrs[] = { 2419 &rdev_state.attr, 2420 &rdev_errors.attr, 2421 &rdev_slot.attr, 2422 &rdev_offset.attr, 2423 &rdev_size.attr, 2424 NULL, 2425 }; 2426 static ssize_t 2427 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 2428 { 2429 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 2430 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); 2431 mddev_t *mddev = rdev->mddev; 2432 ssize_t rv; 2433 2434 if (!entry->show) 2435 return -EIO; 2436 2437 rv = mddev ? mddev_lock(mddev) : -EBUSY; 2438 if (!rv) { 2439 if (rdev->mddev == NULL) 2440 rv = -EBUSY; 2441 else 2442 rv = entry->show(rdev, page); 2443 mddev_unlock(mddev); 2444 } 2445 return rv; 2446 } 2447 2448 static ssize_t 2449 rdev_attr_store(struct kobject *kobj, struct attribute *attr, 2450 const char *page, size_t length) 2451 { 2452 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 2453 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); 2454 ssize_t rv; 2455 mddev_t *mddev = rdev->mddev; 2456 2457 if (!entry->store) 2458 return -EIO; 2459 if (!capable(CAP_SYS_ADMIN)) 2460 return -EACCES; 2461 rv = mddev ? mddev_lock(mddev): -EBUSY; 2462 if (!rv) { 2463 if (rdev->mddev == NULL) 2464 rv = -EBUSY; 2465 else 2466 rv = entry->store(rdev, page, length); 2467 mddev_unlock(mddev); 2468 } 2469 return rv; 2470 } 2471 2472 static void rdev_free(struct kobject *ko) 2473 { 2474 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj); 2475 kfree(rdev); 2476 } 2477 static struct sysfs_ops rdev_sysfs_ops = { 2478 .show = rdev_attr_show, 2479 .store = rdev_attr_store, 2480 }; 2481 static struct kobj_type rdev_ktype = { 2482 .release = rdev_free, 2483 .sysfs_ops = &rdev_sysfs_ops, 2484 .default_attrs = rdev_default_attrs, 2485 }; 2486 2487 /* 2488 * Import a device. If 'super_format' >= 0, then sanity check the superblock 2489 * 2490 * mark the device faulty if: 2491 * 2492 * - the device is nonexistent (zero size) 2493 * - the device has no valid superblock 2494 * 2495 * a faulty rdev _never_ has rdev->sb set. 
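*
* super_format -1 skips the superblock check entirely (used for
* non-persistent arrays) and -2 additionally claims the device
* 'shared' for externally managed metadata; see the three call
* patterns in new_dev_store() below.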
2496 */ 2497 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor) 2498 { 2499 char b[BDEVNAME_SIZE]; 2500 int err; 2501 mdk_rdev_t *rdev; 2502 sector_t size; 2503 2504 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); 2505 if (!rdev) { 2506 printk(KERN_ERR "md: could not alloc mem for new device!\n"); 2507 return ERR_PTR(-ENOMEM); 2508 } 2509 2510 if ((err = alloc_disk_sb(rdev))) 2511 goto abort_free; 2512 2513 err = lock_rdev(rdev, newdev, super_format == -2); 2514 if (err) 2515 goto abort_free; 2516 2517 kobject_init(&rdev->kobj, &rdev_ktype); 2518 2519 rdev->desc_nr = -1; 2520 rdev->saved_raid_disk = -1; 2521 rdev->raid_disk = -1; 2522 rdev->flags = 0; 2523 rdev->data_offset = 0; 2524 rdev->sb_events = 0; 2525 atomic_set(&rdev->nr_pending, 0); 2526 atomic_set(&rdev->read_errors, 0); 2527 atomic_set(&rdev->corrected_errors, 0); 2528 2529 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 2530 if (!size) { 2531 printk(KERN_WARNING 2532 "md: %s has zero or unknown size, marking faulty!\n", 2533 bdevname(rdev->bdev,b)); 2534 err = -EINVAL; 2535 goto abort_free; 2536 } 2537 2538 if (super_format >= 0) { 2539 err = super_types[super_format]. 2540 load_super(rdev, NULL, super_minor); 2541 if (err == -EINVAL) { 2542 printk(KERN_WARNING 2543 "md: %s does not have a valid v%d.%d " 2544 "superblock, not importing!\n", 2545 bdevname(rdev->bdev,b), 2546 super_format, super_minor); 2547 goto abort_free; 2548 } 2549 if (err < 0) { 2550 printk(KERN_WARNING 2551 "md: could not read %s's sb, not importing!\n", 2552 bdevname(rdev->bdev,b)); 2553 goto abort_free; 2554 } 2555 } 2556 2557 INIT_LIST_HEAD(&rdev->same_set); 2558 init_waitqueue_head(&rdev->blocked_wait); 2559 2560 return rdev; 2561 2562 abort_free: 2563 if (rdev->sb_page) { 2564 if (rdev->bdev) 2565 unlock_rdev(rdev); 2566 free_disk_sb(rdev); 2567 } 2568 kfree(rdev); 2569 return ERR_PTR(err); 2570 } 2571 2572 /* 2573 * Check a full RAID array for plausibility 2574 */ 2575 2576 2577 static void analyze_sbs(mddev_t * mddev) 2578 { 2579 int i; 2580 mdk_rdev_t *rdev, *freshest, *tmp; 2581 char b[BDEVNAME_SIZE]; 2582 2583 freshest = NULL; 2584 rdev_for_each(rdev, tmp, mddev) 2585 switch (super_types[mddev->major_version]. 2586 load_super(rdev, freshest, mddev->minor_version)) { 2587 case 1: 2588 freshest = rdev; 2589 break; 2590 case 0: 2591 break; 2592 default: 2593 printk( KERN_ERR \ 2594 "md: fatal superblock inconsistency in %s" 2595 " -- removing from array\n", 2596 bdevname(rdev->bdev,b)); 2597 kick_rdev_from_array(rdev); 2598 } 2599 2600 2601 super_types[mddev->major_version]. 2602 validate_super(mddev, freshest); 2603 2604 i = 0; 2605 rdev_for_each(rdev, tmp, mddev) { 2606 if (rdev->desc_nr >= mddev->max_disks || 2607 i > mddev->max_disks) { 2608 printk(KERN_WARNING 2609 "md: %s: %s: only %d devices permitted\n", 2610 mdname(mddev), bdevname(rdev->bdev, b), 2611 mddev->max_disks); 2612 kick_rdev_from_array(rdev); 2613 continue; 2614 } 2615 if (rdev != freshest) 2616 if (super_types[mddev->major_version]. 
2617 validate_super(mddev, rdev)) { 2618 printk(KERN_WARNING "md: kicking non-fresh %s" 2619 " from array!\n", 2620 bdevname(rdev->bdev,b)); 2621 kick_rdev_from_array(rdev); 2622 continue; 2623 } 2624 if (mddev->level == LEVEL_MULTIPATH) { 2625 rdev->desc_nr = i++; 2626 rdev->raid_disk = rdev->desc_nr; 2627 set_bit(In_sync, &rdev->flags); 2628 } else if (rdev->raid_disk >= mddev->raid_disks) { 2629 rdev->raid_disk = -1; 2630 clear_bit(In_sync, &rdev->flags); 2631 } 2632 } 2633 } 2634 2635 static void md_safemode_timeout(unsigned long data); 2636 2637 static ssize_t 2638 safe_delay_show(mddev_t *mddev, char *page) 2639 { 2640 int msec = (mddev->safemode_delay*1000)/HZ; 2641 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000); 2642 } 2643 static ssize_t 2644 safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len) 2645 { 2646 int scale=1; 2647 int dot=0; 2648 int i; 2649 unsigned long msec; 2650 char buf[30]; 2651 2652 /* remove a period, and count digits after it */ 2653 if (len >= sizeof(buf)) 2654 return -EINVAL; 2655 strlcpy(buf, cbuf, sizeof(buf)); 2656 for (i=0; i<len; i++) { 2657 if (dot) { 2658 if (isdigit(buf[i])) { 2659 buf[i-1] = buf[i]; 2660 scale *= 10; 2661 } 2662 buf[i] = 0; 2663 } else if (buf[i] == '.') { 2664 dot=1; 2665 buf[i] = 0; 2666 } 2667 } 2668 if (strict_strtoul(buf, 10, &msec) < 0) 2669 return -EINVAL; 2670 msec = (msec * 1000) / scale; 2671 if (msec == 0) 2672 mddev->safemode_delay = 0; 2673 else { 2674 unsigned long old_delay = mddev->safemode_delay; 2675 mddev->safemode_delay = (msec*HZ)/1000; 2676 if (mddev->safemode_delay == 0) 2677 mddev->safemode_delay = 1; 2678 if (mddev->safemode_delay < old_delay) 2679 md_safemode_timeout((unsigned long)mddev); 2680 } 2681 return len; 2682 } 2683 static struct md_sysfs_entry md_safe_delay = 2684 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store); 2685 2686 static ssize_t 2687 level_show(mddev_t *mddev, char *page) 2688 { 2689 struct mdk_personality *p = mddev->pers; 2690 if (p) 2691 return sprintf(page, "%s\n", p->name); 2692 else if (mddev->clevel[0]) 2693 return sprintf(page, "%s\n", mddev->clevel); 2694 else if (mddev->level != LEVEL_NONE) 2695 return sprintf(page, "%d\n", mddev->level); 2696 else 2697 return 0; 2698 } 2699 2700 static ssize_t 2701 level_store(mddev_t *mddev, const char *buf, size_t len) 2702 { 2703 char level[16]; 2704 ssize_t rv = len; 2705 struct mdk_personality *pers; 2706 void *priv; 2707 mdk_rdev_t *rdev; 2708 2709 if (mddev->pers == NULL) { 2710 if (len == 0) 2711 return 0; 2712 if (len >= sizeof(mddev->clevel)) 2713 return -ENOSPC; 2714 strncpy(mddev->clevel, buf, len); 2715 if (mddev->clevel[len-1] == '\n') 2716 len--; 2717 mddev->clevel[len] = 0; 2718 mddev->level = LEVEL_NONE; 2719 return rv; 2720 } 2721 2722 /* request to change the personality. Need to ensure: 2723 * - array is not engaged in resync/recovery/reshape 2724 * - old personality can be suspended 2725 * - new personality will access other array. 
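*
* For example, 'echo raid5 > /sys/block/md0/md/level' (md0 being a
* hypothetical array) asks the raid5 module's ->takeover() to adopt
* the current layout; if takeover fails, the old personality keeps
* running and the array is untouched.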
2726 */ 2727 2728 if (mddev->sync_thread || mddev->reshape_position != MaxSector) 2729 return -EBUSY; 2730 2731 if (!mddev->pers->quiesce) { 2732 printk(KERN_WARNING "md: %s: %s does not support online personality change\n", 2733 mdname(mddev), mddev->pers->name); 2734 return -EINVAL; 2735 } 2736 2737 /* Now find the new personality */ 2738 if (len == 0 || len >= sizeof(level)) 2739 return -EINVAL; 2740 strncpy(level, buf, len); 2741 if (level[len-1] == '\n') 2742 len--; 2743 level[len] = 0; 2744 2745 request_module("md-%s", level); 2746 spin_lock(&pers_lock); 2747 pers = find_pers(LEVEL_NONE, level); 2748 if (!pers || !try_module_get(pers->owner)) { 2749 spin_unlock(&pers_lock); 2750 printk(KERN_WARNING "md: personality %s not loaded\n", level); 2751 return -EINVAL; 2752 } 2753 spin_unlock(&pers_lock); 2754 2755 if (pers == mddev->pers) { 2756 /* Nothing to do! */ 2757 module_put(pers->owner); 2758 return rv; 2759 } 2760 if (!pers->takeover) { 2761 module_put(pers->owner); 2762 printk(KERN_WARNING "md: %s: %s does not support personality takeover\n", 2763 mdname(mddev), level); 2764 return -EINVAL; 2765 } 2766 2767 /* ->takeover must set new_* and/or delta_disks 2768 * if it succeeds, and may set them when it fails. 2769 */ 2770 priv = pers->takeover(mddev); 2771 if (IS_ERR(priv)) { 2772 mddev->new_level = mddev->level; 2773 mddev->new_layout = mddev->layout; 2774 mddev->new_chunk_sectors = mddev->chunk_sectors; 2775 mddev->raid_disks -= mddev->delta_disks; 2776 mddev->delta_disks = 0; 2777 module_put(pers->owner); 2778 printk(KERN_WARNING "md: %s: %s would not accept array\n", 2779 mdname(mddev), level); 2780 return PTR_ERR(priv); 2781 } 2782 2783 /* Looks like we have a winner */ 2784 mddev_suspend(mddev); 2785 mddev->pers->stop(mddev); 2786 module_put(mddev->pers->owner); 2787 /* Invalidate devices that are now superfluous */ 2788 list_for_each_entry(rdev, &mddev->disks, same_set) 2789 if (rdev->raid_disk >= mddev->raid_disks) { 2790 rdev->raid_disk = -1; 2791 clear_bit(In_sync, &rdev->flags); 2792 } 2793 mddev->pers = pers; 2794 mddev->private = priv; 2795 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 2796 mddev->level = mddev->new_level; 2797 mddev->layout = mddev->new_layout; 2798 mddev->chunk_sectors = mddev->new_chunk_sectors; 2799 mddev->delta_disks = 0; 2800 pers->run(mddev); 2801 mddev_resume(mddev); 2802 set_bit(MD_CHANGE_DEVS, &mddev->flags); 2803 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2804 md_wakeup_thread(mddev->thread); 2805 return rv; 2806 } 2807 2808 static struct md_sysfs_entry md_level = 2809 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store); 2810 2811 2812 static ssize_t 2813 layout_show(mddev_t *mddev, char *page) 2814 { 2815 /* just a number, not meaningful for all levels */ 2816 if (mddev->reshape_position != MaxSector && 2817 mddev->layout != mddev->new_layout) 2818 return sprintf(page, "%d (%d)\n", 2819 mddev->new_layout, mddev->layout); 2820 return sprintf(page, "%d\n", mddev->layout); 2821 } 2822 2823 static ssize_t 2824 layout_store(mddev_t *mddev, const char *buf, size_t len) 2825 { 2826 char *e; 2827 unsigned long n = simple_strtoul(buf, &e, 10); 2828 2829 if (!*buf || (*e && *e != '\n')) 2830 return -EINVAL; 2831 2832 if (mddev->pers) { 2833 int err; 2834 if (mddev->pers->check_reshape == NULL) 2835 return -EBUSY; 2836 mddev->new_layout = n; 2837 err = mddev->pers->check_reshape(mddev); 2838 if (err) { 2839 mddev->new_layout = mddev->layout; 2840 return err; 2841 } 2842 } else { 2843 mddev->new_layout = n; 2844 if 
(mddev->reshape_position == MaxSector) 2845 mddev->layout = n; 2846 } 2847 return len; 2848 } 2849 static struct md_sysfs_entry md_layout = 2850 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store); 2851 2852 2853 static ssize_t 2854 raid_disks_show(mddev_t *mddev, char *page) 2855 { 2856 if (mddev->raid_disks == 0) 2857 return 0; 2858 if (mddev->reshape_position != MaxSector && 2859 mddev->delta_disks != 0) 2860 return sprintf(page, "%d (%d)\n", mddev->raid_disks, 2861 mddev->raid_disks - mddev->delta_disks); 2862 return sprintf(page, "%d\n", mddev->raid_disks); 2863 } 2864 2865 static int update_raid_disks(mddev_t *mddev, int raid_disks); 2866 2867 static ssize_t 2868 raid_disks_store(mddev_t *mddev, const char *buf, size_t len) 2869 { 2870 char *e; 2871 int rv = 0; 2872 unsigned long n = simple_strtoul(buf, &e, 10); 2873 2874 if (!*buf || (*e && *e != '\n')) 2875 return -EINVAL; 2876 2877 if (mddev->pers) 2878 rv = update_raid_disks(mddev, n); 2879 else if (mddev->reshape_position != MaxSector) { 2880 int olddisks = mddev->raid_disks - mddev->delta_disks; 2881 mddev->delta_disks = n - olddisks; 2882 mddev->raid_disks = n; 2883 } else 2884 mddev->raid_disks = n; 2885 return rv ? rv : len; 2886 } 2887 static struct md_sysfs_entry md_raid_disks = 2888 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store); 2889 2890 static ssize_t 2891 chunk_size_show(mddev_t *mddev, char *page) 2892 { 2893 if (mddev->reshape_position != MaxSector && 2894 mddev->chunk_sectors != mddev->new_chunk_sectors) 2895 return sprintf(page, "%d (%d)\n", 2896 mddev->new_chunk_sectors << 9, 2897 mddev->chunk_sectors << 9); 2898 return sprintf(page, "%d\n", mddev->chunk_sectors << 9); 2899 } 2900 2901 static ssize_t 2902 chunk_size_store(mddev_t *mddev, const char *buf, size_t len) 2903 { 2904 char *e; 2905 unsigned long n = simple_strtoul(buf, &e, 10); 2906 2907 if (!*buf || (*e && *e != '\n')) 2908 return -EINVAL; 2909 2910 if (mddev->pers) { 2911 int err; 2912 if (mddev->pers->check_reshape == NULL) 2913 return -EBUSY; 2914 mddev->new_chunk_sectors = n >> 9; 2915 err = mddev->pers->check_reshape(mddev); 2916 if (err) { 2917 mddev->new_chunk_sectors = mddev->chunk_sectors; 2918 return err; 2919 } 2920 } else { 2921 mddev->new_chunk_sectors = n >> 9; 2922 if (mddev->reshape_position == MaxSector) 2923 mddev->chunk_sectors = n >> 9; 2924 } 2925 return len; 2926 } 2927 static struct md_sysfs_entry md_chunk_size = 2928 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store); 2929 2930 static ssize_t 2931 resync_start_show(mddev_t *mddev, char *page) 2932 { 2933 if (mddev->recovery_cp == MaxSector) 2934 return sprintf(page, "none\n"); 2935 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp); 2936 } 2937 2938 static ssize_t 2939 resync_start_store(mddev_t *mddev, const char *buf, size_t len) 2940 { 2941 char *e; 2942 unsigned long long n = simple_strtoull(buf, &e, 10); 2943 2944 if (mddev->pers) 2945 return -EBUSY; 2946 if (!*buf || (*e && *e != '\n')) 2947 return -EINVAL; 2948 2949 mddev->recovery_cp = n; 2950 return len; 2951 } 2952 static struct md_sysfs_entry md_resync_start = 2953 __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store); 2954 2955 /* 2956 * The array state can be: 2957 * 2958 * clear 2959 * No devices, no size, no level 2960 * Equivalent to STOP_ARRAY ioctl 2961 * inactive 2962 * May have some settings, but array is not active 2963 * all IO results in error 2964 * When written, doesn't tear down array, but just stops it 2965 * 
suspended (not supported yet) 2966 * All IO requests will block. The array can be reconfigured. 2967 * Writing this, if accepted, will block until array is quiescent 2968 * readonly 2969 * no resync can happen. no superblocks get written. 2970 * write requests fail 2971 * read-auto 2972 * like readonly, but behaves like 'clean' on a write request. 2973 * 2974 * clean - no pending writes, but otherwise active. 2975 * When written to inactive array, starts without resync 2976 * If a write request arrives then 2977 * if metadata is known, mark 'dirty' and switch to 'active'. 2978 * if not known, block and switch to write-pending 2979 * If written to an active array that has pending writes, then fails. 2980 * active 2981 * fully active: IO and resync can be happening. 2982 * When written to inactive array, starts with resync 2983 * 2984 * write-pending 2985 * clean, but writes are blocked waiting for 'active' to be written. 2986 * 2987 * active-idle 2988 * like active, but no writes have been seen for a while (100msec). 2989 * 2990 */ 2991 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active, 2992 write_pending, active_idle, bad_word}; 2993 static char *array_states[] = { 2994 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active", 2995 "write-pending", "active-idle", NULL }; 2996 2997 static int match_word(const char *word, char **list) 2998 { 2999 int n; 3000 for (n=0; list[n]; n++) 3001 if (cmd_match(word, list[n])) 3002 break; 3003 return n; 3004 } 3005 3006 static ssize_t 3007 array_state_show(mddev_t *mddev, char *page) 3008 { 3009 enum array_state st = inactive; 3010 3011 if (mddev->pers) 3012 switch(mddev->ro) { 3013 case 1: 3014 st = readonly; 3015 break; 3016 case 2: 3017 st = read_auto; 3018 break; 3019 case 0: 3020 if (mddev->in_sync) 3021 st = clean; 3022 else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags)) 3023 st = write_pending; 3024 else if (mddev->safemode) 3025 st = active_idle; 3026 else 3027 st = active; 3028 } 3029 else { 3030 if (list_empty(&mddev->disks) && 3031 mddev->raid_disks == 0 && 3032 mddev->dev_sectors == 0) 3033 st = clear; 3034 else 3035 st = inactive; 3036 } 3037 return sprintf(page, "%s\n", array_states[st]); 3038 } 3039 3040 static int do_md_stop(mddev_t * mddev, int ro, int is_open); 3041 static int do_md_run(mddev_t * mddev); 3042 static int restart_array(mddev_t *mddev); 3043 3044 static ssize_t 3045 array_state_store(mddev_t *mddev, const char *buf, size_t len) 3046 { 3047 int err = -EINVAL; 3048 enum array_state st = match_word(buf, array_states); 3049 switch(st) { 3050 case bad_word: 3051 break; 3052 case clear: 3053 /* stopping an active array */ 3054 if (atomic_read(&mddev->openers) > 0) 3055 return -EBUSY; 3056 err = do_md_stop(mddev, 0, 0); 3057 break; 3058 case inactive: 3059 /* stopping an active array */ 3060 if (mddev->pers) { 3061 if (atomic_read(&mddev->openers) > 0) 3062 return -EBUSY; 3063 err = do_md_stop(mddev, 2, 0); 3064 } else 3065 err = 0; /* already inactive */ 3066 break; 3067 case suspended: 3068 break; /* not supported yet */ 3069 case readonly: 3070 if (mddev->pers) 3071 err = do_md_stop(mddev, 1, 0); 3072 else { 3073 mddev->ro = 1; 3074 set_disk_ro(mddev->gendisk, 1); 3075 err = do_md_run(mddev); 3076 } 3077 break; 3078 case read_auto: 3079 if (mddev->pers) { 3080 if (mddev->ro == 0) 3081 err = do_md_stop(mddev, 1, 0); 3082 else if (mddev->ro == 1) 3083 err = restart_array(mddev); 3084 if (err == 0) { 3085 mddev->ro = 2; 3086 set_disk_ro(mddev->gendisk, 0); 3087 } 3088 } else { 
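/* array is not active yet: start it in mode 2 (read-auto).
* Per the comment at do_md_run(), ro == 2 means read-only
* until the first write arrives, then switch to read-write.
*/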
3089 mddev->ro = 2; 3090 err = do_md_run(mddev); 3091 } 3092 break; 3093 case clean: 3094 if (mddev->pers) { 3095 restart_array(mddev); 3096 spin_lock_irq(&mddev->write_lock); 3097 if (atomic_read(&mddev->writes_pending) == 0) { 3098 if (mddev->in_sync == 0) { 3099 mddev->in_sync = 1; 3100 if (mddev->safemode == 1) 3101 mddev->safemode = 0; 3102 if (mddev->persistent) 3103 set_bit(MD_CHANGE_CLEAN, 3104 &mddev->flags); 3105 } 3106 err = 0; 3107 } else 3108 err = -EBUSY; 3109 spin_unlock_irq(&mddev->write_lock); 3110 } else 3111 err = -EINVAL; 3112 break; 3113 case active: 3114 if (mddev->pers) { 3115 restart_array(mddev); 3116 if (mddev->external) 3117 clear_bit(MD_CHANGE_CLEAN, &mddev->flags); 3118 wake_up(&mddev->sb_wait); 3119 err = 0; 3120 } else { 3121 mddev->ro = 0; 3122 set_disk_ro(mddev->gendisk, 0); 3123 err = do_md_run(mddev); 3124 } 3125 break; 3126 case write_pending: 3127 case active_idle: 3128 /* these cannot be set */ 3129 break; 3130 } 3131 if (err) 3132 return err; 3133 else { 3134 sysfs_notify_dirent(mddev->sysfs_state); 3135 return len; 3136 } 3137 } 3138 static struct md_sysfs_entry md_array_state = 3139 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); 3140 3141 static ssize_t 3142 null_show(mddev_t *mddev, char *page) 3143 { 3144 return -EINVAL; 3145 } 3146 3147 static ssize_t 3148 new_dev_store(mddev_t *mddev, const char *buf, size_t len) 3149 { 3150 /* buf must be %d:%d\n? giving major and minor numbers */ 3151 /* The new device is added to the array. 3152 * If the array has a persistent superblock, we read the 3153 * superblock to initialise info and check validity. 3154 * Otherwise, only checking done is that in bind_rdev_to_array, 3155 * which mainly checks size. 3156 */ 3157 char *e; 3158 int major = simple_strtoul(buf, &e, 10); 3159 int minor; 3160 dev_t dev; 3161 mdk_rdev_t *rdev; 3162 int err; 3163 3164 if (!*buf || *e != ':' || !e[1] || e[1] == '\n') 3165 return -EINVAL; 3166 minor = simple_strtoul(e+1, &e, 10); 3167 if (*e && *e != '\n') 3168 return -EINVAL; 3169 dev = MKDEV(major, minor); 3170 if (major != MAJOR(dev) || 3171 minor != MINOR(dev)) 3172 return -EOVERFLOW; 3173 3174 3175 if (mddev->persistent) { 3176 rdev = md_import_device(dev, mddev->major_version, 3177 mddev->minor_version); 3178 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { 3179 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, 3180 mdk_rdev_t, same_set); 3181 err = super_types[mddev->major_version] 3182 .load_super(rdev, rdev0, mddev->minor_version); 3183 if (err < 0) 3184 goto out; 3185 } 3186 } else if (mddev->external) 3187 rdev = md_import_device(dev, -2, -1); 3188 else 3189 rdev = md_import_device(dev, -1, -1); 3190 3191 if (IS_ERR(rdev)) 3192 return PTR_ERR(rdev); 3193 err = bind_rdev_to_array(rdev, mddev); 3194 out: 3195 if (err) 3196 export_rdev(rdev); 3197 return err ? err : len; 3198 } 3199 3200 static struct md_sysfs_entry md_new_device = 3201 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store); 3202 3203 static ssize_t 3204 bitmap_store(mddev_t *mddev, const char *buf, size_t len) 3205 { 3206 char *end; 3207 unsigned long chunk, end_chunk; 3208 3209 if (!mddev->bitmap) 3210 goto out; 3211 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... 
(range) */
3212 while (*buf) {
3213 chunk = end_chunk = simple_strtoul(buf, &end, 0);
3214 if (buf == end) break;
3215 if (*end == '-') { /* range */
3216 buf = end + 1;
3217 end_chunk = simple_strtoul(buf, &end, 0);
3218 if (buf == end) break;
3219 }
3220 if (*end && !isspace(*end)) break;
3221 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk);
3222 buf = end;
3223 while (isspace(*buf)) buf++;
3224 }
3225 bitmap_unplug(mddev->bitmap); /* flush the bits to disk */
3226 out:
3227 return len;
3228 }
3229 
3230 static struct md_sysfs_entry md_bitmap =
3231 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store);
3232 
3233 static ssize_t
3234 size_show(mddev_t *mddev, char *page)
3235 {
3236 return sprintf(page, "%llu\n",
3237 (unsigned long long)mddev->dev_sectors / 2);
3238 }
3239 
3240 static int update_size(mddev_t *mddev, sector_t num_sectors);
3241 
3242 static ssize_t
3243 size_store(mddev_t *mddev, const char *buf, size_t len)
3244 {
3245 /* If array is inactive, we can reduce the component size, but
3246 * not increase it (except from 0).
3247 * If array is active, we can try an on-line resize
3248 */
3249 sector_t sectors;
3250 int err = strict_blocks_to_sectors(buf, &sectors);
3251 
3252 if (err < 0)
3253 return err;
3254 if (mddev->pers) {
3255 err = update_size(mddev, sectors);
3256 md_update_sb(mddev, 1);
3257 } else {
3258 if (mddev->dev_sectors == 0 ||
3259 mddev->dev_sectors > sectors)
3260 mddev->dev_sectors = sectors;
3261 else
3262 err = -ENOSPC;
3263 }
3264 return err ? err : len;
3265 }
3266 
3267 static struct md_sysfs_entry md_size =
3268 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store);
3269 
3270 
3271 /* Metadata version.
3272 * This is one of
3273 * 'none' for arrays with no metadata (good luck...)
3274 * 'external' for arrays with externally managed metadata,
3275 * or N.M for internally known formats
3276 */
3277 static ssize_t
3278 metadata_show(mddev_t *mddev, char *page)
3279 {
3280 if (mddev->persistent)
3281 return sprintf(page, "%d.%d\n",
3282 mddev->major_version, mddev->minor_version);
3283 else if (mddev->external)
3284 return sprintf(page, "external:%s\n", mddev->metadata_type);
3285 else
3286 return sprintf(page, "none\n");
3287 }
3288 
3289 static ssize_t
3290 metadata_store(mddev_t *mddev, const char *buf, size_t len)
3291 {
3292 int major, minor;
3293 char *e;
3294 /* Changing the details of 'external' metadata is
3295 * always permitted. Otherwise there must be
3296 * no devices attached to the array.
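*
* For example: 'echo 0.90 > metadata_version' selects the classic
* superblock format, 'echo external:imsm' (imsm being one possible
* userspace-managed format name) hands metadata to userspace, and
* 'echo none' is for arrays assembled without any superblock.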
3297 */ 3298 if (mddev->external && strncmp(buf, "external:", 9) == 0) 3299 ; 3300 else if (!list_empty(&mddev->disks)) 3301 return -EBUSY; 3302 3303 if (cmd_match(buf, "none")) { 3304 mddev->persistent = 0; 3305 mddev->external = 0; 3306 mddev->major_version = 0; 3307 mddev->minor_version = 90; 3308 return len; 3309 } 3310 if (strncmp(buf, "external:", 9) == 0) { 3311 size_t namelen = len-9; 3312 if (namelen >= sizeof(mddev->metadata_type)) 3313 namelen = sizeof(mddev->metadata_type)-1; 3314 strncpy(mddev->metadata_type, buf+9, namelen); 3315 mddev->metadata_type[namelen] = 0; 3316 if (namelen && mddev->metadata_type[namelen-1] == '\n') 3317 mddev->metadata_type[--namelen] = 0; 3318 mddev->persistent = 0; 3319 mddev->external = 1; 3320 mddev->major_version = 0; 3321 mddev->minor_version = 90; 3322 return len; 3323 } 3324 major = simple_strtoul(buf, &e, 10); 3325 if (e==buf || *e != '.') 3326 return -EINVAL; 3327 buf = e+1; 3328 minor = simple_strtoul(buf, &e, 10); 3329 if (e==buf || (*e && *e != '\n') ) 3330 return -EINVAL; 3331 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL) 3332 return -ENOENT; 3333 mddev->major_version = major; 3334 mddev->minor_version = minor; 3335 mddev->persistent = 1; 3336 mddev->external = 0; 3337 return len; 3338 } 3339 3340 static struct md_sysfs_entry md_metadata = 3341 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); 3342 3343 static ssize_t 3344 action_show(mddev_t *mddev, char *page) 3345 { 3346 char *type = "idle"; 3347 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 3348 type = "frozen"; 3349 else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 3350 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) { 3351 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 3352 type = "reshape"; 3353 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 3354 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 3355 type = "resync"; 3356 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) 3357 type = "check"; 3358 else 3359 type = "repair"; 3360 } else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) 3361 type = "recover"; 3362 } 3363 return sprintf(page, "%s\n", type); 3364 } 3365 3366 static ssize_t 3367 action_store(mddev_t *mddev, const char *page, size_t len) 3368 { 3369 if (!mddev->pers || !mddev->pers->sync_request) 3370 return -EINVAL; 3371 3372 if (cmd_match(page, "frozen")) 3373 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 3374 else 3375 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 3376 3377 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) { 3378 if (mddev->sync_thread) { 3379 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 3380 md_unregister_thread(mddev->sync_thread); 3381 mddev->sync_thread = NULL; 3382 mddev->recovery = 0; 3383 } 3384 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 3385 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 3386 return -EBUSY; 3387 else if (cmd_match(page, "resync")) 3388 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3389 else if (cmd_match(page, "recover")) { 3390 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 3391 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3392 } else if (cmd_match(page, "reshape")) { 3393 int err; 3394 if (mddev->pers->start_reshape == NULL) 3395 return -EINVAL; 3396 err = mddev->pers->start_reshape(mddev); 3397 if (err) 3398 return err; 3399 sysfs_notify(&mddev->kobj, NULL, "degraded"); 3400 } else { 3401 if (cmd_match(page, "check")) 3402 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); 3403 
else if (!cmd_match(page, "repair")) 3404 return -EINVAL; 3405 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 3406 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 3407 } 3408 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3409 md_wakeup_thread(mddev->thread); 3410 sysfs_notify_dirent(mddev->sysfs_action); 3411 return len; 3412 } 3413 3414 static ssize_t 3415 mismatch_cnt_show(mddev_t *mddev, char *page) 3416 { 3417 return sprintf(page, "%llu\n", 3418 (unsigned long long) mddev->resync_mismatches); 3419 } 3420 3421 static struct md_sysfs_entry md_scan_mode = 3422 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); 3423 3424 3425 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt); 3426 3427 static ssize_t 3428 sync_min_show(mddev_t *mddev, char *page) 3429 { 3430 return sprintf(page, "%d (%s)\n", speed_min(mddev), 3431 mddev->sync_speed_min ? "local": "system"); 3432 } 3433 3434 static ssize_t 3435 sync_min_store(mddev_t *mddev, const char *buf, size_t len) 3436 { 3437 int min; 3438 char *e; 3439 if (strncmp(buf, "system", 6)==0) { 3440 mddev->sync_speed_min = 0; 3441 return len; 3442 } 3443 min = simple_strtoul(buf, &e, 10); 3444 if (buf == e || (*e && *e != '\n') || min <= 0) 3445 return -EINVAL; 3446 mddev->sync_speed_min = min; 3447 return len; 3448 } 3449 3450 static struct md_sysfs_entry md_sync_min = 3451 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); 3452 3453 static ssize_t 3454 sync_max_show(mddev_t *mddev, char *page) 3455 { 3456 return sprintf(page, "%d (%s)\n", speed_max(mddev), 3457 mddev->sync_speed_max ? "local": "system"); 3458 } 3459 3460 static ssize_t 3461 sync_max_store(mddev_t *mddev, const char *buf, size_t len) 3462 { 3463 int max; 3464 char *e; 3465 if (strncmp(buf, "system", 6)==0) { 3466 mddev->sync_speed_max = 0; 3467 return len; 3468 } 3469 max = simple_strtoul(buf, &e, 10); 3470 if (buf == e || (*e && *e != '\n') || max <= 0) 3471 return -EINVAL; 3472 mddev->sync_speed_max = max; 3473 return len; 3474 } 3475 3476 static struct md_sysfs_entry md_sync_max = 3477 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); 3478 3479 static ssize_t 3480 degraded_show(mddev_t *mddev, char *page) 3481 { 3482 return sprintf(page, "%d\n", mddev->degraded); 3483 } 3484 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded); 3485 3486 static ssize_t 3487 sync_force_parallel_show(mddev_t *mddev, char *page) 3488 { 3489 return sprintf(page, "%d\n", mddev->parallel_resync); 3490 } 3491 3492 static ssize_t 3493 sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len) 3494 { 3495 long n; 3496 3497 if (strict_strtol(buf, 10, &n)) 3498 return -EINVAL; 3499 3500 if (n != 0 && n != 1) 3501 return -EINVAL; 3502 3503 mddev->parallel_resync = n; 3504 3505 if (mddev->sync_thread) 3506 wake_up(&resync_wait); 3507 3508 return len; 3509 } 3510 3511 /* force parallel resync, even with shared block devices */ 3512 static struct md_sysfs_entry md_sync_force_parallel = 3513 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR, 3514 sync_force_parallel_show, sync_force_parallel_store); 3515 3516 static ssize_t 3517 sync_speed_show(mddev_t *mddev, char *page) 3518 { 3519 unsigned long resync, dt, db; 3520 if (mddev->curr_resync == 0) 3521 return sprintf(page, "none\n"); 3522 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active); 3523 dt = (jiffies - mddev->resync_mark) / HZ; 3524 if (!dt) dt++; 3525 db = resync - mddev->resync_mark_cnt; 3526 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */ 3527 } 
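/* Worked example for the arithmetic above: db = 409600 sectors
* moved in dt = 2 seconds reports 409600/2/2 = 102400 KiB/sec,
* i.e. 100 MiB/sec; the final /2 converts 512-byte sectors to KiB.
*/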
3528 3529 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed); 3530 3531 static ssize_t 3532 sync_completed_show(mddev_t *mddev, char *page) 3533 { 3534 unsigned long max_sectors, resync; 3535 3536 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 3537 return sprintf(page, "none\n"); 3538 3539 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 3540 max_sectors = mddev->resync_max_sectors; 3541 else 3542 max_sectors = mddev->dev_sectors; 3543 3544 resync = mddev->curr_resync_completed; 3545 return sprintf(page, "%lu / %lu\n", resync, max_sectors); 3546 } 3547 3548 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed); 3549 3550 static ssize_t 3551 min_sync_show(mddev_t *mddev, char *page) 3552 { 3553 return sprintf(page, "%llu\n", 3554 (unsigned long long)mddev->resync_min); 3555 } 3556 static ssize_t 3557 min_sync_store(mddev_t *mddev, const char *buf, size_t len) 3558 { 3559 unsigned long long min; 3560 if (strict_strtoull(buf, 10, &min)) 3561 return -EINVAL; 3562 if (min > mddev->resync_max) 3563 return -EINVAL; 3564 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 3565 return -EBUSY; 3566 3567 /* Must be a multiple of chunk_size */ 3568 if (mddev->chunk_sectors) { 3569 sector_t temp = min; 3570 if (sector_div(temp, mddev->chunk_sectors)) 3571 return -EINVAL; 3572 } 3573 mddev->resync_min = min; 3574 3575 return len; 3576 } 3577 3578 static struct md_sysfs_entry md_min_sync = 3579 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store); 3580 3581 static ssize_t 3582 max_sync_show(mddev_t *mddev, char *page) 3583 { 3584 if (mddev->resync_max == MaxSector) 3585 return sprintf(page, "max\n"); 3586 else 3587 return sprintf(page, "%llu\n", 3588 (unsigned long long)mddev->resync_max); 3589 } 3590 static ssize_t 3591 max_sync_store(mddev_t *mddev, const char *buf, size_t len) 3592 { 3593 if (strncmp(buf, "max", 3) == 0) 3594 mddev->resync_max = MaxSector; 3595 else { 3596 unsigned long long max; 3597 if (strict_strtoull(buf, 10, &max)) 3598 return -EINVAL; 3599 if (max < mddev->resync_min) 3600 return -EINVAL; 3601 if (max < mddev->resync_max && 3602 mddev->ro == 0 && 3603 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 3604 return -EBUSY; 3605 3606 /* Must be a multiple of chunk_size */ 3607 if (mddev->chunk_sectors) { 3608 sector_t temp = max; 3609 if (sector_div(temp, mddev->chunk_sectors)) 3610 return -EINVAL; 3611 } 3612 mddev->resync_max = max; 3613 } 3614 wake_up(&mddev->recovery_wait); 3615 return len; 3616 } 3617 3618 static struct md_sysfs_entry md_max_sync = 3619 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store); 3620 3621 static ssize_t 3622 suspend_lo_show(mddev_t *mddev, char *page) 3623 { 3624 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); 3625 } 3626 3627 static ssize_t 3628 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len) 3629 { 3630 char *e; 3631 unsigned long long new = simple_strtoull(buf, &e, 10); 3632 3633 if (mddev->pers == NULL || 3634 mddev->pers->quiesce == NULL) 3635 return -EINVAL; 3636 if (buf == e || (*e && *e != '\n')) 3637 return -EINVAL; 3638 if (new >= mddev->suspend_hi || 3639 (new > mddev->suspend_lo && new < mddev->suspend_hi)) { 3640 mddev->suspend_lo = new; 3641 mddev->pers->quiesce(mddev, 2); 3642 return len; 3643 } else 3644 return -EINVAL; 3645 } 3646 static struct md_sysfs_entry md_suspend_lo = 3647 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store); 3648 3649 3650 static ssize_t 3651 suspend_hi_show(mddev_t *mddev, char *page) 
3652 {
3653 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi);
3654 }
3655 
3656 static ssize_t
3657 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len)
3658 {
3659 char *e;
3660 unsigned long long new = simple_strtoull(buf, &e, 10);
3661 
3662 if (mddev->pers == NULL ||
3663 mddev->pers->quiesce == NULL)
3664 return -EINVAL;
3665 if (buf == e || (*e && *e != '\n'))
3666 return -EINVAL;
3667 if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) ||
3668 (new > mddev->suspend_lo && new > mddev->suspend_hi)) {
3669 mddev->suspend_hi = new;
3670 mddev->pers->quiesce(mddev, 1);
3671 mddev->pers->quiesce(mddev, 0);
3672 return len;
3673 } else
3674 return -EINVAL;
3675 }
3676 static struct md_sysfs_entry md_suspend_hi =
3677 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store);
3678 
3679 static ssize_t
3680 reshape_position_show(mddev_t *mddev, char *page)
3681 {
3682 if (mddev->reshape_position != MaxSector)
3683 return sprintf(page, "%llu\n",
3684 (unsigned long long)mddev->reshape_position);
3685 strcpy(page, "none\n");
3686 return 5;
3687 }
3688 
3689 static ssize_t
3690 reshape_position_store(mddev_t *mddev, const char *buf, size_t len)
3691 {
3692 char *e;
3693 unsigned long long new = simple_strtoull(buf, &e, 10);
3694 if (mddev->pers)
3695 return -EBUSY;
3696 if (buf == e || (*e && *e != '\n'))
3697 return -EINVAL;
3698 mddev->reshape_position = new;
3699 mddev->delta_disks = 0;
3700 mddev->new_level = mddev->level;
3701 mddev->new_layout = mddev->layout;
3702 mddev->new_chunk_sectors = mddev->chunk_sectors;
3703 return len;
3704 }
3705 
3706 static struct md_sysfs_entry md_reshape_position =
3707 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show,
3708 reshape_position_store);
3709 
3710 static ssize_t
3711 array_size_show(mddev_t *mddev, char *page)
3712 {
3713 if (mddev->external_size)
3714 return sprintf(page, "%llu\n",
3715 (unsigned long long)mddev->array_sectors/2);
3716 else
3717 return sprintf(page, "default\n");
3718 }
3719 
3720 static ssize_t
3721 array_size_store(mddev_t *mddev, const char *buf, size_t len)
3722 {
3723 sector_t sectors;
3724 
3725 if (strncmp(buf, "default", 7) == 0) {
3726 if (mddev->pers)
3727 sectors = mddev->pers->size(mddev, 0, 0);
3728 else
3729 sectors = mddev->array_sectors;
3730 
3731 mddev->external_size = 0;
3732 } else {
3733 if (strict_blocks_to_sectors(buf, &sectors) < 0)
3734 return -EINVAL;
3735 if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors)
3736 return -E2BIG;
3737 
3738 mddev->external_size = 1;
3739 }
3740 
3741 mddev->array_sectors = sectors;
3742 set_capacity(mddev->gendisk, mddev->array_sectors);
3743 if (mddev->pers)
3744 revalidate_disk(mddev->gendisk);
3745 
3746 return len;
3747 }
3748 
3749 static struct md_sysfs_entry md_array_size =
3750 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show,
3751 array_size_store);
3752 
3753 static struct attribute *md_default_attrs[] = {
3754 &md_level.attr,
3755 &md_layout.attr,
3756 &md_raid_disks.attr,
3757 &md_chunk_size.attr,
3758 &md_size.attr,
3759 &md_resync_start.attr,
3760 &md_metadata.attr,
3761 &md_new_device.attr,
3762 &md_safe_delay.attr,
3763 &md_array_state.attr,
3764 &md_reshape_position.attr,
3765 &md_array_size.attr,
3766 NULL,
3767 };
3768 
3769 static struct attribute *md_redundancy_attrs[] = {
3770 &md_scan_mode.attr,
3771 &md_mismatches.attr,
3772 &md_sync_min.attr,
3773 &md_sync_max.attr,
3774 &md_sync_speed.attr,
3775 &md_sync_force_parallel.attr,
3776 &md_sync_completed.attr,
3777 &md_min_sync.attr,
3778 &md_max_sync.attr,
3779 &md_suspend_lo.attr,
3780 &md_suspend_hi.attr,
3781 &md_bitmap.attr,
3782 &md_degraded.attr,
3783 NULL,
3784 };
3785 static struct attribute_group md_redundancy_group = {
3786 .name = NULL,
3787 .attrs = md_redundancy_attrs,
3788 };
3789 
3790 
3791 static ssize_t
3792 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
3793 {
3794 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
3795 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
3796 ssize_t rv;
3797 
3798 if (!entry->show)
3799 return -EIO;
3800 rv = mddev_lock(mddev);
3801 if (!rv) {
3802 rv = entry->show(mddev, page);
3803 mddev_unlock(mddev);
3804 }
3805 return rv;
3806 }
3807 
3808 static ssize_t
3809 md_attr_store(struct kobject *kobj, struct attribute *attr,
3810 const char *page, size_t length)
3811 {
3812 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr);
3813 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj);
3814 ssize_t rv;
3815 
3816 if (!entry->store)
3817 return -EIO;
3818 if (!capable(CAP_SYS_ADMIN))
3819 return -EACCES;
3820 rv = mddev_lock(mddev);
3821 if (mddev->hold_active == UNTIL_IOCTL)
3822 mddev->hold_active = 0;
3823 if (!rv) {
3824 rv = entry->store(mddev, page, length);
3825 mddev_unlock(mddev);
3826 }
3827 return rv;
3828 }
3829 
3830 static void md_free(struct kobject *ko)
3831 {
3832 mddev_t *mddev = container_of(ko, mddev_t, kobj);
3833 
3834 if (mddev->sysfs_state)
3835 sysfs_put(mddev->sysfs_state);
3836 
3837 if (mddev->gendisk) {
3838 del_gendisk(mddev->gendisk);
3839 put_disk(mddev->gendisk);
3840 }
3841 if (mddev->queue)
3842 blk_cleanup_queue(mddev->queue);
3843 
3844 kfree(mddev);
3845 }
3846 
3847 static struct sysfs_ops md_sysfs_ops = {
3848 .show = md_attr_show,
3849 .store = md_attr_store,
3850 };
3851 static struct kobj_type md_ktype = {
3852 .release = md_free,
3853 .sysfs_ops = &md_sysfs_ops,
3854 .default_attrs = md_default_attrs,
3855 };
3856 
3857 int mdp_major = 0;
3858 
3859 static void mddev_delayed_delete(struct work_struct *ws)
3860 {
3861 mddev_t *mddev = container_of(ws, mddev_t, del_work);
3862 
3863 if (mddev->private == &md_redundancy_group) {
3864 sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
3865 if (mddev->sysfs_action)
3866 sysfs_put(mddev->sysfs_action);
3867 mddev->sysfs_action = NULL;
3868 mddev->private = NULL;
3869 }
3870 kobject_del(&mddev->kobj);
3871 kobject_put(&mddev->kobj);
3872 }
3873 
3874 static int md_alloc(dev_t dev, char *name)
3875 {
3876 static DEFINE_MUTEX(disks_mutex);
3877 mddev_t *mddev = mddev_find(dev);
3878 struct gendisk *disk;
3879 int partitioned;
3880 int shift;
3881 int unit;
3882 int error;
3883 
3884 if (!mddev)
3885 return -ENODEV;
3886 
3887 partitioned = (MAJOR(mddev->unit) != MD_MAJOR);
3888 shift = partitioned ? MdpMinorShift : 0;
3889 unit = MINOR(mddev->unit) >> shift;
3890 
3891 /* wait for any previous instance of this device
3892 * to be completely removed (mddev_delayed_delete).
3893 */
3894 flush_scheduled_work();
3895 
3896 mutex_lock(&disks_mutex);
3897 error = -EEXIST;
3898 if (mddev->gendisk)
3899 goto abort;
3900 
3901 if (name) {
3902 /* Need to ensure that 'name' is not a duplicate.
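* For example, writing "md_home" twice to the module parameter
* handled by add_named_array() below must fail the second time
* with -EEXIST instead of creating two gendisks with one name.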
3903 */ 3904 mddev_t *mddev2; 3905 spin_lock(&all_mddevs_lock); 3906 3907 list_for_each_entry(mddev2, &all_mddevs, all_mddevs) 3908 if (mddev2->gendisk && 3909 strcmp(mddev2->gendisk->disk_name, name) == 0) { 3910 spin_unlock(&all_mddevs_lock); 3911 goto abort; 3912 } 3913 spin_unlock(&all_mddevs_lock); 3914 } 3915 3916 error = -ENOMEM; 3917 mddev->queue = blk_alloc_queue(GFP_KERNEL); 3918 if (!mddev->queue) 3919 goto abort; 3920 mddev->queue->queuedata = mddev; 3921 3922 /* Can be unlocked because the queue is new: no concurrency */ 3923 queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue); 3924 3925 blk_queue_make_request(mddev->queue, md_make_request); 3926 3927 disk = alloc_disk(1 << shift); 3928 if (!disk) { 3929 blk_cleanup_queue(mddev->queue); 3930 mddev->queue = NULL; 3931 goto abort; 3932 } 3933 disk->major = MAJOR(mddev->unit); 3934 disk->first_minor = unit << shift; 3935 if (name) 3936 strcpy(disk->disk_name, name); 3937 else if (partitioned) 3938 sprintf(disk->disk_name, "md_d%d", unit); 3939 else 3940 sprintf(disk->disk_name, "md%d", unit); 3941 disk->fops = &md_fops; 3942 disk->private_data = mddev; 3943 disk->queue = mddev->queue; 3944 /* Allow extended partitions. This makes the 3945 * 'mdp' device redundant, but we can't really 3946 * remove it now. 3947 */ 3948 disk->flags |= GENHD_FL_EXT_DEVT; 3949 add_disk(disk); 3950 mddev->gendisk = disk; 3951 error = kobject_init_and_add(&mddev->kobj, &md_ktype, 3952 &disk_to_dev(disk)->kobj, "%s", "md"); 3953 if (error) { 3954 /* This isn't possible, but as kobject_init_and_add is marked 3955 * __must_check, we must do something with the result 3956 */ 3957 printk(KERN_WARNING "md: cannot register %s/md - name in use\n", 3958 disk->disk_name); 3959 error = 0; 3960 } 3961 abort: 3962 mutex_unlock(&disks_mutex); 3963 if (!error) { 3964 kobject_uevent(&mddev->kobj, KOBJ_ADD); 3965 mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state"); 3966 } 3967 mddev_put(mddev); 3968 return error; 3969 } 3970 3971 static struct kobject *md_probe(dev_t dev, int *part, void *data) 3972 { 3973 md_alloc(dev, NULL); 3974 return NULL; 3975 } 3976 3977 static int add_named_array(const char *val, struct kernel_param *kp) 3978 { 3979 /* val must be "md_*" where * is not all digits. 3980 * We allocate an array with a large free minor number, and 3981 * set the name to val. val must not already be an active name. 3982 */ 3983 int len = strlen(val); 3984 char buf[DISK_NAME_LEN]; 3985 3986 while (len && val[len-1] == '\n') 3987 len--; 3988 if (len >= DISK_NAME_LEN) 3989 return -E2BIG; 3990 strlcpy(buf, val, len+1); 3991 if (strncmp(buf, "md_", 3) != 0) 3992 return -EINVAL; 3993 return md_alloc(0, buf); 3994 } 3995 3996 static void md_safemode_timeout(unsigned long data) 3997 { 3998 mddev_t *mddev = (mddev_t *) data; 3999 4000 if (!atomic_read(&mddev->writes_pending)) { 4001 mddev->safemode = 1; 4002 if (mddev->external) 4003 sysfs_notify_dirent(mddev->sysfs_state); 4004 } 4005 md_wakeup_thread(mddev->thread); 4006 } 4007 4008 static int start_dirty_degraded; 4009 4010 static int do_md_run(mddev_t * mddev) 4011 { 4012 int err; 4013 mdk_rdev_t *rdev; 4014 struct gendisk *disk; 4015 struct mdk_personality *pers; 4016 4017 if (list_empty(&mddev->disks)) 4018 /* cannot run an array with no devices.. 
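* (member devices are attached beforehand, e.g. through
* new_dev_store() above or the ioctl interface; do_md_run() only
* validates what is already on mddev->disks and starts it)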
*/ 4019 return -EINVAL; 4020 4021 if (mddev->pers) 4022 return -EBUSY; 4023 4024 /* 4025 * Analyze all RAID superblock(s) 4026 */ 4027 if (!mddev->raid_disks) { 4028 if (!mddev->persistent) 4029 return -EINVAL; 4030 analyze_sbs(mddev); 4031 } 4032 4033 if (mddev->level != LEVEL_NONE) 4034 request_module("md-level-%d", mddev->level); 4035 else if (mddev->clevel[0]) 4036 request_module("md-%s", mddev->clevel); 4037 4038 /* 4039 * Drop all container device buffers, from now on 4040 * the only valid external interface is through the md 4041 * device. 4042 */ 4043 list_for_each_entry(rdev, &mddev->disks, same_set) { 4044 if (test_bit(Faulty, &rdev->flags)) 4045 continue; 4046 sync_blockdev(rdev->bdev); 4047 invalidate_bdev(rdev->bdev); 4048 4049 /* perform some consistency tests on the device. 4050 * We don't want the data to overlap the metadata, 4051 * Internal Bitmap issues have been handled elsewhere. 4052 */ 4053 if (rdev->data_offset < rdev->sb_start) { 4054 if (mddev->dev_sectors && 4055 rdev->data_offset + mddev->dev_sectors 4056 > rdev->sb_start) { 4057 printk("md: %s: data overlaps metadata\n", 4058 mdname(mddev)); 4059 return -EINVAL; 4060 } 4061 } else { 4062 if (rdev->sb_start + rdev->sb_size/512 4063 > rdev->data_offset) { 4064 printk("md: %s: metadata overlaps data\n", 4065 mdname(mddev)); 4066 return -EINVAL; 4067 } 4068 } 4069 sysfs_notify_dirent(rdev->sysfs_state); 4070 } 4071 4072 md_probe(mddev->unit, NULL, NULL); 4073 disk = mddev->gendisk; 4074 if (!disk) 4075 return -ENOMEM; 4076 4077 spin_lock(&pers_lock); 4078 pers = find_pers(mddev->level, mddev->clevel); 4079 if (!pers || !try_module_get(pers->owner)) { 4080 spin_unlock(&pers_lock); 4081 if (mddev->level != LEVEL_NONE) 4082 printk(KERN_WARNING "md: personality for level %d is not loaded!\n", 4083 mddev->level); 4084 else 4085 printk(KERN_WARNING "md: personality for level %s is not loaded!\n", 4086 mddev->clevel); 4087 return -EINVAL; 4088 } 4089 mddev->pers = pers; 4090 spin_unlock(&pers_lock); 4091 if (mddev->level != pers->level) { 4092 mddev->level = pers->level; 4093 mddev->new_level = pers->level; 4094 } 4095 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 4096 4097 if (mddev->reshape_position != MaxSector && 4098 pers->start_reshape == NULL) { 4099 /* This personality cannot handle reshaping... */ 4100 mddev->pers = NULL; 4101 module_put(pers->owner); 4102 return -EINVAL; 4103 } 4104 4105 if (pers->sync_request) { 4106 /* Warn if this is a potentially silly 4107 * configuration. 
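	 * For example: a two-device raid1 built from sda1 and sda2 --
	 * both partitions of the same physical disk, so their
	 * bd_contains match and a single disk failure takes out both
	 * "mirrors". The nested loop below warns about exactly that.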
4108 */ 4109 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 4110 mdk_rdev_t *rdev2; 4111 int warned = 0; 4112 4113 list_for_each_entry(rdev, &mddev->disks, same_set) 4114 list_for_each_entry(rdev2, &mddev->disks, same_set) { 4115 if (rdev < rdev2 && 4116 rdev->bdev->bd_contains == 4117 rdev2->bdev->bd_contains) { 4118 printk(KERN_WARNING 4119 "%s: WARNING: %s appears to be" 4120 " on the same physical disk as" 4121 " %s.\n", 4122 mdname(mddev), 4123 bdevname(rdev->bdev,b), 4124 bdevname(rdev2->bdev,b2)); 4125 warned = 1; 4126 } 4127 } 4128 4129 if (warned) 4130 printk(KERN_WARNING 4131 "True protection against single-disk" 4132 " failure might be compromised.\n"); 4133 } 4134 4135 mddev->recovery = 0; 4136 /* may be over-ridden by personality */ 4137 mddev->resync_max_sectors = mddev->dev_sectors; 4138 4139 mddev->barriers_work = 1; 4140 mddev->ok_start_degraded = start_dirty_degraded; 4141 4142 if (start_readonly) 4143 mddev->ro = 2; /* read-only, but switch on first write */ 4144 4145 err = mddev->pers->run(mddev); 4146 if (err) 4147 printk(KERN_ERR "md: pers->run() failed ...\n"); 4148 else if (mddev->pers->size(mddev, 0, 0) < mddev->array_sectors) { 4149 WARN_ONCE(!mddev->external_size, "%s: default size too small," 4150 " but 'external_size' not in effect?\n", __func__); 4151 printk(KERN_ERR 4152 "md: invalid array_size %llu > default size %llu\n", 4153 (unsigned long long)mddev->array_sectors / 2, 4154 (unsigned long long)mddev->pers->size(mddev, 0, 0) / 2); 4155 err = -EINVAL; 4156 mddev->pers->stop(mddev); 4157 } 4158 if (err == 0 && mddev->pers->sync_request) { 4159 err = bitmap_create(mddev); 4160 if (err) { 4161 printk(KERN_ERR "%s: failed to create bitmap (%d)\n", 4162 mdname(mddev), err); 4163 mddev->pers->stop(mddev); 4164 } 4165 } 4166 if (err) { 4167 module_put(mddev->pers->owner); 4168 mddev->pers = NULL; 4169 bitmap_destroy(mddev); 4170 return err; 4171 } 4172 if (mddev->pers->sync_request) { 4173 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 4174 printk(KERN_WARNING 4175 "md: cannot register extra attributes for %s\n", 4176 mdname(mddev)); 4177 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action"); 4178 } else if (mddev->ro == 2) /* auto-readonly not meaningful */ 4179 mddev->ro = 0; 4180 4181 atomic_set(&mddev->writes_pending,0); 4182 mddev->safemode = 0; 4183 mddev->safemode_timer.function = md_safemode_timeout; 4184 mddev->safemode_timer.data = (unsigned long) mddev; 4185 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */ 4186 mddev->in_sync = 1; 4187 4188 list_for_each_entry(rdev, &mddev->disks, same_set) 4189 if (rdev->raid_disk >= 0) { 4190 char nm[20]; 4191 sprintf(nm, "rd%d", rdev->raid_disk); 4192 if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm)) 4193 printk("md: cannot register %s for %s\n", 4194 nm, mdname(mddev)); 4195 } 4196 4197 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4198 4199 if (mddev->flags) 4200 md_update_sb(mddev, 0); 4201 4202 set_capacity(disk, mddev->array_sectors); 4203 4204 /* If there is a partially-recovered drive we need to 4205 * start recovery here. 
If we leave it to md_check_recovery, 4206 * it will remove the drives and not do the right thing 4207 */ 4208 if (mddev->degraded && !mddev->sync_thread) { 4209 int spares = 0; 4210 list_for_each_entry(rdev, &mddev->disks, same_set) 4211 if (rdev->raid_disk >= 0 && 4212 !test_bit(In_sync, &rdev->flags) && 4213 !test_bit(Faulty, &rdev->flags)) 4214 /* complete an interrupted recovery */ 4215 spares++; 4216 if (spares && mddev->pers->sync_request) { 4217 mddev->recovery = 0; 4218 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 4219 mddev->sync_thread = md_register_thread(md_do_sync, 4220 mddev, 4221 "%s_resync"); 4222 if (!mddev->sync_thread) { 4223 printk(KERN_ERR "%s: could not start resync" 4224 " thread...\n", 4225 mdname(mddev)); 4226 /* leave the spares where they are, it shouldn't hurt */ 4227 mddev->recovery = 0; 4228 } 4229 } 4230 } 4231 md_wakeup_thread(mddev->thread); 4232 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 4233 4234 revalidate_disk(mddev->gendisk); 4235 mddev->changed = 1; 4236 md_new_event(mddev); 4237 sysfs_notify_dirent(mddev->sysfs_state); 4238 if (mddev->sysfs_action) 4239 sysfs_notify_dirent(mddev->sysfs_action); 4240 sysfs_notify(&mddev->kobj, NULL, "degraded"); 4241 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 4242 return 0; 4243 } 4244 4245 static int restart_array(mddev_t *mddev) 4246 { 4247 struct gendisk *disk = mddev->gendisk; 4248 4249 /* Complain if it has no devices */ 4250 if (list_empty(&mddev->disks)) 4251 return -ENXIO; 4252 if (!mddev->pers) 4253 return -EINVAL; 4254 if (!mddev->ro) 4255 return -EBUSY; 4256 mddev->safemode = 0; 4257 mddev->ro = 0; 4258 set_disk_ro(disk, 0); 4259 printk(KERN_INFO "md: %s switched to read-write mode.\n", 4260 mdname(mddev)); 4261 /* Kick recovery or resync if necessary */ 4262 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4263 md_wakeup_thread(mddev->thread); 4264 md_wakeup_thread(mddev->sync_thread); 4265 sysfs_notify_dirent(mddev->sysfs_state); 4266 return 0; 4267 } 4268 4269 /* similar to deny_write_access, but accounts for our holding a reference 4270 * to the file ourselves */ 4271 static int deny_bitmap_write_access(struct file * file) 4272 { 4273 struct inode *inode = file->f_mapping->host; 4274 4275 spin_lock(&inode->i_lock); 4276 if (atomic_read(&inode->i_writecount) > 1) { 4277 spin_unlock(&inode->i_lock); 4278 return -ETXTBSY; 4279 } 4280 atomic_set(&inode->i_writecount, -1); 4281 spin_unlock(&inode->i_lock); 4282 4283 return 0; 4284 } 4285 4286 static void restore_bitmap_write_access(struct file *file) 4287 { 4288 struct inode *inode = file->f_mapping->host; 4289 4290 spin_lock(&inode->i_lock); 4291 atomic_set(&inode->i_writecount, 1); 4292 spin_unlock(&inode->i_lock); 4293 } 4294 4295 /* mode: 4296 * 0 - completely stop and dis-assemble array 4297 * 1 - switch to readonly 4298 * 2 - stop but do not disassemble array 4299 */ 4300 static int do_md_stop(mddev_t * mddev, int mode, int is_open) 4301 { 4302 int err = 0; 4303 struct gendisk *disk = mddev->gendisk; 4304 mdk_rdev_t *rdev; 4305 4306 mutex_lock(&mddev->open_mutex); 4307 if (atomic_read(&mddev->openers) > is_open) { 4308 printk("md: %s still in use.\n",mdname(mddev)); 4309 err = -EBUSY; 4310 } else if (mddev->pers) { 4311 4312 if (mddev->sync_thread) { 4313 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4314 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4315 md_unregister_thread(mddev->sync_thread); 4316 mddev->sync_thread = NULL; 4317 } 4318 4319 del_timer_sync(&mddev->safemode_timer); 4320 4321 
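		/* Illustration of how each mode is reached (the mappings for
		 * modes 0 and 1 are taken from md_ioctl() below; the mode 2
		 * caller is an assumption, it lives earlier in the file):
		 *
		 *	ioctl(fd, STOP_ARRAY, NULL)	-> do_md_stop(mddev, 0, 1)
		 *	ioctl(fd, STOP_ARRAY_RO, NULL)	-> do_md_stop(mddev, 1, 1)
		 *	writing "inactive" to array_state -> do_md_stop(mddev, 2, 0)
		 *
		 * is_open is 1 for the ioctls because the caller necessarily
		 * holds the device open while issuing them.
		 */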
switch(mode) { 4322 case 1: /* readonly */ 4323 err = -ENXIO; 4324 if (mddev->ro==1) 4325 goto out; 4326 mddev->ro = 1; 4327 break; 4328 case 0: /* disassemble */ 4329 case 2: /* stop */ 4330 bitmap_flush(mddev); 4331 md_super_wait(mddev); 4332 if (mddev->ro) 4333 set_disk_ro(disk, 0); 4334 4335 mddev->pers->stop(mddev); 4336 mddev->queue->merge_bvec_fn = NULL; 4337 mddev->queue->unplug_fn = NULL; 4338 mddev->queue->backing_dev_info.congested_fn = NULL; 4339 module_put(mddev->pers->owner); 4340 if (mddev->pers->sync_request) 4341 mddev->private = &md_redundancy_group; 4342 mddev->pers = NULL; 4343 /* tell userspace to handle 'inactive' */ 4344 sysfs_notify_dirent(mddev->sysfs_state); 4345 4346 list_for_each_entry(rdev, &mddev->disks, same_set) 4347 if (rdev->raid_disk >= 0) { 4348 char nm[20]; 4349 sprintf(nm, "rd%d", rdev->raid_disk); 4350 sysfs_remove_link(&mddev->kobj, nm); 4351 } 4352 4353 set_capacity(disk, 0); 4354 mddev->changed = 1; 4355 4356 if (mddev->ro) 4357 mddev->ro = 0; 4358 } 4359 if (!mddev->in_sync || mddev->flags) { 4360 /* mark array as shutdown cleanly */ 4361 mddev->in_sync = 1; 4362 md_update_sb(mddev, 1); 4363 } 4364 if (mode == 1) 4365 set_disk_ro(disk, 1); 4366 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4367 } 4368 out: 4369 mutex_unlock(&mddev->open_mutex); 4370 if (err) 4371 return err; 4372 /* 4373 * Free resources if final stop 4374 */ 4375 if (mode == 0) { 4376 4377 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); 4378 4379 bitmap_destroy(mddev); 4380 if (mddev->bitmap_file) { 4381 restore_bitmap_write_access(mddev->bitmap_file); 4382 fput(mddev->bitmap_file); 4383 mddev->bitmap_file = NULL; 4384 } 4385 mddev->bitmap_offset = 0; 4386 4387 /* make sure all md_delayed_delete calls have finished */ 4388 flush_scheduled_work(); 4389 4390 export_array(mddev); 4391 4392 mddev->array_sectors = 0; 4393 mddev->external_size = 0; 4394 mddev->dev_sectors = 0; 4395 mddev->raid_disks = 0; 4396 mddev->recovery_cp = 0; 4397 mddev->resync_min = 0; 4398 mddev->resync_max = MaxSector; 4399 mddev->reshape_position = MaxSector; 4400 mddev->external = 0; 4401 mddev->persistent = 0; 4402 mddev->level = LEVEL_NONE; 4403 mddev->clevel[0] = 0; 4404 mddev->flags = 0; 4405 mddev->ro = 0; 4406 mddev->metadata_type[0] = 0; 4407 mddev->chunk_sectors = 0; 4408 mddev->ctime = mddev->utime = 0; 4409 mddev->layout = 0; 4410 mddev->max_disks = 0; 4411 mddev->events = 0; 4412 mddev->delta_disks = 0; 4413 mddev->new_level = LEVEL_NONE; 4414 mddev->new_layout = 0; 4415 mddev->new_chunk_sectors = 0; 4416 mddev->curr_resync = 0; 4417 mddev->resync_mismatches = 0; 4418 mddev->suspend_lo = mddev->suspend_hi = 0; 4419 mddev->sync_speed_min = mddev->sync_speed_max = 0; 4420 mddev->recovery = 0; 4421 mddev->in_sync = 0; 4422 mddev->changed = 0; 4423 mddev->degraded = 0; 4424 mddev->barriers_work = 0; 4425 mddev->safemode = 0; 4426 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 4427 if (mddev->hold_active == UNTIL_STOP) 4428 mddev->hold_active = 0; 4429 4430 } else if (mddev->pers) 4431 printk(KERN_INFO "md: %s switched to read-only mode.\n", 4432 mdname(mddev)); 4433 err = 0; 4434 blk_integrity_unregister(disk); 4435 md_new_event(mddev); 4436 sysfs_notify_dirent(mddev->sysfs_state); 4437 return err; 4438 } 4439 4440 #ifndef MODULE 4441 static void autorun_array(mddev_t *mddev) 4442 { 4443 mdk_rdev_t *rdev; 4444 int err; 4445 4446 if (list_empty(&mddev->disks)) 4447 return; 4448 4449 printk(KERN_INFO "md: running: "); 4450 4451 list_for_each_entry(rdev, &mddev->disks, 
same_set) { 4452 char b[BDEVNAME_SIZE]; 4453 printk("<%s>", bdevname(rdev->bdev,b)); 4454 } 4455 printk("\n"); 4456 4457 err = do_md_run(mddev); 4458 if (err) { 4459 printk(KERN_WARNING "md: do_md_run() returned %d\n", err); 4460 do_md_stop(mddev, 0, 0); 4461 } 4462 } 4463 4464 /* 4465 * lets try to run arrays based on all disks that have arrived 4466 * until now. (those are in pending_raid_disks) 4467 * 4468 * the method: pick the first pending disk, collect all disks with 4469 * the same UUID, remove all from the pending list and put them into 4470 * the 'same_array' list. Then order this list based on superblock 4471 * update time (freshest comes first), kick out 'old' disks and 4472 * compare superblocks. If everything's fine then run it. 4473 * 4474 * If "unit" is allocated, then bump its reference count 4475 */ 4476 static void autorun_devices(int part) 4477 { 4478 mdk_rdev_t *rdev0, *rdev, *tmp; 4479 mddev_t *mddev; 4480 char b[BDEVNAME_SIZE]; 4481 4482 printk(KERN_INFO "md: autorun ...\n"); 4483 while (!list_empty(&pending_raid_disks)) { 4484 int unit; 4485 dev_t dev; 4486 LIST_HEAD(candidates); 4487 rdev0 = list_entry(pending_raid_disks.next, 4488 mdk_rdev_t, same_set); 4489 4490 printk(KERN_INFO "md: considering %s ...\n", 4491 bdevname(rdev0->bdev,b)); 4492 INIT_LIST_HEAD(&candidates); 4493 rdev_for_each_list(rdev, tmp, &pending_raid_disks) 4494 if (super_90_load(rdev, rdev0, 0) >= 0) { 4495 printk(KERN_INFO "md: adding %s ...\n", 4496 bdevname(rdev->bdev,b)); 4497 list_move(&rdev->same_set, &candidates); 4498 } 4499 /* 4500 * now we have a set of devices, with all of them having 4501 * mostly sane superblocks. It's time to allocate the 4502 * mddev. 4503 */ 4504 if (part) { 4505 dev = MKDEV(mdp_major, 4506 rdev0->preferred_minor << MdpMinorShift); 4507 unit = MINOR(dev) >> MdpMinorShift; 4508 } else { 4509 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor); 4510 unit = MINOR(dev); 4511 } 4512 if (rdev0->preferred_minor != unit) { 4513 printk(KERN_INFO "md: unit number in %s is bad: %d\n", 4514 bdevname(rdev0->bdev, b), rdev0->preferred_minor); 4515 break; 4516 } 4517 4518 md_probe(dev, NULL, NULL); 4519 mddev = mddev_find(dev); 4520 if (!mddev || !mddev->gendisk) { 4521 if (mddev) 4522 mddev_put(mddev); 4523 printk(KERN_ERR 4524 "md: cannot allocate memory for md drive.\n"); 4525 break; 4526 } 4527 if (mddev_lock(mddev)) 4528 printk(KERN_WARNING "md: %s locked, cannot run\n", 4529 mdname(mddev)); 4530 else if (mddev->raid_disks || mddev->major_version 4531 || !list_empty(&mddev->disks)) { 4532 printk(KERN_WARNING 4533 "md: %s already running, cannot run %s\n", 4534 mdname(mddev), bdevname(rdev0->bdev,b)); 4535 mddev_unlock(mddev); 4536 } else { 4537 printk(KERN_INFO "md: created %s\n", mdname(mddev)); 4538 mddev->persistent = 1; 4539 rdev_for_each_list(rdev, tmp, &candidates) { 4540 list_del_init(&rdev->same_set); 4541 if (bind_rdev_to_array(rdev, mddev)) 4542 export_rdev(rdev); 4543 } 4544 autorun_array(mddev); 4545 mddev_unlock(mddev); 4546 } 4547 /* on success, candidates will be empty, on error 4548 * it won't... 4549 */ 4550 rdev_for_each_list(rdev, tmp, &candidates) { 4551 list_del_init(&rdev->same_set); 4552 export_rdev(rdev); 4553 } 4554 mddev_put(mddev); 4555 } 4556 printk(KERN_INFO "md: ... 
autorun DONE.\n"); 4557 } 4558 #endif /* !MODULE */ 4559 4560 static int get_version(void __user * arg) 4561 { 4562 mdu_version_t ver; 4563 4564 ver.major = MD_MAJOR_VERSION; 4565 ver.minor = MD_MINOR_VERSION; 4566 ver.patchlevel = MD_PATCHLEVEL_VERSION; 4567 4568 if (copy_to_user(arg, &ver, sizeof(ver))) 4569 return -EFAULT; 4570 4571 return 0; 4572 } 4573 4574 static int get_array_info(mddev_t * mddev, void __user * arg) 4575 { 4576 mdu_array_info_t info; 4577 int nr,working,active,failed,spare; 4578 mdk_rdev_t *rdev; 4579 4580 nr=working=active=failed=spare=0; 4581 list_for_each_entry(rdev, &mddev->disks, same_set) { 4582 nr++; 4583 if (test_bit(Faulty, &rdev->flags)) 4584 failed++; 4585 else { 4586 working++; 4587 if (test_bit(In_sync, &rdev->flags)) 4588 active++; 4589 else 4590 spare++; 4591 } 4592 } 4593 4594 info.major_version = mddev->major_version; 4595 info.minor_version = mddev->minor_version; 4596 info.patch_version = MD_PATCHLEVEL_VERSION; 4597 info.ctime = mddev->ctime; 4598 info.level = mddev->level; 4599 info.size = mddev->dev_sectors / 2; 4600 if (info.size != mddev->dev_sectors / 2) /* overflow */ 4601 info.size = -1; 4602 info.nr_disks = nr; 4603 info.raid_disks = mddev->raid_disks; 4604 info.md_minor = mddev->md_minor; 4605 info.not_persistent= !mddev->persistent; 4606 4607 info.utime = mddev->utime; 4608 info.state = 0; 4609 if (mddev->in_sync) 4610 info.state = (1<<MD_SB_CLEAN); 4611 if (mddev->bitmap && mddev->bitmap_offset) 4612 info.state |= (1<<MD_SB_BITMAP_PRESENT); /* don't clobber MD_SB_CLEAN */ 4613 info.active_disks = active; 4614 info.working_disks = working; 4615 info.failed_disks = failed; 4616 info.spare_disks = spare; 4617 4618 info.layout = mddev->layout; 4619 info.chunk_size = mddev->chunk_sectors << 9; 4620 4621 if (copy_to_user(arg, &info, sizeof(info))) 4622 return -EFAULT; 4623 4624 return 0; 4625 } 4626 4627 static int get_bitmap_file(mddev_t * mddev, void __user * arg) 4628 { 4629 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ 4630 char *ptr, *buf = NULL; 4631 int err = -ENOMEM; 4632 4633 if (md_allow_write(mddev)) 4634 file = kmalloc(sizeof(*file), GFP_NOIO); 4635 else 4636 file = kmalloc(sizeof(*file), GFP_KERNEL); 4637 4638 if (!file) 4639 goto out; 4640 4641 /* bitmap disabled, zero the first byte and copy out */ 4642 if (!mddev->bitmap || !mddev->bitmap->file) { 4643 file->pathname[0] = '\0'; 4644 goto copy_out; 4645 } 4646 4647 buf = kmalloc(sizeof(file->pathname), GFP_KERNEL); 4648 if (!buf) 4649 goto out; 4650 4651 ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname)); 4652 if (IS_ERR(ptr)) 4653 goto out; 4654 4655 strcpy(file->pathname, ptr); 4656 4657 copy_out: 4658 err = 0; 4659 if (copy_to_user(arg, file, sizeof(*file))) 4660 err = -EFAULT; 4661 out: 4662 kfree(buf); 4663 kfree(file); 4664 return err; 4665 } 4666 4667 static int get_disk_info(mddev_t * mddev, void __user * arg) 4668 { 4669 mdu_disk_info_t info; 4670 mdk_rdev_t *rdev; 4671 4672 if (copy_from_user(&info, arg, sizeof(info))) 4673 return -EFAULT; 4674 4675 rdev = find_rdev_nr(mddev, info.number); 4676 if (rdev) { 4677 info.major = MAJOR(rdev->bdev->bd_dev); 4678 info.minor = MINOR(rdev->bdev->bd_dev); 4679 info.raid_disk = rdev->raid_disk; 4680 info.state = 0; 4681 if (test_bit(Faulty, &rdev->flags)) 4682 info.state |= (1<<MD_DISK_FAULTY); 4683 else if (test_bit(In_sync, &rdev->flags)) { 4684 info.state |= (1<<MD_DISK_ACTIVE); 4685 info.state |= (1<<MD_DISK_SYNC); 4686 } 4687 if (test_bit(WriteMostly, &rdev->flags)) 4688 info.state |= (1<<MD_DISK_WRITEMOSTLY); 4689
} else { 4690 info.major = info.minor = 0; 4691 info.raid_disk = -1; 4692 info.state = (1<<MD_DISK_REMOVED); 4693 } 4694 4695 if (copy_to_user(arg, &info, sizeof(info))) 4696 return -EFAULT; 4697 4698 return 0; 4699 } 4700 4701 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) 4702 { 4703 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 4704 mdk_rdev_t *rdev; 4705 dev_t dev = MKDEV(info->major,info->minor); 4706 4707 if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) 4708 return -EOVERFLOW; 4709 4710 if (!mddev->raid_disks) { 4711 int err; 4712 /* expecting a device which has a superblock */ 4713 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); 4714 if (IS_ERR(rdev)) { 4715 printk(KERN_WARNING 4716 "md: md_import_device returned %ld\n", 4717 PTR_ERR(rdev)); 4718 return PTR_ERR(rdev); 4719 } 4720 if (!list_empty(&mddev->disks)) { 4721 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, 4722 mdk_rdev_t, same_set); 4723 int err = super_types[mddev->major_version] 4724 .load_super(rdev, rdev0, mddev->minor_version); 4725 if (err < 0) { 4726 printk(KERN_WARNING 4727 "md: %s has different UUID to %s\n", 4728 bdevname(rdev->bdev,b), 4729 bdevname(rdev0->bdev,b2)); 4730 export_rdev(rdev); 4731 return -EINVAL; 4732 } 4733 } 4734 err = bind_rdev_to_array(rdev, mddev); 4735 if (err) 4736 export_rdev(rdev); 4737 return err; 4738 } 4739 4740 /* 4741 * add_new_disk can be used once the array is assembled 4742 * to add "hot spares". They must already have a superblock 4743 * written 4744 */ 4745 if (mddev->pers) { 4746 int err; 4747 if (!mddev->pers->hot_add_disk) { 4748 printk(KERN_WARNING 4749 "%s: personality does not support diskops!\n", 4750 mdname(mddev)); 4751 return -EINVAL; 4752 } 4753 if (mddev->persistent) 4754 rdev = md_import_device(dev, mddev->major_version, 4755 mddev->minor_version); 4756 else 4757 rdev = md_import_device(dev, -1, -1); 4758 if (IS_ERR(rdev)) { 4759 printk(KERN_WARNING 4760 "md: md_import_device returned %ld\n", 4761 PTR_ERR(rdev)); 4762 return PTR_ERR(rdev); 4763 } 4764 /* set save_raid_disk if appropriate */ 4765 if (!mddev->persistent) { 4766 if (info->state & (1<<MD_DISK_SYNC) && 4767 info->raid_disk < mddev->raid_disks) 4768 rdev->raid_disk = info->raid_disk; 4769 else 4770 rdev->raid_disk = -1; 4771 } else 4772 super_types[mddev->major_version]. 4773 validate_super(mddev, rdev); 4774 rdev->saved_raid_disk = rdev->raid_disk; 4775 4776 clear_bit(In_sync, &rdev->flags); /* just to be sure */ 4777 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 4778 set_bit(WriteMostly, &rdev->flags); 4779 else 4780 clear_bit(WriteMostly, &rdev->flags); 4781 4782 rdev->raid_disk = -1; 4783 err = bind_rdev_to_array(rdev, mddev); 4784 if (!err && !mddev->pers->hot_remove_disk) { 4785 /* If there is hot_add_disk but no hot_remove_disk 4786 * then added disks for geometry changes, 4787 * and should be added immediately. 4788 */ 4789 super_types[mddev->major_version]. 
4790 validate_super(mddev, rdev); 4791 err = mddev->pers->hot_add_disk(mddev, rdev); 4792 if (err) 4793 unbind_rdev_from_array(rdev); 4794 } 4795 if (err) 4796 export_rdev(rdev); 4797 else 4798 sysfs_notify_dirent(rdev->sysfs_state); 4799 4800 md_update_sb(mddev, 1); 4801 if (mddev->degraded) 4802 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 4803 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4804 md_wakeup_thread(mddev->thread); 4805 return err; 4806 } 4807 4808 /* otherwise, add_new_disk is only allowed 4809 * for major_version==0 superblocks 4810 */ 4811 if (mddev->major_version != 0) { 4812 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n", 4813 mdname(mddev)); 4814 return -EINVAL; 4815 } 4816 4817 if (!(info->state & (1<<MD_DISK_FAULTY))) { 4818 int err; 4819 rdev = md_import_device(dev, -1, 0); 4820 if (IS_ERR(rdev)) { 4821 printk(KERN_WARNING 4822 "md: error, md_import_device() returned %ld\n", 4823 PTR_ERR(rdev)); 4824 return PTR_ERR(rdev); 4825 } 4826 rdev->desc_nr = info->number; 4827 if (info->raid_disk < mddev->raid_disks) 4828 rdev->raid_disk = info->raid_disk; 4829 else 4830 rdev->raid_disk = -1; 4831 4832 if (rdev->raid_disk < mddev->raid_disks) 4833 if (info->state & (1<<MD_DISK_SYNC)) 4834 set_bit(In_sync, &rdev->flags); 4835 4836 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 4837 set_bit(WriteMostly, &rdev->flags); 4838 4839 if (!mddev->persistent) { 4840 printk(KERN_INFO "md: nonpersistent superblock ...\n"); 4841 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512; 4842 } else 4843 rdev->sb_start = calc_dev_sboffset(rdev->bdev); 4844 rdev->sectors = rdev->sb_start; 4845 4846 err = bind_rdev_to_array(rdev, mddev); 4847 if (err) { 4848 export_rdev(rdev); 4849 return err; 4850 } 4851 } 4852 4853 return 0; 4854 } 4855 4856 static int hot_remove_disk(mddev_t * mddev, dev_t dev) 4857 { 4858 char b[BDEVNAME_SIZE]; 4859 mdk_rdev_t *rdev; 4860 4861 rdev = find_rdev(mddev, dev); 4862 if (!rdev) 4863 return -ENXIO; 4864 4865 if (rdev->raid_disk >= 0) 4866 goto busy; 4867 4868 kick_rdev_from_array(rdev); 4869 md_update_sb(mddev, 1); 4870 md_new_event(mddev); 4871 4872 return 0; 4873 busy: 4874 printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n", 4875 bdevname(rdev->bdev,b), mdname(mddev)); 4876 return -EBUSY; 4877 } 4878 4879 static int hot_add_disk(mddev_t * mddev, dev_t dev) 4880 { 4881 char b[BDEVNAME_SIZE]; 4882 int err; 4883 mdk_rdev_t *rdev; 4884 4885 if (!mddev->pers) 4886 return -ENODEV; 4887 4888 if (mddev->major_version != 0) { 4889 printk(KERN_WARNING "%s: HOT_ADD may only be used with" 4890 " version-0 superblocks.\n", 4891 mdname(mddev)); 4892 return -EINVAL; 4893 } 4894 if (!mddev->pers->hot_add_disk) { 4895 printk(KERN_WARNING 4896 "%s: personality does not support diskops!\n", 4897 mdname(mddev)); 4898 return -EINVAL; 4899 } 4900 4901 rdev = md_import_device(dev, -1, 0); 4902 if (IS_ERR(rdev)) { 4903 printk(KERN_WARNING 4904 "md: error, md_import_device() returned %ld\n", 4905 PTR_ERR(rdev)); 4906 return -EINVAL; 4907 } 4908 4909 if (mddev->persistent) 4910 rdev->sb_start = calc_dev_sboffset(rdev->bdev); 4911 else 4912 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512; 4913 4914 rdev->sectors = rdev->sb_start; 4915 4916 if (test_bit(Faulty, &rdev->flags)) { 4917 printk(KERN_WARNING 4918 "md: can not hot-add faulty %s disk to %s!\n", 4919 bdevname(rdev->bdev,b), mdname(mddev)); 4920 err = -EINVAL; 4921 goto abort_export; 4922 } 4923 clear_bit(In_sync, &rdev->flags); 4924 rdev->desc_nr = -1; 4925 rdev->saved_raid_disk = -1; 4926 err = 
bind_rdev_to_array(rdev, mddev); 4927 if (err) 4928 goto abort_export; 4929 4930 /* 4931 * The rest should better be atomic, we can have disk failures 4932 * noticed in interrupt contexts ... 4933 */ 4934 4935 rdev->raid_disk = -1; 4936 4937 md_update_sb(mddev, 1); 4938 4939 /* 4940 * Kick recovery, maybe this spare has to be added to the 4941 * array immediately. 4942 */ 4943 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4944 md_wakeup_thread(mddev->thread); 4945 md_new_event(mddev); 4946 return 0; 4947 4948 abort_export: 4949 export_rdev(rdev); 4950 return err; 4951 } 4952 4953 static int set_bitmap_file(mddev_t *mddev, int fd) 4954 { 4955 int err; 4956 4957 if (mddev->pers) { 4958 if (!mddev->pers->quiesce) 4959 return -EBUSY; 4960 if (mddev->recovery || mddev->sync_thread) 4961 return -EBUSY; 4962 /* we should be able to change the bitmap.. */ 4963 } 4964 4965 4966 if (fd >= 0) { 4967 if (mddev->bitmap) 4968 return -EEXIST; /* cannot add when bitmap is present */ 4969 mddev->bitmap_file = fget(fd); 4970 4971 if (mddev->bitmap_file == NULL) { 4972 printk(KERN_ERR "%s: error: failed to get bitmap file\n", 4973 mdname(mddev)); 4974 return -EBADF; 4975 } 4976 4977 err = deny_bitmap_write_access(mddev->bitmap_file); 4978 if (err) { 4979 printk(KERN_ERR "%s: error: bitmap file is already in use\n", 4980 mdname(mddev)); 4981 fput(mddev->bitmap_file); 4982 mddev->bitmap_file = NULL; 4983 return err; 4984 } 4985 mddev->bitmap_offset = 0; /* file overrides offset */ 4986 } else if (mddev->bitmap == NULL) 4987 return -ENOENT; /* cannot remove what isn't there */ 4988 err = 0; 4989 if (mddev->pers) { 4990 mddev->pers->quiesce(mddev, 1); 4991 if (fd >= 0) 4992 err = bitmap_create(mddev); 4993 if (fd < 0 || err) { 4994 bitmap_destroy(mddev); 4995 fd = -1; /* make sure to put the file */ 4996 } 4997 mddev->pers->quiesce(mddev, 0); 4998 } 4999 if (fd < 0) { 5000 if (mddev->bitmap_file) { 5001 restore_bitmap_write_access(mddev->bitmap_file); 5002 fput(mddev->bitmap_file); 5003 } 5004 mddev->bitmap_file = NULL; 5005 } 5006 5007 return err; 5008 } 5009 5010 /* 5011 * set_array_info is used in two different ways 5012 * The original usage is when creating a new array. 5013 * In this usage, raid_disks is > 0 and it together with 5014 * level, size, not_persistent,layout,chunksize determine the 5015 * shape of the array. 5016 * This will always create an array with a type-0.90.0 superblock. 5017 * The newer usage is when assembling an array. 5018 * In this case raid_disks will be 0, and the major_version field is 5019 * used to determine which style super-blocks are to be found on the devices. 5020 * The minor and patch _version numbers are also kept in case the 5021 * super_block handler wishes to interpret them. 5022 */ 5023 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) 5024 { 5025 5026 if (info->raid_disks == 0) { 5027 /* just setting version number for superblock loading */ 5028 if (info->major_version < 0 || 5029 info->major_version >= ARRAY_SIZE(super_types) || 5030 super_types[info->major_version].name == NULL) { 5031 /* maybe try to auto-load a module?
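	 * do_md_run() above already does request_module("md-level-%d", ...)
	 * for missing personalities; there is no analogous hook for
	 * superblock handlers, so an unknown major_version simply fails.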
*/ 5032 printk(KERN_INFO 5033 "md: superblock version %d not known\n", 5034 info->major_version); 5035 return -EINVAL; 5036 } 5037 mddev->major_version = info->major_version; 5038 mddev->minor_version = info->minor_version; 5039 mddev->patch_version = info->patch_version; 5040 mddev->persistent = !info->not_persistent; 5041 return 0; 5042 } 5043 mddev->major_version = MD_MAJOR_VERSION; 5044 mddev->minor_version = MD_MINOR_VERSION; 5045 mddev->patch_version = MD_PATCHLEVEL_VERSION; 5046 mddev->ctime = get_seconds(); 5047 5048 mddev->level = info->level; 5049 mddev->clevel[0] = 0; 5050 mddev->dev_sectors = 2 * (sector_t)info->size; 5051 mddev->raid_disks = info->raid_disks; 5052 /* don't set md_minor, it is determined by which /dev/md* was 5053 * opened 5054 */ 5055 if (info->state & (1<<MD_SB_CLEAN)) 5056 mddev->recovery_cp = MaxSector; 5057 else 5058 mddev->recovery_cp = 0; 5059 mddev->persistent = ! info->not_persistent; 5060 mddev->external = 0; 5061 5062 mddev->layout = info->layout; 5063 mddev->chunk_sectors = info->chunk_size >> 9; 5064 5065 mddev->max_disks = MD_SB_DISKS; 5066 5067 if (mddev->persistent) 5068 mddev->flags = 0; 5069 set_bit(MD_CHANGE_DEVS, &mddev->flags); 5070 5071 mddev->default_bitmap_offset = MD_SB_BYTES >> 9; 5072 mddev->bitmap_offset = 0; 5073 5074 mddev->reshape_position = MaxSector; 5075 5076 /* 5077 * Generate a 128 bit UUID 5078 */ 5079 get_random_bytes(mddev->uuid, 16); 5080 5081 mddev->new_level = mddev->level; 5082 mddev->new_chunk_sectors = mddev->chunk_sectors; 5083 mddev->new_layout = mddev->layout; 5084 mddev->delta_disks = 0; 5085 5086 return 0; 5087 } 5088 5089 void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors) 5090 { 5091 WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__); 5092 5093 if (mddev->external_size) 5094 return; 5095 5096 mddev->array_sectors = array_sectors; 5097 } 5098 EXPORT_SYMBOL(md_set_array_sectors); 5099 5100 static int update_size(mddev_t *mddev, sector_t num_sectors) 5101 { 5102 mdk_rdev_t *rdev; 5103 int rv; 5104 int fit = (num_sectors == 0); 5105 5106 if (mddev->pers->resize == NULL) 5107 return -EINVAL; 5108 /* The "num_sectors" is the number of sectors of each device that 5109 * is used. This can only make sense for arrays with redundancy. 5110 * linear and raid0 always use whatever space is available. We can only 5111 * consider changing this number if no resync or reconstruction is 5112 * happening, and if the new size is acceptable. It must fit before the 5113 * sb_start or, if that is <data_offset, it must fit before the size 5114 * of each device. If num_sectors is zero, we find the largest size 5115 * that fits. 5116 5117 */ 5118 if (mddev->sync_thread) 5119 return -EBUSY; 5120 if (mddev->bitmap) 5121 /* Sorry, cannot grow a bitmap yet, just remove it, 5122 * grow, and re-add.
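		 * From user space the remove/grow/re-add sequence looks
		 * roughly like this (illustrative mdadm commands, not
		 * derived from this file):
		 *	mdadm --grow /dev/md0 --bitmap=none
		 *	mdadm --grow /dev/md0 --size=max
		 *	mdadm --grow /dev/md0 --bitmap=internal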
5123 */ 5124 return -EBUSY; 5125 list_for_each_entry(rdev, &mddev->disks, same_set) { 5126 sector_t avail = rdev->sectors; 5127 5128 if (fit && (num_sectors == 0 || num_sectors > avail)) 5129 num_sectors = avail; 5130 if (avail < num_sectors) 5131 return -ENOSPC; 5132 } 5133 rv = mddev->pers->resize(mddev, num_sectors); 5134 if (!rv) 5135 revalidate_disk(mddev->gendisk); 5136 return rv; 5137 } 5138 5139 static int update_raid_disks(mddev_t *mddev, int raid_disks) 5140 { 5141 int rv; 5142 /* change the number of raid disks */ 5143 if (mddev->pers->check_reshape == NULL) 5144 return -EINVAL; 5145 if (raid_disks <= 0 || 5146 raid_disks >= mddev->max_disks) 5147 return -EINVAL; 5148 if (mddev->sync_thread || mddev->reshape_position != MaxSector) 5149 return -EBUSY; 5150 mddev->delta_disks = raid_disks - mddev->raid_disks; 5151 5152 rv = mddev->pers->check_reshape(mddev); 5153 return rv; 5154 } 5155 5156 5157 /* 5158 * update_array_info is used to change the configuration of an 5159 * on-line array. 5160 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size 5161 * fields in the info are checked against the array. 5162 * Any differences that cannot be handled will cause an error. 5163 * Normally, only one change can be managed at a time. 5164 */ 5165 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info) 5166 { 5167 int rv = 0; 5168 int cnt = 0; 5169 int state = 0; 5170 5171 /* calculate expected state,ignoring low bits */ 5172 if (mddev->bitmap && mddev->bitmap_offset) 5173 state |= (1 << MD_SB_BITMAP_PRESENT); 5174 5175 if (mddev->major_version != info->major_version || 5176 mddev->minor_version != info->minor_version || 5177 /* mddev->patch_version != info->patch_version || */ 5178 mddev->ctime != info->ctime || 5179 mddev->level != info->level || 5180 /* mddev->layout != info->layout || */ 5181 !mddev->persistent != info->not_persistent|| 5182 mddev->chunk_sectors != info->chunk_size >> 9 || 5183 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ 5184 ((state^info->state) & 0xfffffe00) 5185 ) 5186 return -EINVAL; 5187 /* Check there is only one change */ 5188 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 5189 cnt++; 5190 if (mddev->raid_disks != info->raid_disks) 5191 cnt++; 5192 if (mddev->layout != info->layout) 5193 cnt++; 5194 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) 5195 cnt++; 5196 if (cnt == 0) 5197 return 0; 5198 if (cnt > 1) 5199 return -EINVAL; 5200 5201 if (mddev->layout != info->layout) { 5202 /* Change layout 5203 * we don't need to do anything at the md level, the 5204 * personality will take care of it all. 
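	 * (For example, a raid5 parity-layout change: new_layout is set
	 * below, and the personality's check_reshape() either accepts it
	 * or we fall back to the old layout on error.)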
5205 */ 5206 if (mddev->pers->check_reshape == NULL) 5207 return -EINVAL; 5208 else { 5209 mddev->new_layout = info->layout; 5210 rv = mddev->pers->check_reshape(mddev); 5211 if (rv) 5212 mddev->new_layout = mddev->layout; 5213 return rv; 5214 } 5215 } 5216 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 5217 rv = update_size(mddev, (sector_t)info->size * 2); 5218 5219 if (mddev->raid_disks != info->raid_disks) 5220 rv = update_raid_disks(mddev, info->raid_disks); 5221 5222 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { 5223 if (mddev->pers->quiesce == NULL) 5224 return -EINVAL; 5225 if (mddev->recovery || mddev->sync_thread) 5226 return -EBUSY; 5227 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) { 5228 /* add the bitmap */ 5229 if (mddev->bitmap) 5230 return -EEXIST; 5231 if (mddev->default_bitmap_offset == 0) 5232 return -EINVAL; 5233 mddev->bitmap_offset = mddev->default_bitmap_offset; 5234 mddev->pers->quiesce(mddev, 1); 5235 rv = bitmap_create(mddev); 5236 if (rv) 5237 bitmap_destroy(mddev); 5238 mddev->pers->quiesce(mddev, 0); 5239 } else { 5240 /* remove the bitmap */ 5241 if (!mddev->bitmap) 5242 return -ENOENT; 5243 if (mddev->bitmap->file) 5244 return -EINVAL; 5245 mddev->pers->quiesce(mddev, 1); 5246 bitmap_destroy(mddev); 5247 mddev->pers->quiesce(mddev, 0); 5248 mddev->bitmap_offset = 0; 5249 } 5250 } 5251 md_update_sb(mddev, 1); 5252 return rv; 5253 } 5254 5255 static int set_disk_faulty(mddev_t *mddev, dev_t dev) 5256 { 5257 mdk_rdev_t *rdev; 5258 5259 if (mddev->pers == NULL) 5260 return -ENODEV; 5261 5262 rdev = find_rdev(mddev, dev); 5263 if (!rdev) 5264 return -ENODEV; 5265 5266 md_error(mddev, rdev); 5267 return 0; 5268 } 5269 5270 /* 5271 * We have a problem here : there is no easy way to give a CHS 5272 * virtual geometry. We currently pretend that we have a 2 heads 5273 * 4 sectors (with a BIG number of cylinders...). This drives 5274 * dosfs just mad... 
;-) 5275 */ 5276 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) 5277 { 5278 mddev_t *mddev = bdev->bd_disk->private_data; 5279 5280 geo->heads = 2; 5281 geo->sectors = 4; 5282 geo->cylinders = get_capacity(mddev->gendisk) / 8; 5283 return 0; 5284 } 5285 5286 static int md_ioctl(struct block_device *bdev, fmode_t mode, 5287 unsigned int cmd, unsigned long arg) 5288 { 5289 int err = 0; 5290 void __user *argp = (void __user *)arg; 5291 mddev_t *mddev = NULL; 5292 5293 if (!capable(CAP_SYS_ADMIN)) 5294 return -EACCES; 5295 5296 /* 5297 * Commands dealing with the RAID driver but not any 5298 * particular array: 5299 */ 5300 switch (cmd) 5301 { 5302 case RAID_VERSION: 5303 err = get_version(argp); 5304 goto done; 5305 5306 case PRINT_RAID_DEBUG: 5307 err = 0; 5308 md_print_devices(); 5309 goto done; 5310 5311 #ifndef MODULE 5312 case RAID_AUTORUN: 5313 err = 0; 5314 autostart_arrays(arg); 5315 goto done; 5316 #endif 5317 default:; 5318 } 5319 5320 /* 5321 * Commands creating/starting a new array: 5322 */ 5323 5324 mddev = bdev->bd_disk->private_data; 5325 5326 if (!mddev) { 5327 BUG(); 5328 goto abort; 5329 } 5330 5331 err = mddev_lock(mddev); 5332 if (err) { 5333 printk(KERN_INFO 5334 "md: ioctl lock interrupted, reason %d, cmd %d\n", 5335 err, cmd); 5336 goto abort; 5337 } 5338 5339 switch (cmd) 5340 { 5341 case SET_ARRAY_INFO: 5342 { 5343 mdu_array_info_t info; 5344 if (!arg) 5345 memset(&info, 0, sizeof(info)); 5346 else if (copy_from_user(&info, argp, sizeof(info))) { 5347 err = -EFAULT; 5348 goto abort_unlock; 5349 } 5350 if (mddev->pers) { 5351 err = update_array_info(mddev, &info); 5352 if (err) { 5353 printk(KERN_WARNING "md: couldn't update" 5354 " array info. %d\n", err); 5355 goto abort_unlock; 5356 } 5357 goto done_unlock; 5358 } 5359 if (!list_empty(&mddev->disks)) { 5360 printk(KERN_WARNING 5361 "md: array %s already has disks!\n", 5362 mdname(mddev)); 5363 err = -EBUSY; 5364 goto abort_unlock; 5365 } 5366 if (mddev->raid_disks) { 5367 printk(KERN_WARNING 5368 "md: array %s already initialised!\n", 5369 mdname(mddev)); 5370 err = -EBUSY; 5371 goto abort_unlock; 5372 } 5373 err = set_array_info(mddev, &info); 5374 if (err) { 5375 printk(KERN_WARNING "md: couldn't set" 5376 " array info. 
%d\n", err); 5377 goto abort_unlock; 5378 } 5379 } 5380 goto done_unlock; 5381 5382 default:; 5383 } 5384 5385 /* 5386 * Commands querying/configuring an existing array: 5387 */ 5388 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY, 5389 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */ 5390 if ((!mddev->raid_disks && !mddev->external) 5391 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY 5392 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE 5393 && cmd != GET_BITMAP_FILE) { 5394 err = -ENODEV; 5395 goto abort_unlock; 5396 } 5397 5398 /* 5399 * Commands even a read-only array can execute: 5400 */ 5401 switch (cmd) 5402 { 5403 case GET_ARRAY_INFO: 5404 err = get_array_info(mddev, argp); 5405 goto done_unlock; 5406 5407 case GET_BITMAP_FILE: 5408 err = get_bitmap_file(mddev, argp); 5409 goto done_unlock; 5410 5411 case GET_DISK_INFO: 5412 err = get_disk_info(mddev, argp); 5413 goto done_unlock; 5414 5415 case RESTART_ARRAY_RW: 5416 err = restart_array(mddev); 5417 goto done_unlock; 5418 5419 case STOP_ARRAY: 5420 err = do_md_stop(mddev, 0, 1); 5421 goto done_unlock; 5422 5423 case STOP_ARRAY_RO: 5424 err = do_md_stop(mddev, 1, 1); 5425 goto done_unlock; 5426 5427 } 5428 5429 /* 5430 * The remaining ioctls are changing the state of the 5431 * superblock, so we do not allow them on read-only arrays. 5432 * However non-MD ioctls (e.g. get-size) will still come through 5433 * here and hit the 'default' below, so only disallow 5434 * 'md' ioctls, and switch to rw mode if started auto-readonly. 5435 */ 5436 if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) { 5437 if (mddev->ro == 2) { 5438 mddev->ro = 0; 5439 sysfs_notify_dirent(mddev->sysfs_state); 5440 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5441 md_wakeup_thread(mddev->thread); 5442 } else { 5443 err = -EROFS; 5444 goto abort_unlock; 5445 } 5446 } 5447 5448 switch (cmd) 5449 { 5450 case ADD_NEW_DISK: 5451 { 5452 mdu_disk_info_t info; 5453 if (copy_from_user(&info, argp, sizeof(info))) 5454 err = -EFAULT; 5455 else 5456 err = add_new_disk(mddev, &info); 5457 goto done_unlock; 5458 } 5459 5460 case HOT_REMOVE_DISK: 5461 err = hot_remove_disk(mddev, new_decode_dev(arg)); 5462 goto done_unlock; 5463 5464 case HOT_ADD_DISK: 5465 err = hot_add_disk(mddev, new_decode_dev(arg)); 5466 goto done_unlock; 5467 5468 case SET_DISK_FAULTY: 5469 err = set_disk_faulty(mddev, new_decode_dev(arg)); 5470 goto done_unlock; 5471 5472 case RUN_ARRAY: 5473 err = do_md_run(mddev); 5474 goto done_unlock; 5475 5476 case SET_BITMAP_FILE: 5477 err = set_bitmap_file(mddev, (int)arg); 5478 goto done_unlock; 5479 5480 default: 5481 err = -EINVAL; 5482 goto abort_unlock; 5483 } 5484 5485 done_unlock: 5486 abort_unlock: 5487 if (mddev->hold_active == UNTIL_IOCTL && 5488 err != -EINVAL) 5489 mddev->hold_active = 0; 5490 mddev_unlock(mddev); 5491 5492 return err; 5493 done: 5494 if (err) 5495 MD_BUG(); 5496 abort: 5497 return err; 5498 } 5499 5500 static int md_open(struct block_device *bdev, fmode_t mode) 5501 { 5502 /* 5503 * Succeed if we can lock the mddev, which confirms that 5504 * it isn't being stopped right now. 5505 */ 5506 mddev_t *mddev = mddev_find(bdev->bd_dev); 5507 int err; 5508 5509 if (mddev->gendisk != bdev->bd_disk) { 5510 /* we are racing with mddev_put which is discarding this 5511 * bd_disk. 
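	 * (mddev_find() can return an mddev whose gendisk is still being
	 * torn down by mddev_delayed_delete(); flushing the shared
	 * workqueue below lets that delete finish before we retry.)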
5512 */ 5513 mddev_put(mddev); 5514 /* Wait until bdev->bd_disk is definitely gone */ 5515 flush_scheduled_work(); 5516 /* Then retry the open from the top */ 5517 return -ERESTARTSYS; 5518 } 5519 BUG_ON(mddev != bdev->bd_disk->private_data); 5520 5521 if ((err = mutex_lock_interruptible(&mddev->open_mutex))) 5522 goto out; 5523 5524 err = 0; 5525 atomic_inc(&mddev->openers); 5526 mutex_unlock(&mddev->open_mutex); 5527 5528 check_disk_change(bdev); 5529 out: 5530 return err; 5531 } 5532 5533 static int md_release(struct gendisk *disk, fmode_t mode) 5534 { 5535 mddev_t *mddev = disk->private_data; 5536 5537 BUG_ON(!mddev); 5538 atomic_dec(&mddev->openers); 5539 mddev_put(mddev); 5540 5541 return 0; 5542 } 5543 5544 static int md_media_changed(struct gendisk *disk) 5545 { 5546 mddev_t *mddev = disk->private_data; 5547 5548 return mddev->changed; 5549 } 5550 5551 static int md_revalidate(struct gendisk *disk) 5552 { 5553 mddev_t *mddev = disk->private_data; 5554 5555 mddev->changed = 0; 5556 return 0; 5557 } 5558 static struct block_device_operations md_fops = 5559 { 5560 .owner = THIS_MODULE, 5561 .open = md_open, 5562 .release = md_release, 5563 .ioctl = md_ioctl, 5564 .getgeo = md_getgeo, 5565 .media_changed = md_media_changed, 5566 .revalidate_disk= md_revalidate, 5567 }; 5568 5569 static int md_thread(void * arg) 5570 { 5571 mdk_thread_t *thread = arg; 5572 5573 /* 5574 * md_thread is a 'system-thread', its priority should be very 5575 * high. We avoid resource deadlocks individually in each 5576 * raid personality. (RAID5 does preallocation) We also use RR and 5577 * the very same RT priority as kswapd, thus we will never get 5578 * into a priority inversion deadlock. 5579 * 5580 * we definitely have to have equal or higher priority than 5581 * bdflush, otherwise bdflush will deadlock if there are too 5582 * many dirty RAID5 blocks. 5583 */ 5584 5585 allow_signal(SIGKILL); 5586 while (!kthread_should_stop()) { 5587 5588 /* We need to wait INTERRUPTIBLE so that 5589 * we don't add to the load-average.
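	 * (Tasks sleeping in TASK_UNINTERRUPTIBLE are counted in the
	 * load-average; an idle md thread should not inflate it.)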
5590 * That means we need to be sure no signals are 5591 * pending 5592 */ 5593 if (signal_pending(current)) 5594 flush_signals(current); 5595 5596 wait_event_interruptible_timeout 5597 (thread->wqueue, 5598 test_bit(THREAD_WAKEUP, &thread->flags) 5599 || kthread_should_stop(), 5600 thread->timeout); 5601 5602 clear_bit(THREAD_WAKEUP, &thread->flags); 5603 5604 thread->run(thread->mddev); 5605 } 5606 5607 return 0; 5608 } 5609 5610 void md_wakeup_thread(mdk_thread_t *thread) 5611 { 5612 if (thread) { 5613 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm); 5614 set_bit(THREAD_WAKEUP, &thread->flags); 5615 wake_up(&thread->wqueue); 5616 } 5617 } 5618 5619 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev, 5620 const char *name) 5621 { 5622 mdk_thread_t *thread; 5623 5624 thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL); 5625 if (!thread) 5626 return NULL; 5627 5628 init_waitqueue_head(&thread->wqueue); 5629 5630 thread->run = run; 5631 thread->mddev = mddev; 5632 thread->timeout = MAX_SCHEDULE_TIMEOUT; 5633 thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev)); 5634 if (IS_ERR(thread->tsk)) { 5635 kfree(thread); 5636 return NULL; 5637 } 5638 return thread; 5639 } 5640 5641 void md_unregister_thread(mdk_thread_t *thread) 5642 { 5643 if (!thread) 5644 return; 5645 dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); 5646 5647 kthread_stop(thread->tsk); 5648 kfree(thread); 5649 } 5650 5651 void md_error(mddev_t *mddev, mdk_rdev_t *rdev) 5652 { 5653 if (!mddev) { 5654 MD_BUG(); 5655 return; 5656 } 5657 5658 if (!rdev || test_bit(Faulty, &rdev->flags)) 5659 return; 5660 5661 if (mddev->external) 5662 set_bit(Blocked, &rdev->flags); 5663 /* 5664 dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n", 5665 mdname(mddev), 5666 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev), 5667 __builtin_return_address(0),__builtin_return_address(1), 5668 __builtin_return_address(2),__builtin_return_address(3)); 5669 */ 5670 if (!mddev->pers) 5671 return; 5672 if (!mddev->pers->error_handler) 5673 return; 5674 mddev->pers->error_handler(mddev,rdev); 5675 if (mddev->degraded) 5676 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 5677 set_bit(StateChanged, &rdev->flags); 5678 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 5679 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5680 md_wakeup_thread(mddev->thread); 5681 md_new_event_inintr(mddev); 5682 } 5683 5684 /* seq_file implementation /proc/mdstat */ 5685 5686 static void status_unused(struct seq_file *seq) 5687 { 5688 int i = 0; 5689 mdk_rdev_t *rdev; 5690 5691 seq_printf(seq, "unused devices: "); 5692 5693 list_for_each_entry(rdev, &pending_raid_disks, same_set) { 5694 char b[BDEVNAME_SIZE]; 5695 i++; 5696 seq_printf(seq, "%s ", 5697 bdevname(rdev->bdev,b)); 5698 } 5699 if (!i) 5700 seq_printf(seq, "<none>"); 5701 5702 seq_printf(seq, "\n"); 5703 } 5704 5705 5706 static void status_resync(struct seq_file *seq, mddev_t * mddev) 5707 { 5708 sector_t max_sectors, resync, res; 5709 unsigned long dt, db; 5710 sector_t rt; 5711 int scale; 5712 unsigned int per_milli; 5713 5714 resync = mddev->curr_resync - atomic_read(&mddev->recovery_active); 5715 5716 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 5717 max_sectors = mddev->resync_max_sectors; 5718 else 5719 max_sectors = mddev->dev_sectors; 5720 5721 /* 5722 * Should not happen. 
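	 * max_sectors was taken from resync_max_sectors or dev_sectors
	 * just above, and both are non-zero once an array is active
	 * (do_md_run() initialises resync_max_sectors from dev_sectors).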
5723 */ 5724 if (!max_sectors) { 5725 MD_BUG(); 5726 return; 5727 } 5728 /* Pick 'scale' such that (resync>>scale)*1000 will fit 5729 * in a sector_t, and (max_sectors>>scale) will fit in a 5730 * u32, as those are the requirements for sector_div. 5731 * Thus 'scale' must be at least 10 5732 */ 5733 scale = 10; 5734 if (sizeof(sector_t) > sizeof(unsigned long)) { 5735 while ( max_sectors/2 > (1ULL<<(scale+32))) 5736 scale++; 5737 } 5738 res = (resync>>scale)*1000; 5739 sector_div(res, (u32)((max_sectors>>scale)+1)); 5740 5741 per_milli = res; 5742 { 5743 int i, x = per_milli/50, y = 20-x; 5744 seq_printf(seq, "["); 5745 for (i = 0; i < x; i++) 5746 seq_printf(seq, "="); 5747 seq_printf(seq, ">"); 5748 for (i = 0; i < y; i++) 5749 seq_printf(seq, "."); 5750 seq_printf(seq, "] "); 5751 } 5752 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)", 5753 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? 5754 "reshape" : 5755 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)? 5756 "check" : 5757 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? 5758 "resync" : "recovery"))), 5759 per_milli/10, per_milli % 10, 5760 (unsigned long long) resync/2, 5761 (unsigned long long) max_sectors/2); 5762 5763 /* 5764 * dt: time from mark until now 5765 * db: blocks written from mark until now 5766 * rt: remaining time 5767 * 5768 * rt is a sector_t, so could be 32bit or 64bit. 5769 * So we divide before multiply in case it is 32bit and close 5770 * to the limit. 5771 * We scale the divisor (db) by 32 to avoid losing precision 5772 * near the end of resync when the number of remaining sectors 5773 * is close to 'db'. 5774 * We then divide rt by 32 after multiplying by db to compensate. 5775 * The '+1' avoids division by zero if db is very small. 5776 */ 5777 dt = ((jiffies - mddev->resync_mark) / HZ); 5778 if (!dt) dt++; 5779 db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active)) 5780 - mddev->resync_mark_cnt; 5781 5782 rt = max_sectors - resync; /* number of remaining sectors */ 5783 sector_div(rt, db/32+1); 5784 rt *= dt; 5785 rt >>= 5; 5786 5787 seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60, 5788 ((unsigned long)rt % 60)/6); 5789 5790 seq_printf(seq, " speed=%ldK/sec", db/2/dt); 5791 } 5792 5793 static void *md_seq_start(struct seq_file *seq, loff_t *pos) 5794 { 5795 struct list_head *tmp; 5796 loff_t l = *pos; 5797 mddev_t *mddev; 5798 5799 if (l >= 0x10000) 5800 return NULL; 5801 if (!l--) 5802 /* header */ 5803 return (void*)1; 5804 5805 spin_lock(&all_mddevs_lock); 5806 list_for_each(tmp,&all_mddevs) 5807 if (!l--) { 5808 mddev = list_entry(tmp, mddev_t, all_mddevs); 5809 mddev_get(mddev); 5810 spin_unlock(&all_mddevs_lock); 5811 return mddev; 5812 } 5813 spin_unlock(&all_mddevs_lock); 5814 if (!l--) 5815 return (void*)2;/* tail */ 5816 return NULL; 5817 } 5818 5819 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) 5820 { 5821 struct list_head *tmp; 5822 mddev_t *next_mddev, *mddev = v; 5823 5824 ++*pos; 5825 if (v == (void*)2) 5826 return NULL; 5827 5828 spin_lock(&all_mddevs_lock); 5829 if (v == (void*)1) 5830 tmp = all_mddevs.next; 5831 else 5832 tmp = mddev->all_mddevs.next; 5833 if (tmp != &all_mddevs) 5834 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs)); 5835 else { 5836 next_mddev = (void*)2; 5837 *pos = 0x10000; 5838 } 5839 spin_unlock(&all_mddevs_lock); 5840 5841 if (v != (void*)1) 5842 mddev_put(mddev); 5843 return next_mddev; 5844 5845 } 5846 5847 static void md_seq_stop(struct seq_file *seq, void *v) 5848 { 5849 mddev_t *mddev = v; 5850 5851 if
(mddev && v != (void*)1 && v != (void*)2) 5852 mddev_put(mddev); 5853 } 5854 5855 struct mdstat_info { 5856 int event; 5857 }; 5858 5859 static int md_seq_show(struct seq_file *seq, void *v) 5860 { 5861 mddev_t *mddev = v; 5862 sector_t sectors; 5863 mdk_rdev_t *rdev; 5864 struct mdstat_info *mi = seq->private; 5865 struct bitmap *bitmap; 5866 5867 if (v == (void*)1) { 5868 struct mdk_personality *pers; 5869 seq_printf(seq, "Personalities : "); 5870 spin_lock(&pers_lock); 5871 list_for_each_entry(pers, &pers_list, list) 5872 seq_printf(seq, "[%s] ", pers->name); 5873 5874 spin_unlock(&pers_lock); 5875 seq_printf(seq, "\n"); 5876 mi->event = atomic_read(&md_event_count); 5877 return 0; 5878 } 5879 if (v == (void*)2) { 5880 status_unused(seq); 5881 return 0; 5882 } 5883 5884 if (mddev_lock(mddev) < 0) 5885 return -EINTR; 5886 5887 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { 5888 seq_printf(seq, "%s : %sactive", mdname(mddev), 5889 mddev->pers ? "" : "in"); 5890 if (mddev->pers) { 5891 if (mddev->ro==1) 5892 seq_printf(seq, " (read-only)"); 5893 if (mddev->ro==2) 5894 seq_printf(seq, " (auto-read-only)"); 5895 seq_printf(seq, " %s", mddev->pers->name); 5896 } 5897 5898 sectors = 0; 5899 list_for_each_entry(rdev, &mddev->disks, same_set) { 5900 char b[BDEVNAME_SIZE]; 5901 seq_printf(seq, " %s[%d]", 5902 bdevname(rdev->bdev,b), rdev->desc_nr); 5903 if (test_bit(WriteMostly, &rdev->flags)) 5904 seq_printf(seq, "(W)"); 5905 if (test_bit(Faulty, &rdev->flags)) { 5906 seq_printf(seq, "(F)"); 5907 continue; 5908 } else if (rdev->raid_disk < 0) 5909 seq_printf(seq, "(S)"); /* spare */ 5910 sectors += rdev->sectors; 5911 } 5912 5913 if (!list_empty(&mddev->disks)) { 5914 if (mddev->pers) 5915 seq_printf(seq, "\n %llu blocks", 5916 (unsigned long long) 5917 mddev->array_sectors / 2); 5918 else 5919 seq_printf(seq, "\n %llu blocks", 5920 (unsigned long long)sectors / 2); 5921 } 5922 if (mddev->persistent) { 5923 if (mddev->major_version != 0 || 5924 mddev->minor_version != 90) { 5925 seq_printf(seq," super %d.%d", 5926 mddev->major_version, 5927 mddev->minor_version); 5928 } 5929 } else if (mddev->external) 5930 seq_printf(seq, " super external:%s", 5931 mddev->metadata_type); 5932 else 5933 seq_printf(seq, " super non-persistent"); 5934 5935 if (mddev->pers) { 5936 mddev->pers->status(seq, mddev); 5937 seq_printf(seq, "\n "); 5938 if (mddev->pers->sync_request) { 5939 if (mddev->curr_resync > 2) { 5940 status_resync(seq, mddev); 5941 seq_printf(seq, "\n "); 5942 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2) 5943 seq_printf(seq, "\tresync=DELAYED\n "); 5944 else if (mddev->recovery_cp < MaxSector) 5945 seq_printf(seq, "\tresync=PENDING\n "); 5946 } 5947 } else 5948 seq_printf(seq, "\n "); 5949 5950 if ((bitmap = mddev->bitmap)) { 5951 unsigned long chunk_kb; 5952 unsigned long flags; 5953 spin_lock_irqsave(&bitmap->lock, flags); 5954 chunk_kb = bitmap->chunksize >> 10; 5955 seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], " 5956 "%lu%s chunk", 5957 bitmap->pages - bitmap->missing_pages, 5958 bitmap->pages, 5959 (bitmap->pages - bitmap->missing_pages) 5960 << (PAGE_SHIFT - 10), 5961 chunk_kb ? chunk_kb : bitmap->chunksize, 5962 chunk_kb ? 
"KB" : "B"); 5963 if (bitmap->file) { 5964 seq_printf(seq, ", file: "); 5965 seq_path(seq, &bitmap->file->f_path, " \t\n"); 5966 } 5967 5968 seq_printf(seq, "\n"); 5969 spin_unlock_irqrestore(&bitmap->lock, flags); 5970 } 5971 5972 seq_printf(seq, "\n"); 5973 } 5974 mddev_unlock(mddev); 5975 5976 return 0; 5977 } 5978 5979 static const struct seq_operations md_seq_ops = { 5980 .start = md_seq_start, 5981 .next = md_seq_next, 5982 .stop = md_seq_stop, 5983 .show = md_seq_show, 5984 }; 5985 5986 static int md_seq_open(struct inode *inode, struct file *file) 5987 { 5988 int error; 5989 struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL); 5990 if (mi == NULL) 5991 return -ENOMEM; 5992 5993 error = seq_open(file, &md_seq_ops); 5994 if (error) 5995 kfree(mi); 5996 else { 5997 struct seq_file *p = file->private_data; 5998 p->private = mi; 5999 mi->event = atomic_read(&md_event_count); 6000 } 6001 return error; 6002 } 6003 6004 static unsigned int mdstat_poll(struct file *filp, poll_table *wait) 6005 { 6006 struct seq_file *m = filp->private_data; 6007 struct mdstat_info *mi = m->private; 6008 int mask; 6009 6010 poll_wait(filp, &md_event_waiters, wait); 6011 6012 /* always allow read */ 6013 mask = POLLIN | POLLRDNORM; 6014 6015 if (mi->event != atomic_read(&md_event_count)) 6016 mask |= POLLERR | POLLPRI; 6017 return mask; 6018 } 6019 6020 static const struct file_operations md_seq_fops = { 6021 .owner = THIS_MODULE, 6022 .open = md_seq_open, 6023 .read = seq_read, 6024 .llseek = seq_lseek, 6025 .release = seq_release_private, 6026 .poll = mdstat_poll, 6027 }; 6028 6029 int register_md_personality(struct mdk_personality *p) 6030 { 6031 spin_lock(&pers_lock); 6032 list_add_tail(&p->list, &pers_list); 6033 printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level); 6034 spin_unlock(&pers_lock); 6035 return 0; 6036 } 6037 6038 int unregister_md_personality(struct mdk_personality *p) 6039 { 6040 printk(KERN_INFO "md: %s personality unregistered\n", p->name); 6041 spin_lock(&pers_lock); 6042 list_del_init(&p->list); 6043 spin_unlock(&pers_lock); 6044 return 0; 6045 } 6046 6047 static int is_mddev_idle(mddev_t *mddev, int init) 6048 { 6049 mdk_rdev_t * rdev; 6050 int idle; 6051 int curr_events; 6052 6053 idle = 1; 6054 rcu_read_lock(); 6055 rdev_for_each_rcu(rdev, mddev) { 6056 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; 6057 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + 6058 (int)part_stat_read(&disk->part0, sectors[1]) - 6059 atomic_read(&disk->sync_io); 6060 /* sync IO will cause sync_io to increase before the disk_stats 6061 * as sync_io is counted when a request starts, and 6062 * disk_stats is counted when it completes. 6063 * So resync activity will cause curr_events to be smaller than 6064 * when there was no such activity. 6065 * non-sync IO will cause disk_stat to increase without 6066 * increasing sync_io so curr_events will (eventually) 6067 * be larger than it was before. Once it becomes 6068 * substantially larger, the test below will cause 6069 * the array to appear non-idle, and resync will slow 6070 * down. 6071 * If there is a lot of outstanding resync activity when 6072 * we set last_event to curr_events, then all that activity 6073 * completing might cause the array to appear non-idle 6074 * and resync will be slowed down even though there might 6075 * not have been non-resync activity. This will only 6076 * happen once though. 
int register_md_personality(struct mdk_personality *p)
{
	spin_lock(&pers_lock);
	list_add_tail(&p->list, &pers_list);
	printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
	spin_unlock(&pers_lock);
	return 0;
}

int unregister_md_personality(struct mdk_personality *p)
{
	printk(KERN_INFO "md: %s personality unregistered\n", p->name);
	spin_lock(&pers_lock);
	list_del_init(&p->list);
	spin_unlock(&pers_lock);
	return 0;
}
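/*
 * Example (sketch, names illustrative): a RAID personality module
 * registers itself on load and unregisters on unload, the way the
 * raid0/raid1/raid5 modules do in their init/exit functions:
 *
 *	static struct mdk_personality example_personality = {
 *		.name		= "example",
 *		.level		= -1,
 *		.owner		= THIS_MODULE,
 *		.make_request	= example_make_request,
 *		.run		= example_run,
 *		.stop		= example_stop,
 *		.status		= example_status,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return register_md_personality(&example_personality);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		unregister_md_personality(&example_personality);
 *	}
 */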
static int is_mddev_idle(mddev_t *mddev, int init)
{
	mdk_rdev_t * rdev;
	int idle;
	int curr_events;

	idle = 1;
	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev) {
		struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
		curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
			      (int)part_stat_read(&disk->part0, sectors[1]) -
			      atomic_read(&disk->sync_io);
		/* sync IO will cause sync_io to increase before the disk_stats
		 * as sync_io is counted when a request starts, and
		 * disk_stats is counted when it completes.
		 * So resync activity will cause curr_events to be smaller than
		 * when there was no such activity.
		 * non-sync IO will cause disk_stat to increase without
		 * increasing sync_io so curr_events will (eventually)
		 * be larger than it was before.  Once it becomes
		 * substantially larger, the test below will cause
		 * the array to appear non-idle, and resync will slow
		 * down.
		 * If there is a lot of outstanding resync activity when
		 * we set last_event to curr_events, then all that activity
		 * completing might cause the array to appear non-idle
		 * and resync will be slowed down even though there might
		 * not have been non-resync activity.  This will only
		 * happen once though.  'last_events' will soon reflect
		 * the state where there are few or no outstanding
		 * resync requests, and further resync activity will
		 * always make curr_events less than last_events.
		 *
		 */
		if (init || curr_events - rdev->last_events > 64) {
			rdev->last_events = curr_events;
			idle = 0;
		}
	}
	rcu_read_unlock();
	return idle;
}
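/*
 * Worked example of the accounting above (illustrative numbers): if a
 * member disk has completed 5000 read + 5000 write sectors in total, of
 * which 9900 sectors were submitted by resync (sync_io == 9900), then
 * curr_events == 5000 + 5000 - 9900 == 100.  Pure resync traffic keeps
 * this difference roughly constant, while filesystem I/O grows it; once
 * it exceeds last_events by more than the 64-sector slack, the array is
 * reported non-idle and md_do_sync() backs off towards speed_min().
 */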
void md_done_sync(mddev_t *mddev, int blocks, int ok)
{
	/* another "blocks" (512-byte) blocks have been synced */
	atomic_sub(blocks, &mddev->recovery_active);
	wake_up(&mddev->recovery_wait);
	if (!ok) {
		set_bit(MD_RECOVERY_INTR, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		// stop recovery, signal do_sync ....
	}
}


/* md_write_start(mddev, bi)
 * If we need to update some array metadata (e.g. 'active' flag
 * in superblock) before writing, schedule a superblock update
 * and wait for it to complete.
 */
void md_write_start(mddev_t *mddev, struct bio *bi)
{
	int did_change = 0;
	if (bio_data_dir(bi) != WRITE)
		return;

	BUG_ON(mddev->ro == 1);
	if (mddev->ro == 2) {
		/* need to switch to read/write */
		mddev->ro = 0;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		md_wakeup_thread(mddev->sync_thread);
		did_change = 1;
	}
	atomic_inc(&mddev->writes_pending);
	if (mddev->safemode == 1)
		mddev->safemode = 0;
	if (mddev->in_sync) {
		spin_lock_irq(&mddev->write_lock);
		if (mddev->in_sync) {
			mddev->in_sync = 0;
			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			md_wakeup_thread(mddev->thread);
			did_change = 1;
		}
		spin_unlock_irq(&mddev->write_lock);
	}
	if (did_change)
		sysfs_notify_dirent(mddev->sysfs_state);
	wait_event(mddev->sb_wait,
		   !test_bit(MD_CHANGE_CLEAN, &mddev->flags) &&
		   !test_bit(MD_CHANGE_PENDING, &mddev->flags));
}

void md_write_end(mddev_t *mddev)
{
	if (atomic_dec_and_test(&mddev->writes_pending)) {
		if (mddev->safemode == 2)
			md_wakeup_thread(mddev->thread);
		else if (mddev->safemode_delay)
			mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
	}
}
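/*
 * Typical caller pattern (sketch, names illustrative): a personality
 * brackets every write with these two calls so the superblock 'active'
 * state and the writes_pending count stay accurate:
 *
 *	static int example_make_request(struct request_queue *q, struct bio *bio)
 *	{
 *		md_write_start(mddev, bio);	// may block for a sb update
 *		... submit the write to member devices ...
 *		return 0;
 *	}
 *
 *	// and in the completion path, once all member writes for the
 *	// bio have finished:
 *	md_write_end(mddev);
 *
 * See raid1.c and friends for the real callers.
 */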
/* md_allow_write(mddev)
 * Calling this ensures that the array is marked 'active' so that writes
 * may proceed without blocking.  It is important to call this before
 * attempting a GFP_KERNEL allocation while holding the mddev lock.
 * Must be called with mddev_lock held.
 *
 * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock
 * is dropped, so return -EAGAIN after notifying userspace.
 */
int md_allow_write(mddev_t *mddev)
{
	if (!mddev->pers)
		return 0;
	if (mddev->ro)
		return 0;
	if (!mddev->pers->sync_request)
		return 0;

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync) {
		mddev->in_sync = 0;
		set_bit(MD_CHANGE_CLEAN, &mddev->flags);
		if (mddev->safemode_delay &&
		    mddev->safemode == 0)
			mddev->safemode = 1;
		spin_unlock_irq(&mddev->write_lock);
		md_update_sb(mddev, 0);
		sysfs_notify_dirent(mddev->sysfs_state);
	} else
		spin_unlock_irq(&mddev->write_lock);

	if (test_bit(MD_CHANGE_CLEAN, &mddev->flags))
		return -EAGAIN;
	else
		return 0;
}
EXPORT_SYMBOL_GPL(md_allow_write);
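/*
 * Usage sketch (illustrative): a personality that must allocate memory
 * while holding the mddev lock calls md_allow_write() first, so a write
 * triggered by memory reclaim cannot block waiting for the superblock
 * to be marked active:
 *
 *	err = md_allow_write(mddev);
 *	if (err == -EAGAIN)
 *		... // externally-managed metadata: userspace has been
 *		    // notified; the caller decides whether to proceed
 *	buf = kmalloc(len, GFP_KERNEL);
 *
 * 'err', 'buf' and 'len' are placeholder names for this sketch.
 */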
#define SYNC_MARKS	10
#define	SYNC_MARK_STEP	(3*HZ)
void md_do_sync(mddev_t *mddev)
{
	mddev_t *mddev2;
	unsigned int currspeed = 0,
		 window;
	sector_t max_sectors,j, io_sectors;
	unsigned long mark[SYNC_MARKS];
	sector_t mark_cnt[SYNC_MARKS];
	int last_mark,m;
	struct list_head *tmp;
	sector_t last_check;
	int skipped = 0;
	mdk_rdev_t *rdev;
	char *desc;

	/* just in case thread restarts... */
	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
		return;
	if (mddev->ro) /* never try to sync a read-only array */
		return;

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
			desc = "data-check";
		else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			desc = "requested-resync";
		else
			desc = "resync";
	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		desc = "reshape";
	else
		desc = "recovery";

	/* we overload curr_resync somewhat here.
	 * 0 == not engaged in resync at all
	 * 2 == checking that there is no conflict with another sync
	 * 1 == like 2, but have yielded to allow conflicting resync to
	 *		commence
	 * other == active in resync - this many blocks
	 *
	 * Before starting a resync we must have set curr_resync to
	 * 2, and then checked that every "conflicting" array has curr_resync
	 * less than ours.  When we find one that is the same or higher
	 * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
	 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
	 * This will mean we have to start checking from the beginning again.
	 *
	 */

	do {
		mddev->curr_resync = 2;

	try_again:
		if (kthread_should_stop()) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			goto skip;
		}
		for_each_mddev(mddev2, tmp) {
			if (mddev2 == mddev)
				continue;
			if (!mddev->parallel_resync
			&&  mddev2->curr_resync
			&&  match_mddev_units(mddev, mddev2)) {
				DEFINE_WAIT(wq);
				if (mddev < mddev2 && mddev->curr_resync == 2) {
					/* arbitrarily yield */
					mddev->curr_resync = 1;
					wake_up(&resync_wait);
				}
				if (mddev > mddev2 && mddev->curr_resync == 1)
					/* no need to wait here, we can wait the next
					 * time 'round when curr_resync == 2
					 */
					continue;
				/* We need to wait 'interruptible' so as not to
				 * contribute to the load average, and not to
				 * be caught by 'softlockup'
				 */
				prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE);
				if (!kthread_should_stop() &&
				    mddev2->curr_resync >= mddev->curr_resync) {
					printk(KERN_INFO "md: delaying %s of %s"
					       " until %s has finished (they"
					       " share one or more physical units)\n",
					       desc, mdname(mddev), mdname(mddev2));
					mddev_put(mddev2);
					if (signal_pending(current))
						flush_signals(current);
					schedule();
					finish_wait(&resync_wait, &wq);
					goto try_again;
				}
				finish_wait(&resync_wait, &wq);
			}
		}
	} while (mddev->curr_resync < 2);

	j = 0;
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		/* resync follows the size requested by the personality,
		 * which defaults to physical size, but can be virtual size
		 */
		max_sectors = mddev->resync_max_sectors;
		mddev->resync_mismatches = 0;
		/* we don't use the checkpoint if there's a bitmap */
		if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
			j = mddev->resync_min;
		else if (!mddev->bitmap)
			j = mddev->recovery_cp;

	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		max_sectors = mddev->dev_sectors;
	else {
		/* recovery follows the physical size of devices */
		max_sectors = mddev->dev_sectors;
		j = MaxSector;
		list_for_each_entry(rdev, &mddev->disks, same_set)
			if (rdev->raid_disk >= 0 &&
			    !test_bit(Faulty, &rdev->flags) &&
			    !test_bit(In_sync, &rdev->flags) &&
			    rdev->recovery_offset < j)
				j = rdev->recovery_offset;
	}

	printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
	printk(KERN_INFO "md: minimum _guaranteed_  speed:"
		" %d KB/sec/disk.\n", speed_min(mddev));
	printk(KERN_INFO "md: using maximum available idle IO bandwidth "
	       "(but not more than %d KB/sec) for %s.\n",
	       speed_max(mddev), desc);

	is_mddev_idle(mddev, 1); /* this initializes IO event counters */

	io_sectors = 0;
	for (m = 0; m < SYNC_MARKS; m++) {
		mark[m] = jiffies;
		mark_cnt[m] = io_sectors;
	}
	last_mark = 0;
	mddev->resync_mark = mark[last_mark];
	mddev->resync_mark_cnt = mark_cnt[last_mark];

	/*
	 * Tune reconstruction:
	 */
	window = 32*(PAGE_SIZE/512);
	printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
		window/2,(unsigned long long) max_sectors/2);

	atomic_set(&mddev->recovery_active, 0);
	last_check = 0;

	if (j>2) {
		printk(KERN_INFO
		       "md: resuming %s of %s from checkpoint.\n",
		       desc, mdname(mddev));
		mddev->curr_resync = j;
	}

	while (j < max_sectors) {
		sector_t sectors;

		skipped = 0;

		if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
		    ((mddev->curr_resync > mddev->curr_resync_completed &&
		      (mddev->curr_resync - mddev->curr_resync_completed)
		      > (max_sectors >> 4)) ||
		     (j - mddev->curr_resync_completed)*2
		     >= mddev->resync_max - mddev->curr_resync_completed
			    )) {
			/* time to update curr_resync_completed */
			blk_unplug(mddev->queue);
			wait_event(mddev->recovery_wait,
				   atomic_read(&mddev->recovery_active) == 0);
			mddev->curr_resync_completed =
				mddev->curr_resync;
			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
		}

		while (j >= mddev->resync_max && !kthread_should_stop()) {
			/* As this condition is controlled by user-space,
			 * we can block indefinitely, so use '_interruptible'
			 * to avoid triggering warnings.
			 */
			flush_signals(current); /* just in case */
			wait_event_interruptible(mddev->recovery_wait,
						 mddev->resync_max > j
						 || kthread_should_stop());
		}

		if (kthread_should_stop())
			goto interrupted;

		sectors = mddev->pers->sync_request(mddev, j, &skipped,
						  currspeed < speed_min(mddev));
		if (sectors == 0) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			goto out;
		}

		if (!skipped) { /* actual IO requested */
			io_sectors += sectors;
			atomic_add(sectors, &mddev->recovery_active);
		}

		j += sectors;
		if (j>1) mddev->curr_resync = j;
		mddev->curr_mark_cnt = io_sectors;
		if (last_check == 0)
			/* this is the earliest that rebuild will be
			 * visible in /proc/mdstat
			 */
			md_new_event(mddev);

		if (last_check + window > io_sectors || j == max_sectors)
			continue;

		last_check = io_sectors;

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			break;

	repeat:
		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
			/* step marks */
			int next = (last_mark+1) % SYNC_MARKS;

			mddev->resync_mark = mark[next];
			mddev->resync_mark_cnt = mark_cnt[next];
			mark[next] = jiffies;
			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
			last_mark = next;
		}


		if (kthread_should_stop())
			goto interrupted;


		/*
		 * this loop exits only if either we are slower than
		 * the 'hard' speed limit, or the system was IO-idle for
		 * a jiffy.
		 * the system might be non-idle CPU-wise, but we only care
		 * about not overloading the IO subsystem. (things like an
		 * e2fsck being done on the RAID array should execute fast)
		 */
		blk_unplug(mddev->queue);
		cond_resched();

		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
			/((jiffies-mddev->resync_mark)/HZ +1) +1;

		if (currspeed > speed_min(mddev)) {
			if ((currspeed > speed_max(mddev)) ||
					!is_mddev_idle(mddev, 0)) {
				msleep(500);
				goto repeat;
			}
		}
	}
	printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc);
	/*
	 * this also signals 'finished resyncing' to md_stop
	 */
 out:
	blk_unplug(mddev->queue);

	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));

	/* tell personality that we are finished */
	mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);

	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
	    mddev->curr_resync > 2) {
		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
				if (mddev->curr_resync >= mddev->recovery_cp) {
					printk(KERN_INFO
					       "md: checkpointing %s of %s.\n",
					       desc, mdname(mddev));
					mddev->recovery_cp = mddev->curr_resync;
				}
			} else
				mddev->recovery_cp = MaxSector;
		} else {
			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
				mddev->curr_resync = MaxSector;
			list_for_each_entry(rdev, &mddev->disks, same_set)
				if (rdev->raid_disk >= 0 &&
				    !test_bit(Faulty, &rdev->flags) &&
				    !test_bit(In_sync, &rdev->flags) &&
				    rdev->recovery_offset < mddev->curr_resync)
					rdev->recovery_offset = mddev->curr_resync;
		}
	}
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

 skip:
	mddev->curr_resync = 0;
	mddev->curr_resync_completed = 0;
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	wake_up(&resync_wait);
	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	return;

 interrupted:
	/*
	 * got a signal, exit.
	 */
	printk(KERN_INFO
	       "md: md_do_sync() got signal ... exiting\n");
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	goto out;

}
EXPORT_SYMBOL_GPL(md_do_sync);
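/*
 * Worked example of the speed computation in md_do_sync() (illustrative
 * numbers): if io_sectors - resync_mark_cnt == 40960 sectors (20 MB)
 * have been synced since the current mark, and (jiffies - resync_mark)/HZ
 * == 1 second has elapsed, then
 *
 *	currspeed = 40960/2 / (1 + 1) + 1 = 10241 KB/sec
 *
 * If this exceeds speed_min(), the thread throttles (msleep + goto
 * repeat) whenever it is also above speed_max() or the array is not
 * idle; below speed_min() it always proceeds at full rate.
 */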
static int remove_and_add_spares(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	int spares = 0;

	mddev->curr_resync_completed = 0;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Blocked, &rdev->flags) &&
		    (test_bit(Faulty, &rdev->flags) ||
		     ! test_bit(In_sync, &rdev->flags)) &&
		    atomic_read(&rdev->nr_pending)==0) {
			if (mddev->pers->hot_remove_disk(
				    mddev, rdev->raid_disk)==0) {
				char nm[20];
				sprintf(nm,"rd%d", rdev->raid_disk);
				sysfs_remove_link(&mddev->kobj, nm);
				rdev->raid_disk = -1;
			}
		}

	if (mddev->degraded && !mddev->ro && !mddev->recovery_disabled) {
		list_for_each_entry(rdev, &mddev->disks, same_set) {
			if (rdev->raid_disk >= 0 &&
			    !test_bit(In_sync, &rdev->flags) &&
			    !test_bit(Blocked, &rdev->flags))
				spares++;
			if (rdev->raid_disk < 0
			    && !test_bit(Faulty, &rdev->flags)) {
				rdev->recovery_offset = 0;
				if (mddev->pers->
				    hot_add_disk(mddev, rdev) == 0) {
					char nm[20];
					sprintf(nm, "rd%d", rdev->raid_disk);
					if (sysfs_create_link(&mddev->kobj,
							      &rdev->kobj, nm))
						printk(KERN_WARNING
						       "md: cannot register "
						       "%s for %s\n",
						       nm, mdname(mddev));
					spares++;
					md_new_event(mddev);
				} else
					break;
			}
		}
	}
	return spares;
}
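/*
 * Example (illustrative): when a spare is promoted into slot 1 of md0,
 * the sysfs link created above appears as roughly
 *
 *	/sys/block/md0/md/rd1 -> dev-sdc1
 *
 * (the link target is the rdev's "dev-..." kobject directory) and is
 * removed again once the device fails or is hot-removed.
 */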
/*
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE
 * and wakes up this thread which will reap the thread and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ if the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If array is degraded, try to add spare devices
 *  6/ If array has spares or is not in-sync, start a resync thread.
 */
void md_check_recovery(mddev_t *mddev)
{
	mdk_rdev_t *rdev;


	if (mddev->bitmap)
		bitmap_daemon_work(mddev->bitmap);

	if (mddev->ro)
		return;

	if (signal_pending(current)) {
		if (mddev->pers->sync_request && !mddev->external) {
			printk(KERN_INFO "md: %s in immediate safe mode\n",
			       mdname(mddev));
			mddev->safemode = 2;
		}
		flush_signals(current);
	}

	if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return;
	if ( ! (
		(mddev->flags && !mddev->external) ||
		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
		(mddev->external == 0 && mddev->safemode == 1) ||
		(mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
		 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
		))
		return;

	if (mddev_trylock(mddev)) {
		int spares = 0;

		if (mddev->ro) {
			/* Only thing we do on a ro array is remove
			 * failed devices.
			 */
			remove_and_add_spares(mddev);
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}

		if (!mddev->external) {
			int did_change = 0;
			spin_lock_irq(&mddev->write_lock);
			if (mddev->safemode &&
			    !atomic_read(&mddev->writes_pending) &&
			    !mddev->in_sync &&
			    mddev->recovery_cp == MaxSector) {
				mddev->in_sync = 1;
				did_change = 1;
				if (mddev->persistent)
					set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			}
			if (mddev->safemode == 1)
				mddev->safemode = 0;
			spin_unlock_irq(&mddev->write_lock);
			if (did_change)
				sysfs_notify_dirent(mddev->sysfs_state);
		}

		if (mddev->flags)
			md_update_sb(mddev, 0);

		list_for_each_entry(rdev, &mddev->disks, same_set)
			if (test_and_clear_bit(StateChanged, &rdev->flags))
				sysfs_notify_dirent(rdev->sysfs_state);


		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
			/* resync/recovery still happening */
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}
		if (mddev->sync_thread) {
			/* resync has finished, collect result */
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
				/* success...*/
				/* activate any spares */
				if (mddev->pers->spare_active(mddev))
					sysfs_notify(&mddev->kobj, NULL,
						     "degraded");
			}
			if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
			    mddev->pers->finish_reshape)
				mddev->pers->finish_reshape(mddev);
			md_update_sb(mddev, 1);

			/* if array is no longer degraded, then any saved_raid_disk
			 * information must be scrapped
			 */
			if (!mddev->degraded)
				list_for_each_entry(rdev, &mddev->disks, same_set)
					rdev->saved_raid_disk = -1;

			mddev->recovery = 0;
			/* flag recovery needed just to double check */
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			sysfs_notify_dirent(mddev->sysfs_action);
			md_new_event(mddev);
			goto unlock;
		}
		/* Set RUNNING before clearing NEEDED to avoid
		 * any transients in the value of "sync_action".
		 */
		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		/* Clear some bits that don't mean anything, but
		 * might be left set
		 */
		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);

		if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
			goto unlock;
		/* no recovery is running.
		 * remove any failed drives, then
		 * add spares if possible.
		 * Spares are also removed and re-added, to allow
		 * the personality to fail the re-add.
		 */

		if (mddev->reshape_position != MaxSector) {
			if (mddev->pers->check_reshape == NULL ||
			    mddev->pers->check_reshape(mddev) != 0)
				/* Cannot proceed */
				goto unlock;
			set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if ((spares = remove_and_add_spares(mddev))) {
			clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
			clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
			set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if (mddev->recovery_cp < MaxSector) {
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
		} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
			/* nothing to be done ... */
			goto unlock;

		if (mddev->pers->sync_request) {
			if (spares && mddev->bitmap && ! mddev->bitmap->file) {
				/* We are adding a device or devices to an array
				 * which has the bitmap stored on all devices.
				 * So make sure all bitmap pages get written
				 */
				bitmap_write_all(mddev->bitmap);
			}
			mddev->sync_thread = md_register_thread(md_do_sync,
								mddev,
								"%s_resync");
			if (!mddev->sync_thread) {
				printk(KERN_ERR "%s: could not start resync"
					" thread...\n",
					mdname(mddev));
				/* leave the spares where they are, it shouldn't hurt */
				mddev->recovery = 0;
			} else
				md_wakeup_thread(mddev->sync_thread);
			sysfs_notify_dirent(mddev->sysfs_action);
			md_new_event(mddev);
		}
	unlock:
		if (!mddev->sync_thread) {
			clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			if (test_and_clear_bit(MD_RECOVERY_RECOVER,
					       &mddev->recovery))
				if (mddev->sysfs_action)
					sysfs_notify_dirent(mddev->sysfs_action);
		}
		mddev_unlock(mddev);
	}
}
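/*
 * Caller sketch (illustrative): each personality's array thread invokes
 * md_check_recovery() on every wakeup, before doing its own work:
 *
 *	static void exampled(mddev_t *mddev)	// the per-array thread
 *	{
 *		md_check_recovery(mddev);
 *		... handle queued bios, retries, etc ...
 *	}
 *
 * raid1d(), raid5d() and friends follow this pattern; 'exampled' is a
 * placeholder name.
 */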
void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
{
	sysfs_notify_dirent(rdev->sysfs_state);
	wait_event_timeout(rdev->blocked_wait,
			   !test_bit(Blocked, &rdev->flags),
			   msecs_to_jiffies(5000));
	rdev_dec_pending(rdev, mddev);
}
EXPORT_SYMBOL(md_wait_for_blocked_rdev);
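/*
 * Usage note (sketch): a personality that finds a device marked Blocked
 * while preparing a write takes a reference, drops its locks, and waits
 * here until the flag is cleared:
 *
 *	if (test_bit(Blocked, &rdev->flags)) {
 *		atomic_inc(&rdev->nr_pending);	// balanced by
 *						// rdev_dec_pending() above
 *		md_wait_for_blocked_rdev(rdev, mddev);
 *		... retry the operation ...
 *	}
 *
 * The 5 second timeout keeps the array from hanging indefinitely if
 * nobody clears the flag.
 */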
static int md_notify_reboot(struct notifier_block *this,
			    unsigned long code, void *x)
{
	struct list_head *tmp;
	mddev_t *mddev;

	if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {

		printk(KERN_INFO "md: stopping all md devices.\n");

		for_each_mddev(mddev, tmp)
			if (mddev_trylock(mddev)) {
				/* Force a switch to readonly even if the
				 * array appears to still be in use.  Hence
				 * the '100'.
				 */
				do_md_stop(mddev, 1, 100);
				mddev_unlock(mddev);
			}
		/*
		 * certain more exotic SCSI devices are known to be
		 * volatile wrt too early system reboots. While the
		 * right place to handle this issue is the given
		 * driver, we do want to have a safe RAID driver ...
		 */
		mdelay(1000*1);
	}
	return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};

static void md_geninit(void)
{
	dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}

static int __init md_init(void)
{
	if (register_blkdev(MD_MAJOR, "md"))
		return -1;
	if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
		unregister_blkdev(MD_MAJOR, "md");
		return -1;
	}
	blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);
	blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);

	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table);

	md_geninit();
	return 0;
}


#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */

static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
	struct list_head list;
	dev_t dev;
};

void md_autodetect_dev(dev_t dev)
{
	struct detected_devices_node *node_detected_dev;

	node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
	if (node_detected_dev) {
		node_detected_dev->dev = dev;
		list_add_tail(&node_detected_dev->list, &all_detected_devices);
	} else {
		printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
			", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
	}
}


static void autostart_arrays(int part)
{
	mdk_rdev_t *rdev;
	struct detected_devices_node *node_detected_dev;
	dev_t dev;
	int i_scanned, i_passed;

	i_scanned = 0;
	i_passed = 0;

	printk(KERN_INFO "md: Autodetecting RAID arrays.\n");

	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
		i_scanned++;
		node_detected_dev = list_entry(all_detected_devices.next,
					struct detected_devices_node, list);
		list_del(&node_detected_dev->list);
		dev = node_detected_dev->dev;
		kfree(node_detected_dev);
		rdev = md_import_device(dev,0, 90);
		if (IS_ERR(rdev))
			continue;

		if (test_bit(Faulty, &rdev->flags)) {
			MD_BUG();
			continue;
		}
		set_bit(AutoDetected, &rdev->flags);
		list_add(&rdev->same_set, &pending_raid_disks);
		i_passed++;
	}

	printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
						i_scanned, i_passed);

	autorun_devices(part);
}

#endif /* !MODULE */

static __exit void md_exit(void)
{
	mddev_t *mddev;
	struct list_head *tmp;

	blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS);
	blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);

	unregister_blkdev(MD_MAJOR,"md");
	unregister_blkdev(mdp_major, "mdp");
	unregister_reboot_notifier(&md_notifier);
	unregister_sysctl_table(raid_table_header);
	remove_proc_entry("mdstat", NULL);
	for_each_mddev(mddev, tmp) {
		export_array(mddev);
		mddev->hold_active = 0;
	}
}

subsys_initcall(md_init);
module_exit(md_exit)

static int get_ro(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
	char *e;
	int num = simple_strtoul(val, &e, 10);
	if (*val && (*e == '\0' || *e == '\n')) {
		start_readonly = num;
		return 0;
	}
	return -EINVAL;
}

module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);

module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);
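/*
 * Example (illustrative): the parameters above can be set at load time
 * or through sysfs; assuming this file is built as the md_mod module,
 * something like
 *
 *	modprobe md_mod start_ro=1
 *	echo 1 > /sys/module/md_mod/parameters/start_ro
 *
 * makes newly started arrays come up 'auto-read-only' until the first
 * write arrives (see md_write_start()).
 */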
EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);