1 /* 2 md.c : Multiple Devices driver for Linux 3 Copyright (C) 1998, 1999, 2000 Ingo Molnar 4 5 completely rewritten, based on the MD driver code from Marc Zyngier 6 7 Changes: 8 9 - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar 10 - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com> 11 - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net> 12 - kerneld support by Boris Tobotras <boris@xtalk.msk.su> 13 - kmod support by: Cyrus Durgin 14 - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com> 15 - Devfs support by Richard Gooch <rgooch@atnf.csiro.au> 16 17 - lots of fixes and improvements to the RAID1/RAID5 and generic 18 RAID code (such as request based resynchronization): 19 20 Neil Brown <neilb@cse.unsw.edu.au>. 21 22 - persistent bitmap code 23 Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc. 24 25 This program is free software; you can redistribute it and/or modify 26 it under the terms of the GNU General Public License as published by 27 the Free Software Foundation; either version 2, or (at your option) 28 any later version. 29 30 You should have received a copy of the GNU General Public License 31 (for example /usr/src/linux/COPYING); if not, write to the Free 32 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 33 */ 34 35 #include <linux/module.h> 36 #include <linux/config.h> 37 #include <linux/kthread.h> 38 #include <linux/linkage.h> 39 #include <linux/raid/md.h> 40 #include <linux/raid/bitmap.h> 41 #include <linux/sysctl.h> 42 #include <linux/devfs_fs_kernel.h> 43 #include <linux/buffer_head.h> /* for invalidate_bdev */ 44 #include <linux/suspend.h> 45 #include <linux/poll.h> 46 #include <linux/mutex.h> 47 48 #include <linux/init.h> 49 50 #include <linux/file.h> 51 52 #ifdef CONFIG_KMOD 53 #include <linux/kmod.h> 54 #endif 55 56 #include <asm/unaligned.h> 57 58 #define MAJOR_NR MD_MAJOR 59 #define MD_DRIVER 60 61 /* 63 partitions with the alternate major number (mdp) */ 62 #define MdpMinorShift 6 63 64 #define DEBUG 0 65 #define dprintk(x...) ((void)(DEBUG && printk(x))) 66 67 68 #ifndef MODULE 69 static void autostart_arrays (int part); 70 #endif 71 72 static LIST_HEAD(pers_list); 73 static DEFINE_SPINLOCK(pers_lock); 74 75 /* 76 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit' 77 * is 1000 KB/sec, so the extra system load does not show up that much. 78 * Increase it if you want to have more _guaranteed_ speed. Note that 79 * the RAID driver will use the maximum available bandwidth if the IO 80 * subsystem is idle. There is also an 'absolute maximum' reconstruction 81 * speed limit - in case reconstruction slows down your system despite 82 * idle IO detection. 83 * 84 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max. 85 * or /sys/block/mdX/md/sync_speed_{min,max} 86 */ 87 88 static int sysctl_speed_limit_min = 1000; 89 static int sysctl_speed_limit_max = 200000; 90 static inline int speed_min(mddev_t *mddev) 91 { 92 return mddev->sync_speed_min ? 93 mddev->sync_speed_min : sysctl_speed_limit_min; 94 } 95 96 static inline int speed_max(mddev_t *mddev) 97 { 98 return mddev->sync_speed_max ? 
99 mddev->sync_speed_max : sysctl_speed_limit_max; 100 } 101 102 static struct ctl_table_header *raid_table_header; 103 104 static ctl_table raid_table[] = { 105 { 106 .ctl_name = DEV_RAID_SPEED_LIMIT_MIN, 107 .procname = "speed_limit_min", 108 .data = &sysctl_speed_limit_min, 109 .maxlen = sizeof(int), 110 .mode = 0644, 111 .proc_handler = &proc_dointvec, 112 }, 113 { 114 .ctl_name = DEV_RAID_SPEED_LIMIT_MAX, 115 .procname = "speed_limit_max", 116 .data = &sysctl_speed_limit_max, 117 .maxlen = sizeof(int), 118 .mode = 0644, 119 .proc_handler = &proc_dointvec, 120 }, 121 { .ctl_name = 0 } 122 }; 123 124 static ctl_table raid_dir_table[] = { 125 { 126 .ctl_name = DEV_RAID, 127 .procname = "raid", 128 .maxlen = 0, 129 .mode = 0555, 130 .child = raid_table, 131 }, 132 { .ctl_name = 0 } 133 }; 134 135 static ctl_table raid_root_table[] = { 136 { 137 .ctl_name = CTL_DEV, 138 .procname = "dev", 139 .maxlen = 0, 140 .mode = 0555, 141 .child = raid_dir_table, 142 }, 143 { .ctl_name = 0 } 144 }; 145 146 static struct block_device_operations md_fops; 147 148 static int start_readonly; 149 150 /* 151 * We have a system wide 'event count' that is incremented 152 * on any 'interesting' event, and readers of /proc/mdstat 153 * can use 'poll' or 'select' to find out when the event 154 * count increases. 155 * 156 * Events are: 157 * start array, stop array, error, add device, remove device, 158 * start build, activate spare 159 */ 160 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters); 161 static atomic_t md_event_count; 162 void md_new_event(mddev_t *mddev) 163 { 164 atomic_inc(&md_event_count); 165 wake_up(&md_event_waiters); 166 sysfs_notify(&mddev->kobj, NULL, "sync_action"); 167 } 168 EXPORT_SYMBOL_GPL(md_new_event); 169 170 /* Alternate version that can be called from interrupts 171 * when calling sysfs_notify isn't needed. 172 */ 173 void md_new_event_inintr(mddev_t *mddev) 174 { 175 atomic_inc(&md_event_count); 176 wake_up(&md_event_waiters); 177 } 178 179 /* 180 * Enables to iterate over all existing md arrays 181 * all_mddevs_lock protects this list. 182 */ 183 static LIST_HEAD(all_mddevs); 184 static DEFINE_SPINLOCK(all_mddevs_lock); 185 186 187 /* 188 * iterates through all used mddevs in the system. 189 * We take care to grab the all_mddevs_lock whenever navigating 190 * the list, and to always hold a refcount when unlocked. 191 * Any code which breaks out of this loop while own 192 * a reference to the current mddev and must mddev_put it. 
193 */ 194 #define ITERATE_MDDEV(mddev,tmp) \ 195 \ 196 for (({ spin_lock(&all_mddevs_lock); \ 197 tmp = all_mddevs.next; \ 198 mddev = NULL;}); \ 199 ({ if (tmp != &all_mddevs) \ 200 mddev_get(list_entry(tmp, mddev_t, all_mddevs));\ 201 spin_unlock(&all_mddevs_lock); \ 202 if (mddev) mddev_put(mddev); \ 203 mddev = list_entry(tmp, mddev_t, all_mddevs); \ 204 tmp != &all_mddevs;}); \ 205 ({ spin_lock(&all_mddevs_lock); \ 206 tmp = tmp->next;}) \ 207 ) 208 209 210 static int md_fail_request (request_queue_t *q, struct bio *bio) 211 { 212 bio_io_error(bio, bio->bi_size); 213 return 0; 214 } 215 216 static inline mddev_t *mddev_get(mddev_t *mddev) 217 { 218 atomic_inc(&mddev->active); 219 return mddev; 220 } 221 222 static void mddev_put(mddev_t *mddev) 223 { 224 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) 225 return; 226 if (!mddev->raid_disks && list_empty(&mddev->disks)) { 227 list_del(&mddev->all_mddevs); 228 spin_unlock(&all_mddevs_lock); 229 blk_cleanup_queue(mddev->queue); 230 kobject_unregister(&mddev->kobj); 231 } else 232 spin_unlock(&all_mddevs_lock); 233 } 234 235 static mddev_t * mddev_find(dev_t unit) 236 { 237 mddev_t *mddev, *new = NULL; 238 239 retry: 240 spin_lock(&all_mddevs_lock); 241 list_for_each_entry(mddev, &all_mddevs, all_mddevs) 242 if (mddev->unit == unit) { 243 mddev_get(mddev); 244 spin_unlock(&all_mddevs_lock); 245 kfree(new); 246 return mddev; 247 } 248 249 if (new) { 250 list_add(&new->all_mddevs, &all_mddevs); 251 spin_unlock(&all_mddevs_lock); 252 return new; 253 } 254 spin_unlock(&all_mddevs_lock); 255 256 new = kzalloc(sizeof(*new), GFP_KERNEL); 257 if (!new) 258 return NULL; 259 260 new->unit = unit; 261 if (MAJOR(unit) == MD_MAJOR) 262 new->md_minor = MINOR(unit); 263 else 264 new->md_minor = MINOR(unit) >> MdpMinorShift; 265 266 mutex_init(&new->reconfig_mutex); 267 INIT_LIST_HEAD(&new->disks); 268 INIT_LIST_HEAD(&new->all_mddevs); 269 init_timer(&new->safemode_timer); 270 atomic_set(&new->active, 1); 271 spin_lock_init(&new->write_lock); 272 init_waitqueue_head(&new->sb_wait); 273 274 new->queue = blk_alloc_queue(GFP_KERNEL); 275 if (!new->queue) { 276 kfree(new); 277 return NULL; 278 } 279 set_bit(QUEUE_FLAG_CLUSTER, &new->queue->queue_flags); 280 281 blk_queue_make_request(new->queue, md_fail_request); 282 283 goto retry; 284 } 285 286 static inline int mddev_lock(mddev_t * mddev) 287 { 288 return mutex_lock_interruptible(&mddev->reconfig_mutex); 289 } 290 291 static inline int mddev_trylock(mddev_t * mddev) 292 { 293 return mutex_trylock(&mddev->reconfig_mutex); 294 } 295 296 static inline void mddev_unlock(mddev_t * mddev) 297 { 298 mutex_unlock(&mddev->reconfig_mutex); 299 300 md_wakeup_thread(mddev->thread); 301 } 302 303 static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) 304 { 305 mdk_rdev_t * rdev; 306 struct list_head *tmp; 307 308 ITERATE_RDEV(mddev,rdev,tmp) { 309 if (rdev->desc_nr == nr) 310 return rdev; 311 } 312 return NULL; 313 } 314 315 static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev) 316 { 317 struct list_head *tmp; 318 mdk_rdev_t *rdev; 319 320 ITERATE_RDEV(mddev,rdev,tmp) { 321 if (rdev->bdev->bd_dev == dev) 322 return rdev; 323 } 324 return NULL; 325 } 326 327 static struct mdk_personality *find_pers(int level, char *clevel) 328 { 329 struct mdk_personality *pers; 330 list_for_each_entry(pers, &pers_list, list) { 331 if (level != LEVEL_NONE && pers->level == level) 332 return pers; 333 if (strcmp(pers->name, clevel)==0) 334 return pers; 335 } 336 return NULL; 337 } 338 339 static inline sector_t 
calc_dev_sboffset(struct block_device *bdev) 340 { 341 sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 342 return MD_NEW_SIZE_BLOCKS(size); 343 } 344 345 static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size) 346 { 347 sector_t size; 348 349 size = rdev->sb_offset; 350 351 if (chunk_size) 352 size &= ~((sector_t)chunk_size/1024 - 1); 353 return size; 354 } 355 356 static int alloc_disk_sb(mdk_rdev_t * rdev) 357 { 358 if (rdev->sb_page) 359 MD_BUG(); 360 361 rdev->sb_page = alloc_page(GFP_KERNEL); 362 if (!rdev->sb_page) { 363 printk(KERN_ALERT "md: out of memory.\n"); 364 return -EINVAL; 365 } 366 367 return 0; 368 } 369 370 static void free_disk_sb(mdk_rdev_t * rdev) 371 { 372 if (rdev->sb_page) { 373 put_page(rdev->sb_page); 374 rdev->sb_loaded = 0; 375 rdev->sb_page = NULL; 376 rdev->sb_offset = 0; 377 rdev->size = 0; 378 } 379 } 380 381 382 static int super_written(struct bio *bio, unsigned int bytes_done, int error) 383 { 384 mdk_rdev_t *rdev = bio->bi_private; 385 mddev_t *mddev = rdev->mddev; 386 if (bio->bi_size) 387 return 1; 388 389 if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) 390 md_error(mddev, rdev); 391 392 if (atomic_dec_and_test(&mddev->pending_writes)) 393 wake_up(&mddev->sb_wait); 394 bio_put(bio); 395 return 0; 396 } 397 398 static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error) 399 { 400 struct bio *bio2 = bio->bi_private; 401 mdk_rdev_t *rdev = bio2->bi_private; 402 mddev_t *mddev = rdev->mddev; 403 if (bio->bi_size) 404 return 1; 405 406 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && 407 error == -EOPNOTSUPP) { 408 unsigned long flags; 409 /* barriers don't appear to be supported :-( */ 410 set_bit(BarriersNotsupp, &rdev->flags); 411 mddev->barriers_work = 0; 412 spin_lock_irqsave(&mddev->write_lock, flags); 413 bio2->bi_next = mddev->biolist; 414 mddev->biolist = bio2; 415 spin_unlock_irqrestore(&mddev->write_lock, flags); 416 wake_up(&mddev->sb_wait); 417 bio_put(bio); 418 return 0; 419 } 420 bio_put(bio2); 421 bio->bi_private = rdev; 422 return super_written(bio, bytes_done, error); 423 } 424 425 void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, 426 sector_t sector, int size, struct page *page) 427 { 428 /* write first size bytes of page to sector of rdev 429 * Increment mddev->pending_writes before returning 430 * and decrement it on completion, waking up sb_wait 431 * if zero is reached. 432 * If an error occurred, call md_error 433 * 434 * As we might need to resubmit the request if BIO_RW_BARRIER 435 * causes ENOTSUPP, we allocate a spare bio... 436 */ 437 struct bio *bio = bio_alloc(GFP_NOIO, 1); 438 int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC); 439 440 bio->bi_bdev = rdev->bdev; 441 bio->bi_sector = sector; 442 bio_add_page(bio, page, size, 0); 443 bio->bi_private = rdev; 444 bio->bi_end_io = super_written; 445 bio->bi_rw = rw; 446 447 atomic_inc(&mddev->pending_writes); 448 if (!test_bit(BarriersNotsupp, &rdev->flags)) { 449 struct bio *rbio; 450 rw |= (1<<BIO_RW_BARRIER); 451 rbio = bio_clone(bio, GFP_NOIO); 452 rbio->bi_private = bio; 453 rbio->bi_end_io = super_written_barrier; 454 submit_bio(rw, rbio); 455 } else 456 submit_bio(rw, bio); 457 } 458 459 void md_super_wait(mddev_t *mddev) 460 { 461 /* wait for all superblock writes that were scheduled to complete. 
462 * if any had to be retried (due to BARRIER problems), retry them 463 */ 464 DEFINE_WAIT(wq); 465 for(;;) { 466 prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE); 467 if (atomic_read(&mddev->pending_writes)==0) 468 break; 469 while (mddev->biolist) { 470 struct bio *bio; 471 spin_lock_irq(&mddev->write_lock); 472 bio = mddev->biolist; 473 mddev->biolist = bio->bi_next ; 474 bio->bi_next = NULL; 475 spin_unlock_irq(&mddev->write_lock); 476 submit_bio(bio->bi_rw, bio); 477 } 478 schedule(); 479 } 480 finish_wait(&mddev->sb_wait, &wq); 481 } 482 483 static int bi_complete(struct bio *bio, unsigned int bytes_done, int error) 484 { 485 if (bio->bi_size) 486 return 1; 487 488 complete((struct completion*)bio->bi_private); 489 return 0; 490 } 491 492 int sync_page_io(struct block_device *bdev, sector_t sector, int size, 493 struct page *page, int rw) 494 { 495 struct bio *bio = bio_alloc(GFP_NOIO, 1); 496 struct completion event; 497 int ret; 498 499 rw |= (1 << BIO_RW_SYNC); 500 501 bio->bi_bdev = bdev; 502 bio->bi_sector = sector; 503 bio_add_page(bio, page, size, 0); 504 init_completion(&event); 505 bio->bi_private = &event; 506 bio->bi_end_io = bi_complete; 507 submit_bio(rw, bio); 508 wait_for_completion(&event); 509 510 ret = test_bit(BIO_UPTODATE, &bio->bi_flags); 511 bio_put(bio); 512 return ret; 513 } 514 EXPORT_SYMBOL_GPL(sync_page_io); 515 516 static int read_disk_sb(mdk_rdev_t * rdev, int size) 517 { 518 char b[BDEVNAME_SIZE]; 519 if (!rdev->sb_page) { 520 MD_BUG(); 521 return -EINVAL; 522 } 523 if (rdev->sb_loaded) 524 return 0; 525 526 527 if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ)) 528 goto fail; 529 rdev->sb_loaded = 1; 530 return 0; 531 532 fail: 533 printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n", 534 bdevname(rdev->bdev,b)); 535 return -EINVAL; 536 } 537 538 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2) 539 { 540 if ( (sb1->set_uuid0 == sb2->set_uuid0) && 541 (sb1->set_uuid1 == sb2->set_uuid1) && 542 (sb1->set_uuid2 == sb2->set_uuid2) && 543 (sb1->set_uuid3 == sb2->set_uuid3)) 544 545 return 1; 546 547 return 0; 548 } 549 550 551 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2) 552 { 553 int ret; 554 mdp_super_t *tmp1, *tmp2; 555 556 tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL); 557 tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL); 558 559 if (!tmp1 || !tmp2) { 560 ret = 0; 561 printk(KERN_INFO "md.c: sb1 is not equal to sb2!\n"); 562 goto abort; 563 } 564 565 *tmp1 = *sb1; 566 *tmp2 = *sb2; 567 568 /* 569 * nr_disks is not constant 570 */ 571 tmp1->nr_disks = 0; 572 tmp2->nr_disks = 0; 573 574 if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4)) 575 ret = 0; 576 else 577 ret = 1; 578 579 abort: 580 kfree(tmp1); 581 kfree(tmp2); 582 return ret; 583 } 584 585 static unsigned int calc_sb_csum(mdp_super_t * sb) 586 { 587 unsigned int disk_csum, csum; 588 589 disk_csum = sb->sb_csum; 590 sb->sb_csum = 0; 591 csum = csum_partial((void *)sb, MD_SB_BYTES, 0); 592 sb->sb_csum = disk_csum; 593 return csum; 594 } 595 596 597 /* 598 * Handle superblock details. 599 * We want to be able to handle multiple superblock formats 600 * so we have a common interface to them all, and an array of 601 * different handlers. 602 * We rely on user-space to write the initial superblock, and support 603 * reading and updating of superblocks. 604 * Interface methods are: 605 * int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version) 606 * loads and validates a superblock on dev. 
607 * if refdev != NULL, compare superblocks on both devices 608 * Return: 609 * 0 - dev has a superblock that is compatible with refdev 610 * 1 - dev has a superblock that is compatible and newer than refdev 611 * so dev should be used as the refdev in future 612 * -EINVAL superblock incompatible or invalid 613 * -othererror e.g. -EIO 614 * 615 * int validate_super(mddev_t *mddev, mdk_rdev_t *dev) 616 * Verify that dev is acceptable into mddev. 617 * The first time, mddev->raid_disks will be 0, and data from 618 * dev should be merged in. Subsequent calls check that dev 619 * is new enough. Return 0 or -EINVAL 620 * 621 * void sync_super(mddev_t *mddev, mdk_rdev_t *dev) 622 * Update the superblock for rdev with data in mddev 623 * This does not write to disc. 624 * 625 */ 626 627 struct super_type { 628 char *name; 629 struct module *owner; 630 int (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version); 631 int (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev); 632 void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev); 633 }; 634 635 /* 636 * load_super for 0.90.0 637 */ 638 static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) 639 { 640 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 641 mdp_super_t *sb; 642 int ret; 643 sector_t sb_offset; 644 645 /* 646 * Calculate the position of the superblock, 647 * it's at the end of the disk. 648 * 649 * It also happens to be a multiple of 4Kb. 650 */ 651 sb_offset = calc_dev_sboffset(rdev->bdev); 652 rdev->sb_offset = sb_offset; 653 654 ret = read_disk_sb(rdev, MD_SB_BYTES); 655 if (ret) return ret; 656 657 ret = -EINVAL; 658 659 bdevname(rdev->bdev, b); 660 sb = (mdp_super_t*)page_address(rdev->sb_page); 661 662 if (sb->md_magic != MD_SB_MAGIC) { 663 printk(KERN_ERR "md: invalid raid superblock magic on %s\n", 664 b); 665 goto abort; 666 } 667 668 if (sb->major_version != 0 || 669 sb->minor_version < 90 || 670 sb->minor_version > 91) { 671 printk(KERN_WARNING "Bad version number %d.%d on %s\n", 672 sb->major_version, sb->minor_version, 673 b); 674 goto abort; 675 } 676 677 if (sb->raid_disks <= 0) 678 goto abort; 679 680 if (csum_fold(calc_sb_csum(sb)) != csum_fold(sb->sb_csum)) { 681 printk(KERN_WARNING "md: invalid superblock checksum on %s\n", 682 b); 683 goto abort; 684 } 685 686 rdev->preferred_minor = sb->md_minor; 687 rdev->data_offset = 0; 688 rdev->sb_size = MD_SB_BYTES; 689 690 if (sb->level == LEVEL_MULTIPATH) 691 rdev->desc_nr = -1; 692 else 693 rdev->desc_nr = sb->this_disk.number; 694 695 if (refdev == 0) 696 ret = 1; 697 else { 698 __u64 ev1, ev2; 699 mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page); 700 if (!uuid_equal(refsb, sb)) { 701 printk(KERN_WARNING "md: %s has different UUID to %s\n", 702 b, bdevname(refdev->bdev,b2)); 703 goto abort; 704 } 705 if (!sb_equal(refsb, sb)) { 706 printk(KERN_WARNING "md: %s has same UUID" 707 " but different superblock to %s\n", 708 b, bdevname(refdev->bdev, b2)); 709 goto abort; 710 } 711 ev1 = md_event(sb); 712 ev2 = md_event(refsb); 713 if (ev1 > ev2) 714 ret = 1; 715 else 716 ret = 0; 717 } 718 rdev->size = calc_dev_size(rdev, sb->chunk_size); 719 720 if (rdev->size < sb->size && sb->level > 1) 721 /* "this cannot possibly happen" ... 
*/ 722 ret = -EINVAL; 723 724 abort: 725 return ret; 726 } 727 728 /* 729 * validate_super for 0.90.0 730 */ 731 static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) 732 { 733 mdp_disk_t *desc; 734 mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page); 735 736 rdev->raid_disk = -1; 737 rdev->flags = 0; 738 if (mddev->raid_disks == 0) { 739 mddev->major_version = 0; 740 mddev->minor_version = sb->minor_version; 741 mddev->patch_version = sb->patch_version; 742 mddev->persistent = ! sb->not_persistent; 743 mddev->chunk_size = sb->chunk_size; 744 mddev->ctime = sb->ctime; 745 mddev->utime = sb->utime; 746 mddev->level = sb->level; 747 mddev->clevel[0] = 0; 748 mddev->layout = sb->layout; 749 mddev->raid_disks = sb->raid_disks; 750 mddev->size = sb->size; 751 mddev->events = md_event(sb); 752 mddev->bitmap_offset = 0; 753 mddev->default_bitmap_offset = MD_SB_BYTES >> 9; 754 755 if (mddev->minor_version >= 91) { 756 mddev->reshape_position = sb->reshape_position; 757 mddev->delta_disks = sb->delta_disks; 758 mddev->new_level = sb->new_level; 759 mddev->new_layout = sb->new_layout; 760 mddev->new_chunk = sb->new_chunk; 761 } else { 762 mddev->reshape_position = MaxSector; 763 mddev->delta_disks = 0; 764 mddev->new_level = mddev->level; 765 mddev->new_layout = mddev->layout; 766 mddev->new_chunk = mddev->chunk_size; 767 } 768 769 if (sb->state & (1<<MD_SB_CLEAN)) 770 mddev->recovery_cp = MaxSector; 771 else { 772 if (sb->events_hi == sb->cp_events_hi && 773 sb->events_lo == sb->cp_events_lo) { 774 mddev->recovery_cp = sb->recovery_cp; 775 } else 776 mddev->recovery_cp = 0; 777 } 778 779 memcpy(mddev->uuid+0, &sb->set_uuid0, 4); 780 memcpy(mddev->uuid+4, &sb->set_uuid1, 4); 781 memcpy(mddev->uuid+8, &sb->set_uuid2, 4); 782 memcpy(mddev->uuid+12,&sb->set_uuid3, 4); 783 784 mddev->max_disks = MD_SB_DISKS; 785 786 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) && 787 mddev->bitmap_file == NULL) { 788 if (mddev->level != 1 && mddev->level != 4 789 && mddev->level != 5 && mddev->level != 6 790 && mddev->level != 10) { 791 /* FIXME use a better test */ 792 printk(KERN_WARNING "md: bitmaps not supported for this level.\n"); 793 return -EINVAL; 794 } 795 mddev->bitmap_offset = mddev->default_bitmap_offset; 796 } 797 798 } else if (mddev->pers == NULL) { 799 /* Insist on good event counter while assembling */ 800 __u64 ev1 = md_event(sb); 801 ++ev1; 802 if (ev1 < mddev->events) 803 return -EINVAL; 804 } else if (mddev->bitmap) { 805 /* if adding to array with a bitmap, then we can accept an 806 * older device ... but not too old. 807 */ 808 __u64 ev1 = md_event(sb); 809 if (ev1 < mddev->bitmap->events_cleared) 810 return 0; 811 } else /* just a hot-add of a new device, leave raid_disk at -1 */ 812 return 0; 813 814 if (mddev->level != LEVEL_MULTIPATH) { 815 desc = sb->disks + rdev->desc_nr; 816 817 if (desc->state & (1<<MD_DISK_FAULTY)) 818 set_bit(Faulty, &rdev->flags); 819 else if (desc->state & (1<<MD_DISK_SYNC) && 820 desc->raid_disk < mddev->raid_disks) { 821 set_bit(In_sync, &rdev->flags); 822 rdev->raid_disk = desc->raid_disk; 823 } 824 if (desc->state & (1<<MD_DISK_WRITEMOSTLY)) 825 set_bit(WriteMostly, &rdev->flags); 826 } else /* MULTIPATH are always insync */ 827 set_bit(In_sync, &rdev->flags); 828 return 0; 829 } 830 831 /* 832 * sync_super for 0.90.0 833 */ 834 static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) 835 { 836 mdp_super_t *sb; 837 struct list_head *tmp; 838 mdk_rdev_t *rdev2; 839 int next_spare = mddev->raid_disks; 840 841 842 /* make rdev->sb match mddev data.. 
843 * 844 * 1/ zero out disks 845 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare); 846 * 3/ any empty disks < next_spare become removed 847 * 848 * disks[0] gets initialised to REMOVED because 849 * we cannot be sure from other fields if it has 850 * been initialised or not. 851 */ 852 int i; 853 int active=0, working=0,failed=0,spare=0,nr_disks=0; 854 855 rdev->sb_size = MD_SB_BYTES; 856 857 sb = (mdp_super_t*)page_address(rdev->sb_page); 858 859 memset(sb, 0, sizeof(*sb)); 860 861 sb->md_magic = MD_SB_MAGIC; 862 sb->major_version = mddev->major_version; 863 sb->patch_version = mddev->patch_version; 864 sb->gvalid_words = 0; /* ignored */ 865 memcpy(&sb->set_uuid0, mddev->uuid+0, 4); 866 memcpy(&sb->set_uuid1, mddev->uuid+4, 4); 867 memcpy(&sb->set_uuid2, mddev->uuid+8, 4); 868 memcpy(&sb->set_uuid3, mddev->uuid+12,4); 869 870 sb->ctime = mddev->ctime; 871 sb->level = mddev->level; 872 sb->size = mddev->size; 873 sb->raid_disks = mddev->raid_disks; 874 sb->md_minor = mddev->md_minor; 875 sb->not_persistent = !mddev->persistent; 876 sb->utime = mddev->utime; 877 sb->state = 0; 878 sb->events_hi = (mddev->events>>32); 879 sb->events_lo = (u32)mddev->events; 880 881 if (mddev->reshape_position == MaxSector) 882 sb->minor_version = 90; 883 else { 884 sb->minor_version = 91; 885 sb->reshape_position = mddev->reshape_position; 886 sb->new_level = mddev->new_level; 887 sb->delta_disks = mddev->delta_disks; 888 sb->new_layout = mddev->new_layout; 889 sb->new_chunk = mddev->new_chunk; 890 } 891 mddev->minor_version = sb->minor_version; 892 if (mddev->in_sync) 893 { 894 sb->recovery_cp = mddev->recovery_cp; 895 sb->cp_events_hi = (mddev->events>>32); 896 sb->cp_events_lo = (u32)mddev->events; 897 if (mddev->recovery_cp == MaxSector) 898 sb->state = (1<< MD_SB_CLEAN); 899 } else 900 sb->recovery_cp = 0; 901 902 sb->layout = mddev->layout; 903 sb->chunk_size = mddev->chunk_size; 904 905 if (mddev->bitmap && mddev->bitmap_file == NULL) 906 sb->state |= (1<<MD_SB_BITMAP_PRESENT); 907 908 sb->disks[0].state = (1<<MD_DISK_REMOVED); 909 ITERATE_RDEV(mddev,rdev2,tmp) { 910 mdp_disk_t *d; 911 int desc_nr; 912 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags) 913 && !test_bit(Faulty, &rdev2->flags)) 914 desc_nr = rdev2->raid_disk; 915 else 916 desc_nr = next_spare++; 917 rdev2->desc_nr = desc_nr; 918 d = &sb->disks[rdev2->desc_nr]; 919 nr_disks++; 920 d->number = rdev2->desc_nr; 921 d->major = MAJOR(rdev2->bdev->bd_dev); 922 d->minor = MINOR(rdev2->bdev->bd_dev); 923 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags) 924 && !test_bit(Faulty, &rdev2->flags)) 925 d->raid_disk = rdev2->raid_disk; 926 else 927 d->raid_disk = rdev2->desc_nr; /* compatibility */ 928 if (test_bit(Faulty, &rdev2->flags)) 929 d->state = (1<<MD_DISK_FAULTY); 930 else if (test_bit(In_sync, &rdev2->flags)) { 931 d->state = (1<<MD_DISK_ACTIVE); 932 d->state |= (1<<MD_DISK_SYNC); 933 active++; 934 working++; 935 } else { 936 d->state = 0; 937 spare++; 938 working++; 939 } 940 if (test_bit(WriteMostly, &rdev2->flags)) 941 d->state |= (1<<MD_DISK_WRITEMOSTLY); 942 } 943 /* now set the "removed" and "faulty" bits on any missing devices */ 944 for (i=0 ; i < mddev->raid_disks ; i++) { 945 mdp_disk_t *d = &sb->disks[i]; 946 if (d->state == 0 && d->number == 0) { 947 d->number = i; 948 d->raid_disk = i; 949 d->state = (1<<MD_DISK_REMOVED); 950 d->state |= (1<<MD_DISK_FAULTY); 951 failed++; 952 } 953 } 954 sb->nr_disks = nr_disks; 955 sb->active_disks = active; 956 sb->working_disks = working; 
957 sb->failed_disks = failed; 958 sb->spare_disks = spare; 959 960 sb->this_disk = sb->disks[rdev->desc_nr]; 961 sb->sb_csum = calc_sb_csum(sb); 962 } 963 964 /* 965 * version 1 superblock 966 */ 967 968 static unsigned int calc_sb_1_csum(struct mdp_superblock_1 * sb) 969 { 970 unsigned int disk_csum, csum; 971 unsigned long long newcsum; 972 int size = 256 + le32_to_cpu(sb->max_dev)*2; 973 unsigned int *isuper = (unsigned int*)sb; 974 int i; 975 976 disk_csum = sb->sb_csum; 977 sb->sb_csum = 0; 978 newcsum = 0; 979 for (i=0; size>=4; size -= 4 ) 980 newcsum += le32_to_cpu(*isuper++); 981 982 if (size == 2) 983 newcsum += le16_to_cpu(*(unsigned short*) isuper); 984 985 csum = (newcsum & 0xffffffff) + (newcsum >> 32); 986 sb->sb_csum = disk_csum; 987 return cpu_to_le32(csum); 988 } 989 990 static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) 991 { 992 struct mdp_superblock_1 *sb; 993 int ret; 994 sector_t sb_offset; 995 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 996 int bmask; 997 998 /* 999 * Calculate the position of the superblock. 1000 * It is always aligned to a 4K boundary and 1001 * depeding on minor_version, it can be: 1002 * 0: At least 8K, but less than 12K, from end of device 1003 * 1: At start of device 1004 * 2: 4K from start of device. 1005 */ 1006 switch(minor_version) { 1007 case 0: 1008 sb_offset = rdev->bdev->bd_inode->i_size >> 9; 1009 sb_offset -= 8*2; 1010 sb_offset &= ~(sector_t)(4*2-1); 1011 /* convert from sectors to K */ 1012 sb_offset /= 2; 1013 break; 1014 case 1: 1015 sb_offset = 0; 1016 break; 1017 case 2: 1018 sb_offset = 4; 1019 break; 1020 default: 1021 return -EINVAL; 1022 } 1023 rdev->sb_offset = sb_offset; 1024 1025 /* superblock is rarely larger than 1K, but it can be larger, 1026 * and it is safe to read 4k, so we do that 1027 */ 1028 ret = read_disk_sb(rdev, 4096); 1029 if (ret) return ret; 1030 1031 1032 sb = (struct mdp_superblock_1*)page_address(rdev->sb_page); 1033 1034 if (sb->magic != cpu_to_le32(MD_SB_MAGIC) || 1035 sb->major_version != cpu_to_le32(1) || 1036 le32_to_cpu(sb->max_dev) > (4096-256)/2 || 1037 le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) || 1038 (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0) 1039 return -EINVAL; 1040 1041 if (calc_sb_1_csum(sb) != sb->sb_csum) { 1042 printk("md: invalid superblock checksum on %s\n", 1043 bdevname(rdev->bdev,b)); 1044 return -EINVAL; 1045 } 1046 if (le64_to_cpu(sb->data_size) < 10) { 1047 printk("md: data_size too small on %s\n", 1048 bdevname(rdev->bdev,b)); 1049 return -EINVAL; 1050 } 1051 rdev->preferred_minor = 0xffff; 1052 rdev->data_offset = le64_to_cpu(sb->data_offset); 1053 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); 1054 1055 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; 1056 bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1; 1057 if (rdev->sb_size & bmask) 1058 rdev-> sb_size = (rdev->sb_size | bmask)+1; 1059 1060 if (refdev == 0) 1061 ret = 1; 1062 else { 1063 __u64 ev1, ev2; 1064 struct mdp_superblock_1 *refsb = 1065 (struct mdp_superblock_1*)page_address(refdev->sb_page); 1066 1067 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 || 1068 sb->level != refsb->level || 1069 sb->layout != refsb->layout || 1070 sb->chunksize != refsb->chunksize) { 1071 printk(KERN_WARNING "md: %s has strangely different" 1072 " superblock to %s\n", 1073 bdevname(rdev->bdev,b), 1074 bdevname(refdev->bdev,b2)); 1075 return -EINVAL; 1076 } 1077 ev1 = le64_to_cpu(sb->events); 1078 ev2 = le64_to_cpu(refsb->events); 1079 1080 
if (ev1 > ev2) 1081 ret = 1; 1082 else 1083 ret = 0; 1084 } 1085 if (minor_version) 1086 rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2; 1087 else 1088 rdev->size = rdev->sb_offset; 1089 if (rdev->size < le64_to_cpu(sb->data_size)/2) 1090 return -EINVAL; 1091 rdev->size = le64_to_cpu(sb->data_size)/2; 1092 if (le32_to_cpu(sb->chunksize)) 1093 rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1); 1094 1095 if (le32_to_cpu(sb->size) > rdev->size*2) 1096 return -EINVAL; 1097 return ret; 1098 } 1099 1100 static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) 1101 { 1102 struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page); 1103 1104 rdev->raid_disk = -1; 1105 rdev->flags = 0; 1106 if (mddev->raid_disks == 0) { 1107 mddev->major_version = 1; 1108 mddev->patch_version = 0; 1109 mddev->persistent = 1; 1110 mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9; 1111 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1); 1112 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1); 1113 mddev->level = le32_to_cpu(sb->level); 1114 mddev->clevel[0] = 0; 1115 mddev->layout = le32_to_cpu(sb->layout); 1116 mddev->raid_disks = le32_to_cpu(sb->raid_disks); 1117 mddev->size = le64_to_cpu(sb->size)/2; 1118 mddev->events = le64_to_cpu(sb->events); 1119 mddev->bitmap_offset = 0; 1120 mddev->default_bitmap_offset = 1024 >> 9; 1121 1122 mddev->recovery_cp = le64_to_cpu(sb->resync_offset); 1123 memcpy(mddev->uuid, sb->set_uuid, 16); 1124 1125 mddev->max_disks = (4096-256)/2; 1126 1127 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) && 1128 mddev->bitmap_file == NULL ) { 1129 if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6 1130 && mddev->level != 10) { 1131 printk(KERN_WARNING "md: bitmaps not supported for this level.\n"); 1132 return -EINVAL; 1133 } 1134 mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset); 1135 } 1136 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) { 1137 mddev->reshape_position = le64_to_cpu(sb->reshape_position); 1138 mddev->delta_disks = le32_to_cpu(sb->delta_disks); 1139 mddev->new_level = le32_to_cpu(sb->new_level); 1140 mddev->new_layout = le32_to_cpu(sb->new_layout); 1141 mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9; 1142 } else { 1143 mddev->reshape_position = MaxSector; 1144 mddev->delta_disks = 0; 1145 mddev->new_level = mddev->level; 1146 mddev->new_layout = mddev->layout; 1147 mddev->new_chunk = mddev->chunk_size; 1148 } 1149 1150 } else if (mddev->pers == NULL) { 1151 /* Insist of good event counter while assembling */ 1152 __u64 ev1 = le64_to_cpu(sb->events); 1153 ++ev1; 1154 if (ev1 < mddev->events) 1155 return -EINVAL; 1156 } else if (mddev->bitmap) { 1157 /* If adding to array with a bitmap, then we can accept an 1158 * older device, but not too old. 
1159 */ 1160 __u64 ev1 = le64_to_cpu(sb->events); 1161 if (ev1 < mddev->bitmap->events_cleared) 1162 return 0; 1163 } else /* just a hot-add of a new device, leave raid_disk at -1 */ 1164 return 0; 1165 1166 if (mddev->level != LEVEL_MULTIPATH) { 1167 int role; 1168 rdev->desc_nr = le32_to_cpu(sb->dev_number); 1169 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); 1170 switch(role) { 1171 case 0xffff: /* spare */ 1172 break; 1173 case 0xfffe: /* faulty */ 1174 set_bit(Faulty, &rdev->flags); 1175 break; 1176 default: 1177 set_bit(In_sync, &rdev->flags); 1178 rdev->raid_disk = role; 1179 break; 1180 } 1181 if (sb->devflags & WriteMostly1) 1182 set_bit(WriteMostly, &rdev->flags); 1183 } else /* MULTIPATH are always insync */ 1184 set_bit(In_sync, &rdev->flags); 1185 1186 return 0; 1187 } 1188 1189 static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) 1190 { 1191 struct mdp_superblock_1 *sb; 1192 struct list_head *tmp; 1193 mdk_rdev_t *rdev2; 1194 int max_dev, i; 1195 /* make rdev->sb match mddev and rdev data. */ 1196 1197 sb = (struct mdp_superblock_1*)page_address(rdev->sb_page); 1198 1199 sb->feature_map = 0; 1200 sb->pad0 = 0; 1201 memset(sb->pad1, 0, sizeof(sb->pad1)); 1202 memset(sb->pad2, 0, sizeof(sb->pad2)); 1203 memset(sb->pad3, 0, sizeof(sb->pad3)); 1204 1205 sb->utime = cpu_to_le64((__u64)mddev->utime); 1206 sb->events = cpu_to_le64(mddev->events); 1207 if (mddev->in_sync) 1208 sb->resync_offset = cpu_to_le64(mddev->recovery_cp); 1209 else 1210 sb->resync_offset = cpu_to_le64(0); 1211 1212 sb->cnt_corrected_read = atomic_read(&rdev->corrected_errors); 1213 1214 sb->raid_disks = cpu_to_le32(mddev->raid_disks); 1215 sb->size = cpu_to_le64(mddev->size<<1); 1216 1217 if (mddev->bitmap && mddev->bitmap_file == NULL) { 1218 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset); 1219 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); 1220 } 1221 if (mddev->reshape_position != MaxSector) { 1222 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE); 1223 sb->reshape_position = cpu_to_le64(mddev->reshape_position); 1224 sb->new_layout = cpu_to_le32(mddev->new_layout); 1225 sb->delta_disks = cpu_to_le32(mddev->delta_disks); 1226 sb->new_level = cpu_to_le32(mddev->new_level); 1227 sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9); 1228 } 1229 1230 max_dev = 0; 1231 ITERATE_RDEV(mddev,rdev2,tmp) 1232 if (rdev2->desc_nr+1 > max_dev) 1233 max_dev = rdev2->desc_nr+1; 1234 1235 sb->max_dev = cpu_to_le32(max_dev); 1236 for (i=0; i<max_dev;i++) 1237 sb->dev_roles[i] = cpu_to_le16(0xfffe); 1238 1239 ITERATE_RDEV(mddev,rdev2,tmp) { 1240 i = rdev2->desc_nr; 1241 if (test_bit(Faulty, &rdev2->flags)) 1242 sb->dev_roles[i] = cpu_to_le16(0xfffe); 1243 else if (test_bit(In_sync, &rdev2->flags)) 1244 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); 1245 else 1246 sb->dev_roles[i] = cpu_to_le16(0xffff); 1247 } 1248 1249 sb->recovery_offset = cpu_to_le64(0); /* not supported yet */ 1250 sb->sb_csum = calc_sb_1_csum(sb); 1251 } 1252 1253 1254 static struct super_type super_types[] = { 1255 [0] = { 1256 .name = "0.90.0", 1257 .owner = THIS_MODULE, 1258 .load_super = super_90_load, 1259 .validate_super = super_90_validate, 1260 .sync_super = super_90_sync, 1261 }, 1262 [1] = { 1263 .name = "md-1", 1264 .owner = THIS_MODULE, 1265 .load_super = super_1_load, 1266 .validate_super = super_1_validate, 1267 .sync_super = super_1_sync, 1268 }, 1269 }; 1270 1271 static mdk_rdev_t * match_dev_unit(mddev_t *mddev, mdk_rdev_t *dev) 1272 { 1273 struct list_head *tmp; 1274 mdk_rdev_t *rdev; 1275 1276 
ITERATE_RDEV(mddev,rdev,tmp) 1277 if (rdev->bdev->bd_contains == dev->bdev->bd_contains) 1278 return rdev; 1279 1280 return NULL; 1281 } 1282 1283 static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2) 1284 { 1285 struct list_head *tmp; 1286 mdk_rdev_t *rdev; 1287 1288 ITERATE_RDEV(mddev1,rdev,tmp) 1289 if (match_dev_unit(mddev2, rdev)) 1290 return 1; 1291 1292 return 0; 1293 } 1294 1295 static LIST_HEAD(pending_raid_disks); 1296 1297 static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) 1298 { 1299 mdk_rdev_t *same_pdev; 1300 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 1301 struct kobject *ko; 1302 char *s; 1303 1304 if (rdev->mddev) { 1305 MD_BUG(); 1306 return -EINVAL; 1307 } 1308 /* make sure rdev->size exceeds mddev->size */ 1309 if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) { 1310 if (mddev->pers) 1311 /* Cannot change size, so fail */ 1312 return -ENOSPC; 1313 else 1314 mddev->size = rdev->size; 1315 } 1316 same_pdev = match_dev_unit(mddev, rdev); 1317 if (same_pdev) 1318 printk(KERN_WARNING 1319 "%s: WARNING: %s appears to be on the same physical" 1320 " disk as %s. True\n protection against single-disk" 1321 " failure might be compromised.\n", 1322 mdname(mddev), bdevname(rdev->bdev,b), 1323 bdevname(same_pdev->bdev,b2)); 1324 1325 /* Verify rdev->desc_nr is unique. 1326 * If it is -1, assign a free number, else 1327 * check number is not in use 1328 */ 1329 if (rdev->desc_nr < 0) { 1330 int choice = 0; 1331 if (mddev->pers) choice = mddev->raid_disks; 1332 while (find_rdev_nr(mddev, choice)) 1333 choice++; 1334 rdev->desc_nr = choice; 1335 } else { 1336 if (find_rdev_nr(mddev, rdev->desc_nr)) 1337 return -EBUSY; 1338 } 1339 bdevname(rdev->bdev,b); 1340 if (kobject_set_name(&rdev->kobj, "dev-%s", b) < 0) 1341 return -ENOMEM; 1342 while ( (s=strchr(rdev->kobj.k_name, '/')) != NULL) 1343 *s = '!'; 1344 1345 list_add(&rdev->same_set, &mddev->disks); 1346 rdev->mddev = mddev; 1347 printk(KERN_INFO "md: bind<%s>\n", b); 1348 1349 rdev->kobj.parent = &mddev->kobj; 1350 kobject_add(&rdev->kobj); 1351 1352 if (rdev->bdev->bd_part) 1353 ko = &rdev->bdev->bd_part->kobj; 1354 else 1355 ko = &rdev->bdev->bd_disk->kobj; 1356 sysfs_create_link(&rdev->kobj, ko, "block"); 1357 bd_claim_by_disk(rdev->bdev, rdev, mddev->gendisk); 1358 return 0; 1359 } 1360 1361 static void unbind_rdev_from_array(mdk_rdev_t * rdev) 1362 { 1363 char b[BDEVNAME_SIZE]; 1364 if (!rdev->mddev) { 1365 MD_BUG(); 1366 return; 1367 } 1368 bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk); 1369 list_del_init(&rdev->same_set); 1370 printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b)); 1371 rdev->mddev = NULL; 1372 sysfs_remove_link(&rdev->kobj, "block"); 1373 kobject_del(&rdev->kobj); 1374 } 1375 1376 /* 1377 * prevent the device from being mounted, repartitioned or 1378 * otherwise reused by a RAID array (or any other kernel 1379 * subsystem), by bd_claiming the device. 
1380 */ 1381 static int lock_rdev(mdk_rdev_t *rdev, dev_t dev) 1382 { 1383 int err = 0; 1384 struct block_device *bdev; 1385 char b[BDEVNAME_SIZE]; 1386 1387 bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE); 1388 if (IS_ERR(bdev)) { 1389 printk(KERN_ERR "md: could not open %s.\n", 1390 __bdevname(dev, b)); 1391 return PTR_ERR(bdev); 1392 } 1393 err = bd_claim(bdev, rdev); 1394 if (err) { 1395 printk(KERN_ERR "md: could not bd_claim %s.\n", 1396 bdevname(bdev, b)); 1397 blkdev_put(bdev); 1398 return err; 1399 } 1400 rdev->bdev = bdev; 1401 return err; 1402 } 1403 1404 static void unlock_rdev(mdk_rdev_t *rdev) 1405 { 1406 struct block_device *bdev = rdev->bdev; 1407 rdev->bdev = NULL; 1408 if (!bdev) 1409 MD_BUG(); 1410 bd_release(bdev); 1411 blkdev_put(bdev); 1412 } 1413 1414 void md_autodetect_dev(dev_t dev); 1415 1416 static void export_rdev(mdk_rdev_t * rdev) 1417 { 1418 char b[BDEVNAME_SIZE]; 1419 printk(KERN_INFO "md: export_rdev(%s)\n", 1420 bdevname(rdev->bdev,b)); 1421 if (rdev->mddev) 1422 MD_BUG(); 1423 free_disk_sb(rdev); 1424 list_del_init(&rdev->same_set); 1425 #ifndef MODULE 1426 md_autodetect_dev(rdev->bdev->bd_dev); 1427 #endif 1428 unlock_rdev(rdev); 1429 kobject_put(&rdev->kobj); 1430 } 1431 1432 static void kick_rdev_from_array(mdk_rdev_t * rdev) 1433 { 1434 unbind_rdev_from_array(rdev); 1435 export_rdev(rdev); 1436 } 1437 1438 static void export_array(mddev_t *mddev) 1439 { 1440 struct list_head *tmp; 1441 mdk_rdev_t *rdev; 1442 1443 ITERATE_RDEV(mddev,rdev,tmp) { 1444 if (!rdev->mddev) { 1445 MD_BUG(); 1446 continue; 1447 } 1448 kick_rdev_from_array(rdev); 1449 } 1450 if (!list_empty(&mddev->disks)) 1451 MD_BUG(); 1452 mddev->raid_disks = 0; 1453 mddev->major_version = 0; 1454 } 1455 1456 static void print_desc(mdp_disk_t *desc) 1457 { 1458 printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number, 1459 desc->major,desc->minor,desc->raid_disk,desc->state); 1460 } 1461 1462 static void print_sb(mdp_super_t *sb) 1463 { 1464 int i; 1465 1466 printk(KERN_INFO 1467 "md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n", 1468 sb->major_version, sb->minor_version, sb->patch_version, 1469 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3, 1470 sb->ctime); 1471 printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n", 1472 sb->level, sb->size, sb->nr_disks, sb->raid_disks, 1473 sb->md_minor, sb->layout, sb->chunk_size); 1474 printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d" 1475 " FD:%d SD:%d CSUM:%08x E:%08lx\n", 1476 sb->utime, sb->state, sb->active_disks, sb->working_disks, 1477 sb->failed_disks, sb->spare_disks, 1478 sb->sb_csum, (unsigned long)sb->events_lo); 1479 1480 printk(KERN_INFO); 1481 for (i = 0; i < MD_SB_DISKS; i++) { 1482 mdp_disk_t *desc; 1483 1484 desc = sb->disks + i; 1485 if (desc->number || desc->major || desc->minor || 1486 desc->raid_disk || (desc->state && (desc->state != 4))) { 1487 printk(" D %2d: ", i); 1488 print_desc(desc); 1489 } 1490 } 1491 printk(KERN_INFO "md: THIS: "); 1492 print_desc(&sb->this_disk); 1493 1494 } 1495 1496 static void print_rdev(mdk_rdev_t *rdev) 1497 { 1498 char b[BDEVNAME_SIZE]; 1499 printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n", 1500 bdevname(rdev->bdev,b), (unsigned long long)rdev->size, 1501 test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags), 1502 rdev->desc_nr); 1503 if (rdev->sb_loaded) { 1504 printk(KERN_INFO "md: rdev superblock:\n"); 1505 print_sb((mdp_super_t*)page_address(rdev->sb_page)); 1506 } else 1507 printk(KERN_INFO "md: no rdev superblock!\n"); 1508 } 1509 1510 void 
md_print_devices(void) 1511 { 1512 struct list_head *tmp, *tmp2; 1513 mdk_rdev_t *rdev; 1514 mddev_t *mddev; 1515 char b[BDEVNAME_SIZE]; 1516 1517 printk("\n"); 1518 printk("md: **********************************\n"); 1519 printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n"); 1520 printk("md: **********************************\n"); 1521 ITERATE_MDDEV(mddev,tmp) { 1522 1523 if (mddev->bitmap) 1524 bitmap_print_sb(mddev->bitmap); 1525 else 1526 printk("%s: ", mdname(mddev)); 1527 ITERATE_RDEV(mddev,rdev,tmp2) 1528 printk("<%s>", bdevname(rdev->bdev,b)); 1529 printk("\n"); 1530 1531 ITERATE_RDEV(mddev,rdev,tmp2) 1532 print_rdev(rdev); 1533 } 1534 printk("md: **********************************\n"); 1535 printk("\n"); 1536 } 1537 1538 1539 static void sync_sbs(mddev_t * mddev) 1540 { 1541 mdk_rdev_t *rdev; 1542 struct list_head *tmp; 1543 1544 ITERATE_RDEV(mddev,rdev,tmp) { 1545 super_types[mddev->major_version]. 1546 sync_super(mddev, rdev); 1547 rdev->sb_loaded = 1; 1548 } 1549 } 1550 1551 void md_update_sb(mddev_t * mddev) 1552 { 1553 int err; 1554 struct list_head *tmp; 1555 mdk_rdev_t *rdev; 1556 int sync_req; 1557 1558 repeat: 1559 spin_lock_irq(&mddev->write_lock); 1560 sync_req = mddev->in_sync; 1561 mddev->utime = get_seconds(); 1562 mddev->events ++; 1563 1564 if (!mddev->events) { 1565 /* 1566 * oops, this 64-bit counter should never wrap. 1567 * Either we are in around ~1 trillion A.C., assuming 1568 * 1 reboot per second, or we have a bug: 1569 */ 1570 MD_BUG(); 1571 mddev->events --; 1572 } 1573 mddev->sb_dirty = 2; 1574 sync_sbs(mddev); 1575 1576 /* 1577 * do not write anything to disk if using 1578 * nonpersistent superblocks 1579 */ 1580 if (!mddev->persistent) { 1581 mddev->sb_dirty = 0; 1582 spin_unlock_irq(&mddev->write_lock); 1583 wake_up(&mddev->sb_wait); 1584 return; 1585 } 1586 spin_unlock_irq(&mddev->write_lock); 1587 1588 dprintk(KERN_INFO 1589 "md: updating %s RAID superblock on device (in sync %d)\n", 1590 mdname(mddev),mddev->in_sync); 1591 1592 err = bitmap_update_sb(mddev->bitmap); 1593 ITERATE_RDEV(mddev,rdev,tmp) { 1594 char b[BDEVNAME_SIZE]; 1595 dprintk(KERN_INFO "md: "); 1596 if (test_bit(Faulty, &rdev->flags)) 1597 dprintk("(skipping faulty "); 1598 1599 dprintk("%s ", bdevname(rdev->bdev,b)); 1600 if (!test_bit(Faulty, &rdev->flags)) { 1601 md_super_write(mddev,rdev, 1602 rdev->sb_offset<<1, rdev->sb_size, 1603 rdev->sb_page); 1604 dprintk(KERN_INFO "(write) %s's sb offset: %llu\n", 1605 bdevname(rdev->bdev,b), 1606 (unsigned long long)rdev->sb_offset); 1607 1608 } else 1609 dprintk(")\n"); 1610 if (mddev->level == LEVEL_MULTIPATH) 1611 /* only need to write one superblock... */ 1612 break; 1613 } 1614 md_super_wait(mddev); 1615 /* if there was a failure, sb_dirty was set to 1, and we re-write super */ 1616 1617 spin_lock_irq(&mddev->write_lock); 1618 if (mddev->in_sync != sync_req|| mddev->sb_dirty == 1) { 1619 /* have to write it out again */ 1620 spin_unlock_irq(&mddev->write_lock); 1621 goto repeat; 1622 } 1623 mddev->sb_dirty = 0; 1624 spin_unlock_irq(&mddev->write_lock); 1625 wake_up(&mddev->sb_wait); 1626 1627 } 1628 EXPORT_SYMBOL_GPL(md_update_sb); 1629 1630 /* words written to sysfs files may, or my not, be \n terminated. 1631 * We want to accept with case. For this we use cmd_match. 1632 */ 1633 static int cmd_match(const char *cmd, const char *str) 1634 { 1635 /* See if cmd, written into a sysfs file, matches 1636 * str. 
They must either be the same, or cmd can 1637 * have a trailing newline 1638 */ 1639 while (*cmd && *str && *cmd == *str) { 1640 cmd++; 1641 str++; 1642 } 1643 if (*cmd == '\n') 1644 cmd++; 1645 if (*str || *cmd) 1646 return 0; 1647 return 1; 1648 } 1649 1650 struct rdev_sysfs_entry { 1651 struct attribute attr; 1652 ssize_t (*show)(mdk_rdev_t *, char *); 1653 ssize_t (*store)(mdk_rdev_t *, const char *, size_t); 1654 }; 1655 1656 static ssize_t 1657 state_show(mdk_rdev_t *rdev, char *page) 1658 { 1659 char *sep = ""; 1660 int len=0; 1661 1662 if (test_bit(Faulty, &rdev->flags)) { 1663 len+= sprintf(page+len, "%sfaulty",sep); 1664 sep = ","; 1665 } 1666 if (test_bit(In_sync, &rdev->flags)) { 1667 len += sprintf(page+len, "%sin_sync",sep); 1668 sep = ","; 1669 } 1670 if (!test_bit(Faulty, &rdev->flags) && 1671 !test_bit(In_sync, &rdev->flags)) { 1672 len += sprintf(page+len, "%sspare", sep); 1673 sep = ","; 1674 } 1675 return len+sprintf(page+len, "\n"); 1676 } 1677 1678 static struct rdev_sysfs_entry 1679 rdev_state = __ATTR_RO(state); 1680 1681 static ssize_t 1682 super_show(mdk_rdev_t *rdev, char *page) 1683 { 1684 if (rdev->sb_loaded && rdev->sb_size) { 1685 memcpy(page, page_address(rdev->sb_page), rdev->sb_size); 1686 return rdev->sb_size; 1687 } else 1688 return 0; 1689 } 1690 static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super); 1691 1692 static ssize_t 1693 errors_show(mdk_rdev_t *rdev, char *page) 1694 { 1695 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); 1696 } 1697 1698 static ssize_t 1699 errors_store(mdk_rdev_t *rdev, const char *buf, size_t len) 1700 { 1701 char *e; 1702 unsigned long n = simple_strtoul(buf, &e, 10); 1703 if (*buf && (*e == 0 || *e == '\n')) { 1704 atomic_set(&rdev->corrected_errors, n); 1705 return len; 1706 } 1707 return -EINVAL; 1708 } 1709 static struct rdev_sysfs_entry rdev_errors = 1710 __ATTR(errors, 0644, errors_show, errors_store); 1711 1712 static ssize_t 1713 slot_show(mdk_rdev_t *rdev, char *page) 1714 { 1715 if (rdev->raid_disk < 0) 1716 return sprintf(page, "none\n"); 1717 else 1718 return sprintf(page, "%d\n", rdev->raid_disk); 1719 } 1720 1721 static ssize_t 1722 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) 1723 { 1724 char *e; 1725 int slot = simple_strtoul(buf, &e, 10); 1726 if (strncmp(buf, "none", 4)==0) 1727 slot = -1; 1728 else if (e==buf || (*e && *e!= '\n')) 1729 return -EINVAL; 1730 if (rdev->mddev->pers) 1731 /* Cannot set slot in active array (yet) */ 1732 return -EBUSY; 1733 if (slot >= rdev->mddev->raid_disks) 1734 return -ENOSPC; 1735 rdev->raid_disk = slot; 1736 /* assume it is working */ 1737 rdev->flags = 0; 1738 set_bit(In_sync, &rdev->flags); 1739 return len; 1740 } 1741 1742 1743 static struct rdev_sysfs_entry rdev_slot = 1744 __ATTR(slot, 0644, slot_show, slot_store); 1745 1746 static ssize_t 1747 offset_show(mdk_rdev_t *rdev, char *page) 1748 { 1749 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); 1750 } 1751 1752 static ssize_t 1753 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len) 1754 { 1755 char *e; 1756 unsigned long long offset = simple_strtoull(buf, &e, 10); 1757 if (e==buf || (*e && *e != '\n')) 1758 return -EINVAL; 1759 if (rdev->mddev->pers) 1760 return -EBUSY; 1761 rdev->data_offset = offset; 1762 return len; 1763 } 1764 1765 static struct rdev_sysfs_entry rdev_offset = 1766 __ATTR(offset, 0644, offset_show, offset_store); 1767 1768 static ssize_t 1769 rdev_size_show(mdk_rdev_t *rdev, char *page) 1770 { 1771 return sprintf(page, "%llu\n", 
(unsigned long long)rdev->size); 1772 } 1773 1774 static ssize_t 1775 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) 1776 { 1777 char *e; 1778 unsigned long long size = simple_strtoull(buf, &e, 10); 1779 if (e==buf || (*e && *e != '\n')) 1780 return -EINVAL; 1781 if (rdev->mddev->pers) 1782 return -EBUSY; 1783 rdev->size = size; 1784 if (size < rdev->mddev->size || rdev->mddev->size == 0) 1785 rdev->mddev->size = size; 1786 return len; 1787 } 1788 1789 static struct rdev_sysfs_entry rdev_size = 1790 __ATTR(size, 0644, rdev_size_show, rdev_size_store); 1791 1792 static struct attribute *rdev_default_attrs[] = { 1793 &rdev_state.attr, 1794 &rdev_super.attr, 1795 &rdev_errors.attr, 1796 &rdev_slot.attr, 1797 &rdev_offset.attr, 1798 &rdev_size.attr, 1799 NULL, 1800 }; 1801 static ssize_t 1802 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 1803 { 1804 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 1805 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); 1806 1807 if (!entry->show) 1808 return -EIO; 1809 return entry->show(rdev, page); 1810 } 1811 1812 static ssize_t 1813 rdev_attr_store(struct kobject *kobj, struct attribute *attr, 1814 const char *page, size_t length) 1815 { 1816 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 1817 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); 1818 1819 if (!entry->store) 1820 return -EIO; 1821 return entry->store(rdev, page, length); 1822 } 1823 1824 static void rdev_free(struct kobject *ko) 1825 { 1826 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj); 1827 kfree(rdev); 1828 } 1829 static struct sysfs_ops rdev_sysfs_ops = { 1830 .show = rdev_attr_show, 1831 .store = rdev_attr_store, 1832 }; 1833 static struct kobj_type rdev_ktype = { 1834 .release = rdev_free, 1835 .sysfs_ops = &rdev_sysfs_ops, 1836 .default_attrs = rdev_default_attrs, 1837 }; 1838 1839 /* 1840 * Import a device. If 'super_format' >= 0, then sanity check the superblock 1841 * 1842 * mark the device faulty if: 1843 * 1844 * - the device is nonexistent (zero size) 1845 * - the device has no valid superblock 1846 * 1847 * a faulty rdev _never_ has rdev->sb set. 1848 */ 1849 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor) 1850 { 1851 char b[BDEVNAME_SIZE]; 1852 int err; 1853 mdk_rdev_t *rdev; 1854 sector_t size; 1855 1856 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); 1857 if (!rdev) { 1858 printk(KERN_ERR "md: could not alloc mem for new device!\n"); 1859 return ERR_PTR(-ENOMEM); 1860 } 1861 1862 if ((err = alloc_disk_sb(rdev))) 1863 goto abort_free; 1864 1865 err = lock_rdev(rdev, newdev); 1866 if (err) 1867 goto abort_free; 1868 1869 rdev->kobj.parent = NULL; 1870 rdev->kobj.ktype = &rdev_ktype; 1871 kobject_init(&rdev->kobj); 1872 1873 rdev->desc_nr = -1; 1874 rdev->flags = 0; 1875 rdev->data_offset = 0; 1876 atomic_set(&rdev->nr_pending, 0); 1877 atomic_set(&rdev->read_errors, 0); 1878 atomic_set(&rdev->corrected_errors, 0); 1879 1880 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 1881 if (!size) { 1882 printk(KERN_WARNING 1883 "md: %s has zero or unknown size, marking faulty!\n", 1884 bdevname(rdev->bdev,b)); 1885 err = -EINVAL; 1886 goto abort_free; 1887 } 1888 1889 if (super_format >= 0) { 1890 err = super_types[super_format]. 
1891 load_super(rdev, NULL, super_minor); 1892 if (err == -EINVAL) { 1893 printk(KERN_WARNING 1894 "md: %s has invalid sb, not importing!\n", 1895 bdevname(rdev->bdev,b)); 1896 goto abort_free; 1897 } 1898 if (err < 0) { 1899 printk(KERN_WARNING 1900 "md: could not read %s's sb, not importing!\n", 1901 bdevname(rdev->bdev,b)); 1902 goto abort_free; 1903 } 1904 } 1905 INIT_LIST_HEAD(&rdev->same_set); 1906 1907 return rdev; 1908 1909 abort_free: 1910 if (rdev->sb_page) { 1911 if (rdev->bdev) 1912 unlock_rdev(rdev); 1913 free_disk_sb(rdev); 1914 } 1915 kfree(rdev); 1916 return ERR_PTR(err); 1917 } 1918 1919 /* 1920 * Check a full RAID array for plausibility 1921 */ 1922 1923 1924 static void analyze_sbs(mddev_t * mddev) 1925 { 1926 int i; 1927 struct list_head *tmp; 1928 mdk_rdev_t *rdev, *freshest; 1929 char b[BDEVNAME_SIZE]; 1930 1931 freshest = NULL; 1932 ITERATE_RDEV(mddev,rdev,tmp) 1933 switch (super_types[mddev->major_version]. 1934 load_super(rdev, freshest, mddev->minor_version)) { 1935 case 1: 1936 freshest = rdev; 1937 break; 1938 case 0: 1939 break; 1940 default: 1941 printk( KERN_ERR \ 1942 "md: fatal superblock inconsistency in %s" 1943 " -- removing from array\n", 1944 bdevname(rdev->bdev,b)); 1945 kick_rdev_from_array(rdev); 1946 } 1947 1948 1949 super_types[mddev->major_version]. 1950 validate_super(mddev, freshest); 1951 1952 i = 0; 1953 ITERATE_RDEV(mddev,rdev,tmp) { 1954 if (rdev != freshest) 1955 if (super_types[mddev->major_version]. 1956 validate_super(mddev, rdev)) { 1957 printk(KERN_WARNING "md: kicking non-fresh %s" 1958 " from array!\n", 1959 bdevname(rdev->bdev,b)); 1960 kick_rdev_from_array(rdev); 1961 continue; 1962 } 1963 if (mddev->level == LEVEL_MULTIPATH) { 1964 rdev->desc_nr = i++; 1965 rdev->raid_disk = rdev->desc_nr; 1966 set_bit(In_sync, &rdev->flags); 1967 } 1968 } 1969 1970 1971 1972 if (mddev->recovery_cp != MaxSector && 1973 mddev->level >= 1) 1974 printk(KERN_ERR "md: %s: raid array is not clean" 1975 " -- starting background reconstruction\n", 1976 mdname(mddev)); 1977 1978 } 1979 1980 static ssize_t 1981 level_show(mddev_t *mddev, char *page) 1982 { 1983 struct mdk_personality *p = mddev->pers; 1984 if (p) 1985 return sprintf(page, "%s\n", p->name); 1986 else if (mddev->clevel[0]) 1987 return sprintf(page, "%s\n", mddev->clevel); 1988 else if (mddev->level != LEVEL_NONE) 1989 return sprintf(page, "%d\n", mddev->level); 1990 else 1991 return 0; 1992 } 1993 1994 static ssize_t 1995 level_store(mddev_t *mddev, const char *buf, size_t len) 1996 { 1997 int rv = len; 1998 if (mddev->pers) 1999 return -EBUSY; 2000 if (len == 0) 2001 return 0; 2002 if (len >= sizeof(mddev->clevel)) 2003 return -ENOSPC; 2004 strncpy(mddev->clevel, buf, len); 2005 if (mddev->clevel[len-1] == '\n') 2006 len--; 2007 mddev->clevel[len] = 0; 2008 mddev->level = LEVEL_NONE; 2009 return rv; 2010 } 2011 2012 static struct md_sysfs_entry md_level = 2013 __ATTR(level, 0644, level_show, level_store); 2014 2015 static ssize_t 2016 raid_disks_show(mddev_t *mddev, char *page) 2017 { 2018 if (mddev->raid_disks == 0) 2019 return 0; 2020 return sprintf(page, "%d\n", mddev->raid_disks); 2021 } 2022 2023 static int update_raid_disks(mddev_t *mddev, int raid_disks); 2024 2025 static ssize_t 2026 raid_disks_store(mddev_t *mddev, const char *buf, size_t len) 2027 { 2028 /* can only set raid_disks if array is not yet active */ 2029 char *e; 2030 int rv = 0; 2031 unsigned long n = simple_strtoul(buf, &e, 10); 2032 2033 if (!*buf || (*e && *e != '\n')) 2034 return -EINVAL; 2035 2036 if (mddev->pers) 
2037 rv = update_raid_disks(mddev, n); 2038 else 2039 mddev->raid_disks = n; 2040 return rv ? rv : len; 2041 } 2042 static struct md_sysfs_entry md_raid_disks = 2043 __ATTR(raid_disks, 0644, raid_disks_show, raid_disks_store); 2044 2045 static ssize_t 2046 chunk_size_show(mddev_t *mddev, char *page) 2047 { 2048 return sprintf(page, "%d\n", mddev->chunk_size); 2049 } 2050 2051 static ssize_t 2052 chunk_size_store(mddev_t *mddev, const char *buf, size_t len) 2053 { 2054 /* can only set chunk_size if array is not yet active */ 2055 char *e; 2056 unsigned long n = simple_strtoul(buf, &e, 10); 2057 2058 if (mddev->pers) 2059 return -EBUSY; 2060 if (!*buf || (*e && *e != '\n')) 2061 return -EINVAL; 2062 2063 mddev->chunk_size = n; 2064 return len; 2065 } 2066 static struct md_sysfs_entry md_chunk_size = 2067 __ATTR(chunk_size, 0644, chunk_size_show, chunk_size_store); 2068 2069 static ssize_t 2070 null_show(mddev_t *mddev, char *page) 2071 { 2072 return -EINVAL; 2073 } 2074 2075 static ssize_t 2076 new_dev_store(mddev_t *mddev, const char *buf, size_t len) 2077 { 2078 /* buf must be %d:%d\n? giving major and minor numbers */ 2079 /* The new device is added to the array. 2080 * If the array has a persistent superblock, we read the 2081 * superblock to initialise info and check validity. 2082 * Otherwise, only checking done is that in bind_rdev_to_array, 2083 * which mainly checks size. 2084 */ 2085 char *e; 2086 int major = simple_strtoul(buf, &e, 10); 2087 int minor; 2088 dev_t dev; 2089 mdk_rdev_t *rdev; 2090 int err; 2091 2092 if (!*buf || *e != ':' || !e[1] || e[1] == '\n') 2093 return -EINVAL; 2094 minor = simple_strtoul(e+1, &e, 10); 2095 if (*e && *e != '\n') 2096 return -EINVAL; 2097 dev = MKDEV(major, minor); 2098 if (major != MAJOR(dev) || 2099 minor != MINOR(dev)) 2100 return -EOVERFLOW; 2101 2102 2103 if (mddev->persistent) { 2104 rdev = md_import_device(dev, mddev->major_version, 2105 mddev->minor_version); 2106 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { 2107 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, 2108 mdk_rdev_t, same_set); 2109 err = super_types[mddev->major_version] 2110 .load_super(rdev, rdev0, mddev->minor_version); 2111 if (err < 0) 2112 goto out; 2113 } 2114 } else 2115 rdev = md_import_device(dev, -1, -1); 2116 2117 if (IS_ERR(rdev)) 2118 return PTR_ERR(rdev); 2119 err = bind_rdev_to_array(rdev, mddev); 2120 out: 2121 if (err) 2122 export_rdev(rdev); 2123 return err ? err : len; 2124 } 2125 2126 static struct md_sysfs_entry md_new_device = 2127 __ATTR(new_dev, 0200, null_show, new_dev_store); 2128 2129 static ssize_t 2130 size_show(mddev_t *mddev, char *page) 2131 { 2132 return sprintf(page, "%llu\n", (unsigned long long)mddev->size); 2133 } 2134 2135 static int update_size(mddev_t *mddev, unsigned long size); 2136 2137 static ssize_t 2138 size_store(mddev_t *mddev, const char *buf, size_t len) 2139 { 2140 /* If array is inactive, we can reduce the component size, but 2141 * not increase it (except from 0). 2142 * If array is active, we can try an on-line resize 2143 */ 2144 char *e; 2145 int err = 0; 2146 unsigned long long size = simple_strtoull(buf, &e, 10); 2147 if (!*buf || *buf == '\n' || 2148 (*e && *e != '\n')) 2149 return -EINVAL; 2150 2151 if (mddev->pers) { 2152 err = update_size(mddev, size); 2153 md_update_sb(mddev); 2154 } else { 2155 if (mddev->size == 0 || 2156 mddev->size > size) 2157 mddev->size = size; 2158 else 2159 err = -ENOSPC; 2160 } 2161 return err ? 
err : len; 2162 } 2163 2164 static struct md_sysfs_entry md_size = 2165 __ATTR(component_size, 0644, size_show, size_store); 2166 2167 2168 /* Metadata version. 2169 * This is either 'none' for arrays with externally managed metadata, 2170 * or N.M for internally known formats 2171 */ 2172 static ssize_t 2173 metadata_show(mddev_t *mddev, char *page) 2174 { 2175 if (mddev->persistent) 2176 return sprintf(page, "%d.%d\n", 2177 mddev->major_version, mddev->minor_version); 2178 else 2179 return sprintf(page, "none\n"); 2180 } 2181 2182 static ssize_t 2183 metadata_store(mddev_t *mddev, const char *buf, size_t len) 2184 { 2185 int major, minor; 2186 char *e; 2187 if (!list_empty(&mddev->disks)) 2188 return -EBUSY; 2189 2190 if (cmd_match(buf, "none")) { 2191 mddev->persistent = 0; 2192 mddev->major_version = 0; 2193 mddev->minor_version = 90; 2194 return len; 2195 } 2196 major = simple_strtoul(buf, &e, 10); 2197 if (e==buf || *e != '.') 2198 return -EINVAL; 2199 buf = e+1; 2200 minor = simple_strtoul(buf, &e, 10); 2201 if (e==buf || *e != '\n') 2202 return -EINVAL; 2203 if (major >= sizeof(super_types)/sizeof(super_types[0]) || 2204 super_types[major].name == NULL) 2205 return -ENOENT; 2206 mddev->major_version = major; 2207 mddev->minor_version = minor; 2208 mddev->persistent = 1; 2209 return len; 2210 } 2211 2212 static struct md_sysfs_entry md_metadata = 2213 __ATTR(metadata_version, 0644, metadata_show, metadata_store); 2214 2215 static ssize_t 2216 action_show(mddev_t *mddev, char *page) 2217 { 2218 char *type = "idle"; 2219 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 2220 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) { 2221 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 2222 type = "reshape"; 2223 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 2224 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 2225 type = "resync"; 2226 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) 2227 type = "check"; 2228 else 2229 type = "repair"; 2230 } else 2231 type = "recover"; 2232 } 2233 return sprintf(page, "%s\n", type); 2234 } 2235 2236 static ssize_t 2237 action_store(mddev_t *mddev, const char *page, size_t len) 2238 { 2239 if (!mddev->pers || !mddev->pers->sync_request) 2240 return -EINVAL; 2241 2242 if (cmd_match(page, "idle")) { 2243 if (mddev->sync_thread) { 2244 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 2245 md_unregister_thread(mddev->sync_thread); 2246 mddev->sync_thread = NULL; 2247 mddev->recovery = 0; 2248 } 2249 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 2250 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 2251 return -EBUSY; 2252 else if (cmd_match(page, "resync") || cmd_match(page, "recover")) 2253 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2254 else if (cmd_match(page, "reshape")) { 2255 int err; 2256 if (mddev->pers->start_reshape == NULL) 2257 return -EINVAL; 2258 err = mddev->pers->start_reshape(mddev); 2259 if (err) 2260 return err; 2261 } else { 2262 if (cmd_match(page, "check")) 2263 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); 2264 else if (!cmd_match(page, "repair")) 2265 return -EINVAL; 2266 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 2267 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 2268 } 2269 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2270 md_wakeup_thread(mddev->thread); 2271 return len; 2272 } 2273 2274 static ssize_t 2275 mismatch_cnt_show(mddev_t *mddev, char *page) 2276 { 2277 return sprintf(page, "%llu\n", 2278 (unsigned long long) mddev->resync_mismatches); 2279 } 2280 2281 static struct
md_sysfs_entry 2282 md_scan_mode = __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); 2283 2284 2285 static struct md_sysfs_entry 2286 md_mismatches = __ATTR_RO(mismatch_cnt); 2287 2288 static ssize_t 2289 sync_min_show(mddev_t *mddev, char *page) 2290 { 2291 return sprintf(page, "%d (%s)\n", speed_min(mddev), 2292 mddev->sync_speed_min ? "local": "system"); 2293 } 2294 2295 static ssize_t 2296 sync_min_store(mddev_t *mddev, const char *buf, size_t len) 2297 { 2298 int min; 2299 char *e; 2300 if (strncmp(buf, "system", 6)==0) { 2301 mddev->sync_speed_min = 0; 2302 return len; 2303 } 2304 min = simple_strtoul(buf, &e, 10); 2305 if (buf == e || (*e && *e != '\n') || min <= 0) 2306 return -EINVAL; 2307 mddev->sync_speed_min = min; 2308 return len; 2309 } 2310 2311 static struct md_sysfs_entry md_sync_min = 2312 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); 2313 2314 static ssize_t 2315 sync_max_show(mddev_t *mddev, char *page) 2316 { 2317 return sprintf(page, "%d (%s)\n", speed_max(mddev), 2318 mddev->sync_speed_max ? "local": "system"); 2319 } 2320 2321 static ssize_t 2322 sync_max_store(mddev_t *mddev, const char *buf, size_t len) 2323 { 2324 int max; 2325 char *e; 2326 if (strncmp(buf, "system", 6)==0) { 2327 mddev->sync_speed_max = 0; 2328 return len; 2329 } 2330 max = simple_strtoul(buf, &e, 10); 2331 if (buf == e || (*e && *e != '\n') || max <= 0) 2332 return -EINVAL; 2333 mddev->sync_speed_max = max; 2334 return len; 2335 } 2336 2337 static struct md_sysfs_entry md_sync_max = 2338 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); 2339 2340 2341 static ssize_t 2342 sync_speed_show(mddev_t *mddev, char *page) 2343 { 2344 unsigned long resync, dt, db; 2345 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active)); 2346 dt = ((jiffies - mddev->resync_mark) / HZ); 2347 if (!dt) dt++; 2348 db = resync - (mddev->resync_mark_cnt); 2349 return sprintf(page, "%ld\n", db/dt/2); /* K/sec */ 2350 } 2351 2352 static struct md_sysfs_entry 2353 md_sync_speed = __ATTR_RO(sync_speed); 2354 2355 static ssize_t 2356 sync_completed_show(mddev_t *mddev, char *page) 2357 { 2358 unsigned long max_blocks, resync; 2359 2360 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 2361 max_blocks = mddev->resync_max_sectors; 2362 else 2363 max_blocks = mddev->size << 1; 2364 2365 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active)); 2366 return sprintf(page, "%lu / %lu\n", resync, max_blocks); 2367 } 2368 2369 static struct md_sysfs_entry 2370 md_sync_completed = __ATTR_RO(sync_completed); 2371 2372 static ssize_t 2373 suspend_lo_show(mddev_t *mddev, char *page) 2374 { 2375 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); 2376 } 2377 2378 static ssize_t 2379 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len) 2380 { 2381 char *e; 2382 unsigned long long new = simple_strtoull(buf, &e, 10); 2383 2384 if (mddev->pers->quiesce == NULL) 2385 return -EINVAL; 2386 if (buf == e || (*e && *e != '\n')) 2387 return -EINVAL; 2388 if (new >= mddev->suspend_hi || 2389 (new > mddev->suspend_lo && new < mddev->suspend_hi)) { 2390 mddev->suspend_lo = new; 2391 mddev->pers->quiesce(mddev, 2); 2392 return len; 2393 } else 2394 return -EINVAL; 2395 } 2396 static struct md_sysfs_entry md_suspend_lo = 2397 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store); 2398 2399 2400 static ssize_t 2401 suspend_hi_show(mddev_t *mddev, char *page) 2402 { 2403 return sprintf(page, "%llu\n", (unsigned long 
long)mddev->suspend_hi); 2404 } 2405 2406 static ssize_t 2407 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len) 2408 { 2409 char *e; 2410 unsigned long long new = simple_strtoull(buf, &e, 10); 2411 2412 if (mddev->pers->quiesce == NULL) 2413 return -EINVAL; 2414 if (buf == e || (*e && *e != '\n')) 2415 return -EINVAL; 2416 if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) || 2417 (new > mddev->suspend_lo && new > mddev->suspend_hi)) { 2418 mddev->suspend_hi = new; 2419 mddev->pers->quiesce(mddev, 1); 2420 mddev->pers->quiesce(mddev, 0); 2421 return len; 2422 } else 2423 return -EINVAL; 2424 } 2425 static struct md_sysfs_entry md_suspend_hi = 2426 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); 2427 2428 2429 static struct attribute *md_default_attrs[] = { 2430 &md_level.attr, 2431 &md_raid_disks.attr, 2432 &md_chunk_size.attr, 2433 &md_size.attr, 2434 &md_metadata.attr, 2435 &md_new_device.attr, 2436 NULL, 2437 }; 2438 2439 static struct attribute *md_redundancy_attrs[] = { 2440 &md_scan_mode.attr, 2441 &md_mismatches.attr, 2442 &md_sync_min.attr, 2443 &md_sync_max.attr, 2444 &md_sync_speed.attr, 2445 &md_sync_completed.attr, 2446 &md_suspend_lo.attr, 2447 &md_suspend_hi.attr, 2448 NULL, 2449 }; 2450 static struct attribute_group md_redundancy_group = { 2451 .name = NULL, 2452 .attrs = md_redundancy_attrs, 2453 }; 2454 2455 2456 static ssize_t 2457 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 2458 { 2459 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 2460 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); 2461 ssize_t rv; 2462 2463 if (!entry->show) 2464 return -EIO; 2465 rv = mddev_lock(mddev); 2466 if (!rv) { 2467 rv = entry->show(mddev, page); 2468 mddev_unlock(mddev); 2469 } 2470 return rv; 2471 } 2472 2473 static ssize_t 2474 md_attr_store(struct kobject *kobj, struct attribute *attr, 2475 const char *page, size_t length) 2476 { 2477 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 2478 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); 2479 ssize_t rv; 2480 2481 if (!entry->store) 2482 return -EIO; 2483 rv = mddev_lock(mddev); 2484 if (!rv) { 2485 rv = entry->store(mddev, page, length); 2486 mddev_unlock(mddev); 2487 } 2488 return rv; 2489 } 2490 2491 static void md_free(struct kobject *ko) 2492 { 2493 mddev_t *mddev = container_of(ko, mddev_t, kobj); 2494 kfree(mddev); 2495 } 2496 2497 static struct sysfs_ops md_sysfs_ops = { 2498 .show = md_attr_show, 2499 .store = md_attr_store, 2500 }; 2501 static struct kobj_type md_ktype = { 2502 .release = md_free, 2503 .sysfs_ops = &md_sysfs_ops, 2504 .default_attrs = md_default_attrs, 2505 }; 2506 2507 int mdp_major = 0; 2508 2509 static struct kobject *md_probe(dev_t dev, int *part, void *data) 2510 { 2511 static DEFINE_MUTEX(disks_mutex); 2512 mddev_t *mddev = mddev_find(dev); 2513 struct gendisk *disk; 2514 int partitioned = (MAJOR(dev) != MD_MAJOR); 2515 int shift = partitioned ? 
MdpMinorShift : 0; 2516 int unit = MINOR(dev) >> shift; 2517 2518 if (!mddev) 2519 return NULL; 2520 2521 mutex_lock(&disks_mutex); 2522 if (mddev->gendisk) { 2523 mutex_unlock(&disks_mutex); 2524 mddev_put(mddev); 2525 return NULL; 2526 } 2527 disk = alloc_disk(1 << shift); 2528 if (!disk) { 2529 mutex_unlock(&disks_mutex); 2530 mddev_put(mddev); 2531 return NULL; 2532 } 2533 disk->major = MAJOR(dev); 2534 disk->first_minor = unit << shift; 2535 if (partitioned) { 2536 sprintf(disk->disk_name, "md_d%d", unit); 2537 sprintf(disk->devfs_name, "md/d%d", unit); 2538 } else { 2539 sprintf(disk->disk_name, "md%d", unit); 2540 sprintf(disk->devfs_name, "md/%d", unit); 2541 } 2542 disk->fops = &md_fops; 2543 disk->private_data = mddev; 2544 disk->queue = mddev->queue; 2545 add_disk(disk); 2546 mddev->gendisk = disk; 2547 mutex_unlock(&disks_mutex); 2548 mddev->kobj.parent = &disk->kobj; 2549 mddev->kobj.k_name = NULL; 2550 snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md"); 2551 mddev->kobj.ktype = &md_ktype; 2552 kobject_register(&mddev->kobj); 2553 return NULL; 2554 } 2555 2556 void md_wakeup_thread(mdk_thread_t *thread); 2557 2558 static void md_safemode_timeout(unsigned long data) 2559 { 2560 mddev_t *mddev = (mddev_t *) data; 2561 2562 mddev->safemode = 1; 2563 md_wakeup_thread(mddev->thread); 2564 } 2565 2566 static int start_dirty_degraded; 2567 2568 static int do_md_run(mddev_t * mddev) 2569 { 2570 int err; 2571 int chunk_size; 2572 struct list_head *tmp; 2573 mdk_rdev_t *rdev; 2574 struct gendisk *disk; 2575 struct mdk_personality *pers; 2576 char b[BDEVNAME_SIZE]; 2577 2578 if (list_empty(&mddev->disks)) 2579 /* cannot run an array with no devices.. */ 2580 return -EINVAL; 2581 2582 if (mddev->pers) 2583 return -EBUSY; 2584 2585 /* 2586 * Analyze all RAID superblock(s) 2587 */ 2588 if (!mddev->raid_disks) 2589 analyze_sbs(mddev); 2590 2591 chunk_size = mddev->chunk_size; 2592 2593 if (chunk_size) { 2594 if (chunk_size > MAX_CHUNK_SIZE) { 2595 printk(KERN_ERR "too big chunk_size: %d > %d\n", 2596 chunk_size, MAX_CHUNK_SIZE); 2597 return -EINVAL; 2598 } 2599 /* 2600 * chunk-size has to be a power of 2 and multiples of PAGE_SIZE 2601 */ 2602 if ( (1 << ffz(~chunk_size)) != chunk_size) { 2603 printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size); 2604 return -EINVAL; 2605 } 2606 if (chunk_size < PAGE_SIZE) { 2607 printk(KERN_ERR "too small chunk_size: %d < %ld\n", 2608 chunk_size, PAGE_SIZE); 2609 return -EINVAL; 2610 } 2611 2612 /* devices must have minimum size of one chunk */ 2613 ITERATE_RDEV(mddev,rdev,tmp) { 2614 if (test_bit(Faulty, &rdev->flags)) 2615 continue; 2616 if (rdev->size < chunk_size / 1024) { 2617 printk(KERN_WARNING 2618 "md: Dev %s smaller than chunk_size:" 2619 " %lluk < %dk\n", 2620 bdevname(rdev->bdev,b), 2621 (unsigned long long)rdev->size, 2622 chunk_size / 1024); 2623 return -EINVAL; 2624 } 2625 } 2626 } 2627 2628 #ifdef CONFIG_KMOD 2629 if (mddev->level != LEVEL_NONE) 2630 request_module("md-level-%d", mddev->level); 2631 else if (mddev->clevel[0]) 2632 request_module("md-%s", mddev->clevel); 2633 #endif 2634 2635 /* 2636 * Drop all container device buffers, from now on 2637 * the only valid external interface is through the md 2638 * device. 
2639 * Also find largest hardsector size 2640 */ 2641 ITERATE_RDEV(mddev,rdev,tmp) { 2642 if (test_bit(Faulty, &rdev->flags)) 2643 continue; 2644 sync_blockdev(rdev->bdev); 2645 invalidate_bdev(rdev->bdev, 0); 2646 } 2647 2648 md_probe(mddev->unit, NULL, NULL); 2649 disk = mddev->gendisk; 2650 if (!disk) 2651 return -ENOMEM; 2652 2653 spin_lock(&pers_lock); 2654 pers = find_pers(mddev->level, mddev->clevel); 2655 if (!pers || !try_module_get(pers->owner)) { 2656 spin_unlock(&pers_lock); 2657 if (mddev->level != LEVEL_NONE) 2658 printk(KERN_WARNING "md: personality for level %d is not loaded!\n", 2659 mddev->level); 2660 else 2661 printk(KERN_WARNING "md: personality for level %s is not loaded!\n", 2662 mddev->clevel); 2663 return -EINVAL; 2664 } 2665 mddev->pers = pers; 2666 spin_unlock(&pers_lock); 2667 mddev->level = pers->level; 2668 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 2669 2670 if (mddev->reshape_position != MaxSector && 2671 pers->start_reshape == NULL) { 2672 /* This personality cannot handle reshaping... */ 2673 mddev->pers = NULL; 2674 module_put(pers->owner); 2675 return -EINVAL; 2676 } 2677 2678 mddev->recovery = 0; 2679 mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */ 2680 mddev->barriers_work = 1; 2681 mddev->ok_start_degraded = start_dirty_degraded; 2682 2683 if (start_readonly) 2684 mddev->ro = 2; /* read-only, but switch on first write */ 2685 2686 err = mddev->pers->run(mddev); 2687 if (!err && mddev->pers->sync_request) { 2688 err = bitmap_create(mddev); 2689 if (err) { 2690 printk(KERN_ERR "%s: failed to create bitmap (%d)\n", 2691 mdname(mddev), err); 2692 mddev->pers->stop(mddev); 2693 } 2694 } 2695 if (err) { 2696 printk(KERN_ERR "md: pers->run() failed ...\n"); 2697 module_put(mddev->pers->owner); 2698 mddev->pers = NULL; 2699 bitmap_destroy(mddev); 2700 return err; 2701 } 2702 if (mddev->pers->sync_request) 2703 sysfs_create_group(&mddev->kobj, &md_redundancy_group); 2704 else if (mddev->ro == 2) /* auto-readonly not meaningful */ 2705 mddev->ro = 0; 2706 2707 atomic_set(&mddev->writes_pending,0); 2708 mddev->safemode = 0; 2709 mddev->safemode_timer.function = md_safemode_timeout; 2710 mddev->safemode_timer.data = (unsigned long) mddev; 2711 mddev->safemode_delay = (20 * HZ)/1000 +1; /* 20 msec delay */ 2712 mddev->in_sync = 1; 2713 2714 ITERATE_RDEV(mddev,rdev,tmp) 2715 if (rdev->raid_disk >= 0) { 2716 char nm[20]; 2717 sprintf(nm, "rd%d", rdev->raid_disk); 2718 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); 2719 } 2720 2721 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2722 md_wakeup_thread(mddev->thread); 2723 2724 if (mddev->sb_dirty) 2725 md_update_sb(mddev); 2726 2727 set_capacity(disk, mddev->array_size<<1); 2728 2729 /* If we call blk_queue_make_request here, it will 2730 * re-initialise max_sectors etc which may have been 2731 * refined inside -> run. So just set the bits we need to set. 2732 * Most initialisation happened when we called 2733 * blk_queue_make_request(..., md_fail_request) 2734 * earlier.
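 * (The queue itself was fully initialised earlier with md_fail_request()
 * as its make_request_fn; the two assignments below only repoint it at
 * the personality's handler and attach the mddev as queuedata.)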
2735 */ 2736 mddev->queue->queuedata = mddev; 2737 mddev->queue->make_request_fn = mddev->pers->make_request; 2738 2739 mddev->changed = 1; 2740 md_new_event(mddev); 2741 return 0; 2742 } 2743 2744 static int restart_array(mddev_t *mddev) 2745 { 2746 struct gendisk *disk = mddev->gendisk; 2747 int err; 2748 2749 /* 2750 * Complain if it has no devices 2751 */ 2752 err = -ENXIO; 2753 if (list_empty(&mddev->disks)) 2754 goto out; 2755 2756 if (mddev->pers) { 2757 err = -EBUSY; 2758 if (!mddev->ro) 2759 goto out; 2760 2761 mddev->safemode = 0; 2762 mddev->ro = 0; 2763 set_disk_ro(disk, 0); 2764 2765 printk(KERN_INFO "md: %s switched to read-write mode.\n", 2766 mdname(mddev)); 2767 /* 2768 * Kick recovery or resync if necessary 2769 */ 2770 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2771 md_wakeup_thread(mddev->thread); 2772 err = 0; 2773 } else { 2774 printk(KERN_ERR "md: %s has no personality assigned.\n", 2775 mdname(mddev)); 2776 err = -EINVAL; 2777 } 2778 2779 out: 2780 return err; 2781 } 2782 2783 static int do_md_stop(mddev_t * mddev, int ro) 2784 { 2785 int err = 0; 2786 struct gendisk *disk = mddev->gendisk; 2787 2788 if (mddev->pers) { 2789 if (atomic_read(&mddev->active)>2) { 2790 printk("md: %s still in use.\n",mdname(mddev)); 2791 return -EBUSY; 2792 } 2793 2794 if (mddev->sync_thread) { 2795 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 2796 md_unregister_thread(mddev->sync_thread); 2797 mddev->sync_thread = NULL; 2798 } 2799 2800 del_timer_sync(&mddev->safemode_timer); 2801 2802 invalidate_partition(disk, 0); 2803 2804 if (ro) { 2805 err = -ENXIO; 2806 if (mddev->ro==1) 2807 goto out; 2808 mddev->ro = 1; 2809 } else { 2810 bitmap_flush(mddev); 2811 md_super_wait(mddev); 2812 if (mddev->ro) 2813 set_disk_ro(disk, 0); 2814 blk_queue_make_request(mddev->queue, md_fail_request); 2815 mddev->pers->stop(mddev); 2816 if (mddev->pers->sync_request) 2817 sysfs_remove_group(&mddev->kobj, &md_redundancy_group); 2818 2819 module_put(mddev->pers->owner); 2820 mddev->pers = NULL; 2821 if (mddev->ro) 2822 mddev->ro = 0; 2823 } 2824 if (!mddev->in_sync) { 2825 /* mark array as shutdown cleanly */ 2826 mddev->in_sync = 1; 2827 md_update_sb(mddev); 2828 } 2829 if (ro) 2830 set_disk_ro(disk, 1); 2831 } 2832 2833 /* 2834 * Free resources if final stop 2835 */ 2836 if (!ro) { 2837 mdk_rdev_t *rdev; 2838 struct list_head *tmp; 2839 struct gendisk *disk; 2840 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); 2841 2842 bitmap_destroy(mddev); 2843 if (mddev->bitmap_file) { 2844 atomic_set(&mddev->bitmap_file->f_dentry->d_inode->i_writecount, 1); 2845 fput(mddev->bitmap_file); 2846 mddev->bitmap_file = NULL; 2847 } 2848 mddev->bitmap_offset = 0; 2849 2850 ITERATE_RDEV(mddev,rdev,tmp) 2851 if (rdev->raid_disk >= 0) { 2852 char nm[20]; 2853 sprintf(nm, "rd%d", rdev->raid_disk); 2854 sysfs_remove_link(&mddev->kobj, nm); 2855 } 2856 2857 export_array(mddev); 2858 2859 mddev->array_size = 0; 2860 disk = mddev->gendisk; 2861 if (disk) 2862 set_capacity(disk, 0); 2863 mddev->changed = 1; 2864 } else 2865 printk(KERN_INFO "md: %s switched to read-only mode.\n", 2866 mdname(mddev)); 2867 err = 0; 2868 md_new_event(mddev); 2869 out: 2870 return err; 2871 } 2872 2873 static void autorun_array(mddev_t *mddev) 2874 { 2875 mdk_rdev_t *rdev; 2876 struct list_head *tmp; 2877 int err; 2878 2879 if (list_empty(&mddev->disks)) 2880 return; 2881 2882 printk(KERN_INFO "md: running: "); 2883 2884 ITERATE_RDEV(mddev,rdev,tmp) { 2885 char b[BDEVNAME_SIZE]; 2886 printk("<%s>", bdevname(rdev->bdev,b)); 2887 } 2888 
printk("\n"); 2889 2890 err = do_md_run (mddev); 2891 if (err) { 2892 printk(KERN_WARNING "md: do_md_run() returned %d\n", err); 2893 do_md_stop (mddev, 0); 2894 } 2895 } 2896 2897 /* 2898 * lets try to run arrays based on all disks that have arrived 2899 * until now. (those are in pending_raid_disks) 2900 * 2901 * the method: pick the first pending disk, collect all disks with 2902 * the same UUID, remove all from the pending list and put them into 2903 * the 'same_array' list. Then order this list based on superblock 2904 * update time (freshest comes first), kick out 'old' disks and 2905 * compare superblocks. If everything's fine then run it. 2906 * 2907 * If "unit" is allocated, then bump its reference count 2908 */ 2909 static void autorun_devices(int part) 2910 { 2911 struct list_head *tmp; 2912 mdk_rdev_t *rdev0, *rdev; 2913 mddev_t *mddev; 2914 char b[BDEVNAME_SIZE]; 2915 2916 printk(KERN_INFO "md: autorun ...\n"); 2917 while (!list_empty(&pending_raid_disks)) { 2918 dev_t dev; 2919 LIST_HEAD(candidates); 2920 rdev0 = list_entry(pending_raid_disks.next, 2921 mdk_rdev_t, same_set); 2922 2923 printk(KERN_INFO "md: considering %s ...\n", 2924 bdevname(rdev0->bdev,b)); 2925 INIT_LIST_HEAD(&candidates); 2926 ITERATE_RDEV_PENDING(rdev,tmp) 2927 if (super_90_load(rdev, rdev0, 0) >= 0) { 2928 printk(KERN_INFO "md: adding %s ...\n", 2929 bdevname(rdev->bdev,b)); 2930 list_move(&rdev->same_set, &candidates); 2931 } 2932 /* 2933 * now we have a set of devices, with all of them having 2934 * mostly sane superblocks. It's time to allocate the 2935 * mddev. 2936 */ 2937 if (rdev0->preferred_minor < 0 || rdev0->preferred_minor >= MAX_MD_DEVS) { 2938 printk(KERN_INFO "md: unit number in %s is bad: %d\n", 2939 bdevname(rdev0->bdev, b), rdev0->preferred_minor); 2940 break; 2941 } 2942 if (part) 2943 dev = MKDEV(mdp_major, 2944 rdev0->preferred_minor << MdpMinorShift); 2945 else 2946 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor); 2947 2948 md_probe(dev, NULL, NULL); 2949 mddev = mddev_find(dev); 2950 if (!mddev) { 2951 printk(KERN_ERR 2952 "md: cannot allocate memory for md drive.\n"); 2953 break; 2954 } 2955 if (mddev_lock(mddev)) 2956 printk(KERN_WARNING "md: %s locked, cannot run\n", 2957 mdname(mddev)); 2958 else if (mddev->raid_disks || mddev->major_version 2959 || !list_empty(&mddev->disks)) { 2960 printk(KERN_WARNING 2961 "md: %s already running, cannot run %s\n", 2962 mdname(mddev), bdevname(rdev0->bdev,b)); 2963 mddev_unlock(mddev); 2964 } else { 2965 printk(KERN_INFO "md: created %s\n", mdname(mddev)); 2966 ITERATE_RDEV_GENERIC(candidates,rdev,tmp) { 2967 list_del_init(&rdev->same_set); 2968 if (bind_rdev_to_array(rdev, mddev)) 2969 export_rdev(rdev); 2970 } 2971 autorun_array(mddev); 2972 mddev_unlock(mddev); 2973 } 2974 /* on success, candidates will be empty, on error 2975 * it won't... 2976 */ 2977 ITERATE_RDEV_GENERIC(candidates,rdev,tmp) 2978 export_rdev(rdev); 2979 mddev_put(mddev); 2980 } 2981 printk(KERN_INFO "md: ... autorun DONE.\n"); 2982 } 2983 2984 /* 2985 * import RAID devices based on one partition 2986 * if possible, the array gets run as well. 
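 * This is the backend of the (deprecated) START_ARRAY ioctl handled in
 * md_ioctl() below: the caller passes the device number of one member
 * (decoded in-kernel by new_decode_dev()), the remaining members are
 * located from its 0.90 superblock, and the lot is handed to
 * autorun_devices().  A rough user-space sketch, with an illustrative
 * device path:
 *
 *	int fd = open("/dev/md0", O_RDONLY);
 *	ioctl(fd, START_ARRAY, dev);	(dev = device number of one member)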
2987 */ 2988 2989 static int autostart_array(dev_t startdev) 2990 { 2991 char b[BDEVNAME_SIZE]; 2992 int err = -EINVAL, i; 2993 mdp_super_t *sb = NULL; 2994 mdk_rdev_t *start_rdev = NULL, *rdev; 2995 2996 start_rdev = md_import_device(startdev, 0, 0); 2997 if (IS_ERR(start_rdev)) 2998 return err; 2999 3000 3001 /* NOTE: this can only work for 0.90.0 superblocks */ 3002 sb = (mdp_super_t*)page_address(start_rdev->sb_page); 3003 if (sb->major_version != 0 || 3004 sb->minor_version != 90 ) { 3005 printk(KERN_WARNING "md: can only autostart 0.90.0 arrays\n"); 3006 export_rdev(start_rdev); 3007 return err; 3008 } 3009 3010 if (test_bit(Faulty, &start_rdev->flags)) { 3011 printk(KERN_WARNING 3012 "md: can not autostart based on faulty %s!\n", 3013 bdevname(start_rdev->bdev,b)); 3014 export_rdev(start_rdev); 3015 return err; 3016 } 3017 list_add(&start_rdev->same_set, &pending_raid_disks); 3018 3019 for (i = 0; i < MD_SB_DISKS; i++) { 3020 mdp_disk_t *desc = sb->disks + i; 3021 dev_t dev = MKDEV(desc->major, desc->minor); 3022 3023 if (!dev) 3024 continue; 3025 if (dev == startdev) 3026 continue; 3027 if (MAJOR(dev) != desc->major || MINOR(dev) != desc->minor) 3028 continue; 3029 rdev = md_import_device(dev, 0, 0); 3030 if (IS_ERR(rdev)) 3031 continue; 3032 3033 list_add(&rdev->same_set, &pending_raid_disks); 3034 } 3035 3036 /* 3037 * possibly return codes 3038 */ 3039 autorun_devices(0); 3040 return 0; 3041 3042 } 3043 3044 3045 static int get_version(void __user * arg) 3046 { 3047 mdu_version_t ver; 3048 3049 ver.major = MD_MAJOR_VERSION; 3050 ver.minor = MD_MINOR_VERSION; 3051 ver.patchlevel = MD_PATCHLEVEL_VERSION; 3052 3053 if (copy_to_user(arg, &ver, sizeof(ver))) 3054 return -EFAULT; 3055 3056 return 0; 3057 } 3058 3059 static int get_array_info(mddev_t * mddev, void __user * arg) 3060 { 3061 mdu_array_info_t info; 3062 int nr,working,active,failed,spare; 3063 mdk_rdev_t *rdev; 3064 struct list_head *tmp; 3065 3066 nr=working=active=failed=spare=0; 3067 ITERATE_RDEV(mddev,rdev,tmp) { 3068 nr++; 3069 if (test_bit(Faulty, &rdev->flags)) 3070 failed++; 3071 else { 3072 working++; 3073 if (test_bit(In_sync, &rdev->flags)) 3074 active++; 3075 else 3076 spare++; 3077 } 3078 } 3079 3080 info.major_version = mddev->major_version; 3081 info.minor_version = mddev->minor_version; 3082 info.patch_version = MD_PATCHLEVEL_VERSION; 3083 info.ctime = mddev->ctime; 3084 info.level = mddev->level; 3085 info.size = mddev->size; 3086 if (info.size != mddev->size) /* overflow */ 3087 info.size = -1; 3088 info.nr_disks = nr; 3089 info.raid_disks = mddev->raid_disks; 3090 info.md_minor = mddev->md_minor; 3091 info.not_persistent= !mddev->persistent; 3092 3093 info.utime = mddev->utime; 3094 info.state = 0; 3095 if (mddev->in_sync) 3096 info.state = (1<<MD_SB_CLEAN); 3097 if (mddev->bitmap && mddev->bitmap_offset) 3098 info.state = (1<<MD_SB_BITMAP_PRESENT); 3099 info.active_disks = active; 3100 info.working_disks = working; 3101 info.failed_disks = failed; 3102 info.spare_disks = spare; 3103 3104 info.layout = mddev->layout; 3105 info.chunk_size = mddev->chunk_size; 3106 3107 if (copy_to_user(arg, &info, sizeof(info))) 3108 return -EFAULT; 3109 3110 return 0; 3111 } 3112 3113 static int get_bitmap_file(mddev_t * mddev, void __user * arg) 3114 { 3115 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ 3116 char *ptr, *buf = NULL; 3117 int err = -ENOMEM; 3118 3119 file = kmalloc(sizeof(*file), GFP_KERNEL); 3120 if (!file) 3121 goto out; 3122 3123 /* bitmap disabled, zero the first byte and copy out 
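 * (so a caller of the GET_BITMAP_FILE ioctl sees an empty pathname when
 * no file-backed bitmap is configured).  Illustrative use, assuming an
 * already-open md device descriptor fd:
 *
 *	mdu_bitmap_file_t bf;
 *	if (ioctl(fd, GET_BITMAP_FILE, &bf) == 0)
 *		printf("bitmap file: %s\n", bf.pathname[0] ? bf.pathname : "none");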
*/ 3124 if (!mddev->bitmap || !mddev->bitmap->file) { 3125 file->pathname[0] = '\0'; 3126 goto copy_out; 3127 } 3128 3129 buf = kmalloc(sizeof(file->pathname), GFP_KERNEL); 3130 if (!buf) 3131 goto out; 3132 3133 ptr = file_path(mddev->bitmap->file, buf, sizeof(file->pathname)); 3134 if (!ptr) 3135 goto out; 3136 3137 strcpy(file->pathname, ptr); 3138 3139 copy_out: 3140 err = 0; 3141 if (copy_to_user(arg, file, sizeof(*file))) 3142 err = -EFAULT; 3143 out: 3144 kfree(buf); 3145 kfree(file); 3146 return err; 3147 } 3148 3149 static int get_disk_info(mddev_t * mddev, void __user * arg) 3150 { 3151 mdu_disk_info_t info; 3152 unsigned int nr; 3153 mdk_rdev_t *rdev; 3154 3155 if (copy_from_user(&info, arg, sizeof(info))) 3156 return -EFAULT; 3157 3158 nr = info.number; 3159 3160 rdev = find_rdev_nr(mddev, nr); 3161 if (rdev) { 3162 info.major = MAJOR(rdev->bdev->bd_dev); 3163 info.minor = MINOR(rdev->bdev->bd_dev); 3164 info.raid_disk = rdev->raid_disk; 3165 info.state = 0; 3166 if (test_bit(Faulty, &rdev->flags)) 3167 info.state |= (1<<MD_DISK_FAULTY); 3168 else if (test_bit(In_sync, &rdev->flags)) { 3169 info.state |= (1<<MD_DISK_ACTIVE); 3170 info.state |= (1<<MD_DISK_SYNC); 3171 } 3172 if (test_bit(WriteMostly, &rdev->flags)) 3173 info.state |= (1<<MD_DISK_WRITEMOSTLY); 3174 } else { 3175 info.major = info.minor = 0; 3176 info.raid_disk = -1; 3177 info.state = (1<<MD_DISK_REMOVED); 3178 } 3179 3180 if (copy_to_user(arg, &info, sizeof(info))) 3181 return -EFAULT; 3182 3183 return 0; 3184 } 3185 3186 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) 3187 { 3188 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 3189 mdk_rdev_t *rdev; 3190 dev_t dev = MKDEV(info->major,info->minor); 3191 3192 if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) 3193 return -EOVERFLOW; 3194 3195 if (!mddev->raid_disks) { 3196 int err; 3197 /* expecting a device which has a superblock */ 3198 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); 3199 if (IS_ERR(rdev)) { 3200 printk(KERN_WARNING 3201 "md: md_import_device returned %ld\n", 3202 PTR_ERR(rdev)); 3203 return PTR_ERR(rdev); 3204 } 3205 if (!list_empty(&mddev->disks)) { 3206 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, 3207 mdk_rdev_t, same_set); 3208 int err = super_types[mddev->major_version] 3209 .load_super(rdev, rdev0, mddev->minor_version); 3210 if (err < 0) { 3211 printk(KERN_WARNING 3212 "md: %s has different UUID to %s\n", 3213 bdevname(rdev->bdev,b), 3214 bdevname(rdev0->bdev,b2)); 3215 export_rdev(rdev); 3216 return -EINVAL; 3217 } 3218 } 3219 err = bind_rdev_to_array(rdev, mddev); 3220 if (err) 3221 export_rdev(rdev); 3222 return err; 3223 } 3224 3225 /* 3226 * add_new_disk can be used once the array is assembled 3227 * to add "hot spares". 
They must already have a superblock 3228 * written 3229 */ 3230 if (mddev->pers) { 3231 int err; 3232 if (!mddev->pers->hot_add_disk) { 3233 printk(KERN_WARNING 3234 "%s: personality does not support diskops!\n", 3235 mdname(mddev)); 3236 return -EINVAL; 3237 } 3238 if (mddev->persistent) 3239 rdev = md_import_device(dev, mddev->major_version, 3240 mddev->minor_version); 3241 else 3242 rdev = md_import_device(dev, -1, -1); 3243 if (IS_ERR(rdev)) { 3244 printk(KERN_WARNING 3245 "md: md_import_device returned %ld\n", 3246 PTR_ERR(rdev)); 3247 return PTR_ERR(rdev); 3248 } 3249 /* set save_raid_disk if appropriate */ 3250 if (!mddev->persistent) { 3251 if (info->state & (1<<MD_DISK_SYNC) && 3252 info->raid_disk < mddev->raid_disks) 3253 rdev->raid_disk = info->raid_disk; 3254 else 3255 rdev->raid_disk = -1; 3256 } else 3257 super_types[mddev->major_version]. 3258 validate_super(mddev, rdev); 3259 rdev->saved_raid_disk = rdev->raid_disk; 3260 3261 clear_bit(In_sync, &rdev->flags); /* just to be sure */ 3262 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 3263 set_bit(WriteMostly, &rdev->flags); 3264 3265 rdev->raid_disk = -1; 3266 err = bind_rdev_to_array(rdev, mddev); 3267 if (err) 3268 export_rdev(rdev); 3269 3270 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3271 md_wakeup_thread(mddev->thread); 3272 return err; 3273 } 3274 3275 /* otherwise, add_new_disk is only allowed 3276 * for major_version==0 superblocks 3277 */ 3278 if (mddev->major_version != 0) { 3279 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n", 3280 mdname(mddev)); 3281 return -EINVAL; 3282 } 3283 3284 if (!(info->state & (1<<MD_DISK_FAULTY))) { 3285 int err; 3286 rdev = md_import_device (dev, -1, 0); 3287 if (IS_ERR(rdev)) { 3288 printk(KERN_WARNING 3289 "md: error, md_import_device() returned %ld\n", 3290 PTR_ERR(rdev)); 3291 return PTR_ERR(rdev); 3292 } 3293 rdev->desc_nr = info->number; 3294 if (info->raid_disk < mddev->raid_disks) 3295 rdev->raid_disk = info->raid_disk; 3296 else 3297 rdev->raid_disk = -1; 3298 3299 rdev->flags = 0; 3300 3301 if (rdev->raid_disk < mddev->raid_disks) 3302 if (info->state & (1<<MD_DISK_SYNC)) 3303 set_bit(In_sync, &rdev->flags); 3304 3305 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 3306 set_bit(WriteMostly, &rdev->flags); 3307 3308 if (!mddev->persistent) { 3309 printk(KERN_INFO "md: nonpersistent superblock ...\n"); 3310 rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 3311 } else 3312 rdev->sb_offset = calc_dev_sboffset(rdev->bdev); 3313 rdev->size = calc_dev_size(rdev, mddev->chunk_size); 3314 3315 err = bind_rdev_to_array(rdev, mddev); 3316 if (err) { 3317 export_rdev(rdev); 3318 return err; 3319 } 3320 } 3321 3322 return 0; 3323 } 3324 3325 static int hot_remove_disk(mddev_t * mddev, dev_t dev) 3326 { 3327 char b[BDEVNAME_SIZE]; 3328 mdk_rdev_t *rdev; 3329 3330 if (!mddev->pers) 3331 return -ENODEV; 3332 3333 rdev = find_rdev(mddev, dev); 3334 if (!rdev) 3335 return -ENXIO; 3336 3337 if (rdev->raid_disk >= 0) 3338 goto busy; 3339 3340 kick_rdev_from_array(rdev); 3341 md_update_sb(mddev); 3342 md_new_event(mddev); 3343 3344 return 0; 3345 busy: 3346 printk(KERN_WARNING "md: cannot remove active disk %s from %s ... 
\n", 3347 bdevname(rdev->bdev,b), mdname(mddev)); 3348 return -EBUSY; 3349 } 3350 3351 static int hot_add_disk(mddev_t * mddev, dev_t dev) 3352 { 3353 char b[BDEVNAME_SIZE]; 3354 int err; 3355 unsigned int size; 3356 mdk_rdev_t *rdev; 3357 3358 if (!mddev->pers) 3359 return -ENODEV; 3360 3361 if (mddev->major_version != 0) { 3362 printk(KERN_WARNING "%s: HOT_ADD may only be used with" 3363 " version-0 superblocks.\n", 3364 mdname(mddev)); 3365 return -EINVAL; 3366 } 3367 if (!mddev->pers->hot_add_disk) { 3368 printk(KERN_WARNING 3369 "%s: personality does not support diskops!\n", 3370 mdname(mddev)); 3371 return -EINVAL; 3372 } 3373 3374 rdev = md_import_device (dev, -1, 0); 3375 if (IS_ERR(rdev)) { 3376 printk(KERN_WARNING 3377 "md: error, md_import_device() returned %ld\n", 3378 PTR_ERR(rdev)); 3379 return -EINVAL; 3380 } 3381 3382 if (mddev->persistent) 3383 rdev->sb_offset = calc_dev_sboffset(rdev->bdev); 3384 else 3385 rdev->sb_offset = 3386 rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 3387 3388 size = calc_dev_size(rdev, mddev->chunk_size); 3389 rdev->size = size; 3390 3391 if (test_bit(Faulty, &rdev->flags)) { 3392 printk(KERN_WARNING 3393 "md: can not hot-add faulty %s disk to %s!\n", 3394 bdevname(rdev->bdev,b), mdname(mddev)); 3395 err = -EINVAL; 3396 goto abort_export; 3397 } 3398 clear_bit(In_sync, &rdev->flags); 3399 rdev->desc_nr = -1; 3400 err = bind_rdev_to_array(rdev, mddev); 3401 if (err) 3402 goto abort_export; 3403 3404 /* 3405 * The rest should better be atomic, we can have disk failures 3406 * noticed in interrupt contexts ... 3407 */ 3408 3409 if (rdev->desc_nr == mddev->max_disks) { 3410 printk(KERN_WARNING "%s: can not hot-add to full array!\n", 3411 mdname(mddev)); 3412 err = -EBUSY; 3413 goto abort_unbind_export; 3414 } 3415 3416 rdev->raid_disk = -1; 3417 3418 md_update_sb(mddev); 3419 3420 /* 3421 * Kick recovery, maybe this spare has to be added to the 3422 * array immediately. 3423 */ 3424 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3425 md_wakeup_thread(mddev->thread); 3426 md_new_event(mddev); 3427 return 0; 3428 3429 abort_unbind_export: 3430 unbind_rdev_from_array(rdev); 3431 3432 abort_export: 3433 export_rdev(rdev); 3434 return err; 3435 } 3436 3437 /* similar to deny_write_access, but accounts for our holding a reference 3438 * to the file ourselves */ 3439 static int deny_bitmap_write_access(struct file * file) 3440 { 3441 struct inode *inode = file->f_mapping->host; 3442 3443 spin_lock(&inode->i_lock); 3444 if (atomic_read(&inode->i_writecount) > 1) { 3445 spin_unlock(&inode->i_lock); 3446 return -ETXTBSY; 3447 } 3448 atomic_set(&inode->i_writecount, -1); 3449 spin_unlock(&inode->i_lock); 3450 3451 return 0; 3452 } 3453 3454 static int set_bitmap_file(mddev_t *mddev, int fd) 3455 { 3456 int err; 3457 3458 if (mddev->pers) { 3459 if (!mddev->pers->quiesce) 3460 return -EBUSY; 3461 if (mddev->recovery || mddev->sync_thread) 3462 return -EBUSY; 3463 /* we should be able to change the bitmap.. 
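 * (the personality is quiesced around bitmap_create()/bitmap_destroy()
 * below, so the change is safe even while the array is active).
 * A rough user-space sketch of the ioctl this implements, with purely
 * illustrative paths:
 *
 *	int md_fd = open("/dev/md0", O_RDONLY);
 *	int bm_fd = open("/var/lib/md0.bitmap", O_RDWR);
 *	ioctl(md_fd, SET_BITMAP_FILE, bm_fd);	(attach the file bitmap)
 *	ioctl(md_fd, SET_BITMAP_FILE, -1);	(detach it again)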
*/ 3464 } 3465 3466 3467 if (fd >= 0) { 3468 if (mddev->bitmap) 3469 return -EEXIST; /* cannot add when bitmap is present */ 3470 mddev->bitmap_file = fget(fd); 3471 3472 if (mddev->bitmap_file == NULL) { 3473 printk(KERN_ERR "%s: error: failed to get bitmap file\n", 3474 mdname(mddev)); 3475 return -EBADF; 3476 } 3477 3478 err = deny_bitmap_write_access(mddev->bitmap_file); 3479 if (err) { 3480 printk(KERN_ERR "%s: error: bitmap file is already in use\n", 3481 mdname(mddev)); 3482 fput(mddev->bitmap_file); 3483 mddev->bitmap_file = NULL; 3484 return err; 3485 } 3486 mddev->bitmap_offset = 0; /* file overrides offset */ 3487 } else if (mddev->bitmap == NULL) 3488 return -ENOENT; /* cannot remove what isn't there */ 3489 err = 0; 3490 if (mddev->pers) { 3491 mddev->pers->quiesce(mddev, 1); 3492 if (fd >= 0) 3493 err = bitmap_create(mddev); 3494 if (fd < 0 || err) 3495 bitmap_destroy(mddev); 3496 mddev->pers->quiesce(mddev, 0); 3497 } else if (fd < 0) { 3498 if (mddev->bitmap_file) 3499 fput(mddev->bitmap_file); 3500 mddev->bitmap_file = NULL; 3501 } 3502 3503 return err; 3504 } 3505 3506 /* 3507 * set_array_info is used in two different ways 3508 * The original usage is when creating a new array. 3509 * In this usage, raid_disks is > 0 and it together with 3510 * level, size, not_persistent, layout, chunksize determine the 3511 * shape of the array. 3512 * This will always create an array with a type-0.90.0 superblock. 3513 * The newer usage is when assembling an array. 3514 * In this case raid_disks will be 0, and the major_version field is 3515 * used to determine which style super-blocks are to be found on the devices. 3516 * The minor and patch _version numbers are also kept in case the 3517 * super_block handler wishes to interpret them. 3518 */ 3519 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) 3520 { 3521 3522 if (info->raid_disks == 0) { 3523 /* just setting version number for superblock loading */ 3524 if (info->major_version < 0 || 3525 info->major_version >= sizeof(super_types)/sizeof(super_types[0]) || 3526 super_types[info->major_version].name == NULL) { 3527 /* maybe try to auto-load a module? */ 3528 printk(KERN_INFO 3529 "md: superblock version %d not known\n", 3530 info->major_version); 3531 return -EINVAL; 3532 } 3533 mddev->major_version = info->major_version; 3534 mddev->minor_version = info->minor_version; 3535 mddev->patch_version = info->patch_version; 3536 return 0; 3537 } 3538 mddev->major_version = MD_MAJOR_VERSION; 3539 mddev->minor_version = MD_MINOR_VERSION; 3540 mddev->patch_version = MD_PATCHLEVEL_VERSION; 3541 mddev->ctime = get_seconds(); 3542 3543 mddev->level = info->level; 3544 mddev->clevel[0] = 0; 3545 mddev->size = info->size; 3546 mddev->raid_disks = info->raid_disks; 3547 /* don't set md_minor, it is determined by which /dev/md* was 3548 * opened 3549 */ 3550 if (info->state & (1<<MD_SB_CLEAN)) 3551 mddev->recovery_cp = MaxSector; 3552 else 3553 mddev->recovery_cp = 0; 3554 mddev->persistent = !
info->not_persistent; 3555 3556 mddev->layout = info->layout; 3557 mddev->chunk_size = info->chunk_size; 3558 3559 mddev->max_disks = MD_SB_DISKS; 3560 3561 mddev->sb_dirty = 1; 3562 3563 mddev->default_bitmap_offset = MD_SB_BYTES >> 9; 3564 mddev->bitmap_offset = 0; 3565 3566 mddev->reshape_position = MaxSector; 3567 3568 /* 3569 * Generate a 128 bit UUID 3570 */ 3571 get_random_bytes(mddev->uuid, 16); 3572 3573 mddev->new_level = mddev->level; 3574 mddev->new_chunk = mddev->chunk_size; 3575 mddev->new_layout = mddev->layout; 3576 mddev->delta_disks = 0; 3577 3578 return 0; 3579 } 3580 3581 static int update_size(mddev_t *mddev, unsigned long size) 3582 { 3583 mdk_rdev_t * rdev; 3584 int rv; 3585 struct list_head *tmp; 3586 int fit = (size == 0); 3587 3588 if (mddev->pers->resize == NULL) 3589 return -EINVAL; 3590 /* The "size" is the amount of each device that is used. 3591 * This can only make sense for arrays with redundancy. 3592 * linear and raid0 always use whatever space is available 3593 * We can only consider changing the size if no resync 3594 * or reconstruction is happening, and if the new size 3595 * is acceptable. It must fit before the sb_offset or, 3596 * if that is <data_offset, it must fit before the 3597 * size of each device. 3598 * If size is zero, we find the largest size that fits. 3599 */ 3600 if (mddev->sync_thread) 3601 return -EBUSY; 3602 ITERATE_RDEV(mddev,rdev,tmp) { 3603 sector_t avail; 3604 if (rdev->sb_offset > rdev->data_offset) 3605 avail = (rdev->sb_offset*2) - rdev->data_offset; 3606 else 3607 avail = get_capacity(rdev->bdev->bd_disk) 3608 - rdev->data_offset; 3609 if (fit && (size == 0 || size > avail/2)) 3610 size = avail/2; 3611 if (avail < ((sector_t)size << 1)) 3612 return -ENOSPC; 3613 } 3614 rv = mddev->pers->resize(mddev, (sector_t)size *2); 3615 if (!rv) { 3616 struct block_device *bdev; 3617 3618 bdev = bdget_disk(mddev->gendisk, 0); 3619 if (bdev) { 3620 mutex_lock(&bdev->bd_inode->i_mutex); 3621 i_size_write(bdev->bd_inode, (loff_t)mddev->array_size << 10); 3622 mutex_unlock(&bdev->bd_inode->i_mutex); 3623 bdput(bdev); 3624 } 3625 } 3626 return rv; 3627 } 3628 3629 static int update_raid_disks(mddev_t *mddev, int raid_disks) 3630 { 3631 int rv; 3632 /* change the number of raid disks */ 3633 if (mddev->pers->check_reshape == NULL) 3634 return -EINVAL; 3635 if (raid_disks <= 0 || 3636 raid_disks >= mddev->max_disks) 3637 return -EINVAL; 3638 if (mddev->sync_thread || mddev->reshape_position != MaxSector) 3639 return -EBUSY; 3640 mddev->delta_disks = raid_disks - mddev->raid_disks; 3641 3642 rv = mddev->pers->check_reshape(mddev); 3643 return rv; 3644 } 3645 3646 3647 /* 3648 * update_array_info is used to change the configuration of an 3649 * on-line array. 3650 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size 3651 * fields in the info are checked against the array. 3652 * Any differences that cannot be handled will cause an error. 3653 * Normally, only one change can be managed at a time. 
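 * A minimal user-space sketch (device path illustrative, error handling
 * omitted): fetch the current layout, change exactly one field, and push
 * it back through SET_ARRAY_INFO, which ends up here once the array is
 * running:
 *
 *	mdu_array_info_t info;
 *	int fd = open("/dev/md0", O_RDONLY);
 *	ioctl(fd, GET_ARRAY_INFO, &info);
 *	info.raid_disks += 1;
 *	ioctl(fd, SET_ARRAY_INFO, &info);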
3654 */ 3655 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info) 3656 { 3657 int rv = 0; 3658 int cnt = 0; 3659 int state = 0; 3660 3661 /* calculate expected state,ignoring low bits */ 3662 if (mddev->bitmap && mddev->bitmap_offset) 3663 state |= (1 << MD_SB_BITMAP_PRESENT); 3664 3665 if (mddev->major_version != info->major_version || 3666 mddev->minor_version != info->minor_version || 3667 /* mddev->patch_version != info->patch_version || */ 3668 mddev->ctime != info->ctime || 3669 mddev->level != info->level || 3670 /* mddev->layout != info->layout || */ 3671 !mddev->persistent != info->not_persistent|| 3672 mddev->chunk_size != info->chunk_size || 3673 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ 3674 ((state^info->state) & 0xfffffe00) 3675 ) 3676 return -EINVAL; 3677 /* Check there is only one change */ 3678 if (info->size >= 0 && mddev->size != info->size) cnt++; 3679 if (mddev->raid_disks != info->raid_disks) cnt++; 3680 if (mddev->layout != info->layout) cnt++; 3681 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++; 3682 if (cnt == 0) return 0; 3683 if (cnt > 1) return -EINVAL; 3684 3685 if (mddev->layout != info->layout) { 3686 /* Change layout 3687 * we don't need to do anything at the md level, the 3688 * personality will take care of it all. 3689 */ 3690 if (mddev->pers->reconfig == NULL) 3691 return -EINVAL; 3692 else 3693 return mddev->pers->reconfig(mddev, info->layout, -1); 3694 } 3695 if (info->size >= 0 && mddev->size != info->size) 3696 rv = update_size(mddev, info->size); 3697 3698 if (mddev->raid_disks != info->raid_disks) 3699 rv = update_raid_disks(mddev, info->raid_disks); 3700 3701 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { 3702 if (mddev->pers->quiesce == NULL) 3703 return -EINVAL; 3704 if (mddev->recovery || mddev->sync_thread) 3705 return -EBUSY; 3706 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) { 3707 /* add the bitmap */ 3708 if (mddev->bitmap) 3709 return -EEXIST; 3710 if (mddev->default_bitmap_offset == 0) 3711 return -EINVAL; 3712 mddev->bitmap_offset = mddev->default_bitmap_offset; 3713 mddev->pers->quiesce(mddev, 1); 3714 rv = bitmap_create(mddev); 3715 if (rv) 3716 bitmap_destroy(mddev); 3717 mddev->pers->quiesce(mddev, 0); 3718 } else { 3719 /* remove the bitmap */ 3720 if (!mddev->bitmap) 3721 return -ENOENT; 3722 if (mddev->bitmap->file) 3723 return -EINVAL; 3724 mddev->pers->quiesce(mddev, 1); 3725 bitmap_destroy(mddev); 3726 mddev->pers->quiesce(mddev, 0); 3727 mddev->bitmap_offset = 0; 3728 } 3729 } 3730 md_update_sb(mddev); 3731 return rv; 3732 } 3733 3734 static int set_disk_faulty(mddev_t *mddev, dev_t dev) 3735 { 3736 mdk_rdev_t *rdev; 3737 3738 if (mddev->pers == NULL) 3739 return -ENODEV; 3740 3741 rdev = find_rdev(mddev, dev); 3742 if (!rdev) 3743 return -ENODEV; 3744 3745 md_error(mddev, rdev); 3746 return 0; 3747 } 3748 3749 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) 3750 { 3751 mddev_t *mddev = bdev->bd_disk->private_data; 3752 3753 geo->heads = 2; 3754 geo->sectors = 4; 3755 geo->cylinders = get_capacity(mddev->gendisk) / 8; 3756 return 0; 3757 } 3758 3759 static int md_ioctl(struct inode *inode, struct file *file, 3760 unsigned int cmd, unsigned long arg) 3761 { 3762 int err = 0; 3763 void __user *argp = (void __user *)arg; 3764 mddev_t *mddev = NULL; 3765 3766 if (!capable(CAP_SYS_ADMIN)) 3767 return -EACCES; 3768 3769 /* 3770 * Commands dealing with the RAID driver but not any 3771 * particular array: 3772 */ 3773 switch (cmd) 3774 { 
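	/*
	 * e.g. querying the driver version needs no configured array at all;
	 * an illustrative caller can simply do
	 *
	 *	mdu_version_t ver;
	 *	ioctl(fd, RAID_VERSION, &ver);
	 *
	 * and read ver.major, ver.minor and ver.patchlevel as filled in by
	 * get_version() above.
	 */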
3775 case RAID_VERSION: 3776 err = get_version(argp); 3777 goto done; 3778 3779 case PRINT_RAID_DEBUG: 3780 err = 0; 3781 md_print_devices(); 3782 goto done; 3783 3784 #ifndef MODULE 3785 case RAID_AUTORUN: 3786 err = 0; 3787 autostart_arrays(arg); 3788 goto done; 3789 #endif 3790 default:; 3791 } 3792 3793 /* 3794 * Commands creating/starting a new array: 3795 */ 3796 3797 mddev = inode->i_bdev->bd_disk->private_data; 3798 3799 if (!mddev) { 3800 BUG(); 3801 goto abort; 3802 } 3803 3804 3805 if (cmd == START_ARRAY) { 3806 /* START_ARRAY doesn't need to lock the array as autostart_array 3807 * does the locking, and it could even be a different array 3808 */ 3809 static int cnt = 3; 3810 if (cnt > 0 ) { 3811 printk(KERN_WARNING 3812 "md: %s(pid %d) used deprecated START_ARRAY ioctl. " 3813 "This will not be supported beyond July 2006\n", 3814 current->comm, current->pid); 3815 cnt--; 3816 } 3817 err = autostart_array(new_decode_dev(arg)); 3818 if (err) { 3819 printk(KERN_WARNING "md: autostart failed!\n"); 3820 goto abort; 3821 } 3822 goto done; 3823 } 3824 3825 err = mddev_lock(mddev); 3826 if (err) { 3827 printk(KERN_INFO 3828 "md: ioctl lock interrupted, reason %d, cmd %d\n", 3829 err, cmd); 3830 goto abort; 3831 } 3832 3833 switch (cmd) 3834 { 3835 case SET_ARRAY_INFO: 3836 { 3837 mdu_array_info_t info; 3838 if (!arg) 3839 memset(&info, 0, sizeof(info)); 3840 else if (copy_from_user(&info, argp, sizeof(info))) { 3841 err = -EFAULT; 3842 goto abort_unlock; 3843 } 3844 if (mddev->pers) { 3845 err = update_array_info(mddev, &info); 3846 if (err) { 3847 printk(KERN_WARNING "md: couldn't update" 3848 " array info. %d\n", err); 3849 goto abort_unlock; 3850 } 3851 goto done_unlock; 3852 } 3853 if (!list_empty(&mddev->disks)) { 3854 printk(KERN_WARNING 3855 "md: array %s already has disks!\n", 3856 mdname(mddev)); 3857 err = -EBUSY; 3858 goto abort_unlock; 3859 } 3860 if (mddev->raid_disks) { 3861 printk(KERN_WARNING 3862 "md: array %s already initialised!\n", 3863 mdname(mddev)); 3864 err = -EBUSY; 3865 goto abort_unlock; 3866 } 3867 err = set_array_info(mddev, &info); 3868 if (err) { 3869 printk(KERN_WARNING "md: couldn't set" 3870 " array info. %d\n", err); 3871 goto abort_unlock; 3872 } 3873 } 3874 goto done_unlock; 3875 3876 default:; 3877 } 3878 3879 /* 3880 * Commands querying/configuring an existing array: 3881 */ 3882 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY, 3883 * RUN_ARRAY, and SET_BITMAP_FILE are allowed */ 3884 if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY 3885 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE) { 3886 err = -ENODEV; 3887 goto abort_unlock; 3888 } 3889 3890 /* 3891 * Commands even a read-only array can execute: 3892 */ 3893 switch (cmd) 3894 { 3895 case GET_ARRAY_INFO: 3896 err = get_array_info(mddev, argp); 3897 goto done_unlock; 3898 3899 case GET_BITMAP_FILE: 3900 err = get_bitmap_file(mddev, argp); 3901 goto done_unlock; 3902 3903 case GET_DISK_INFO: 3904 err = get_disk_info(mddev, argp); 3905 goto done_unlock; 3906 3907 case RESTART_ARRAY_RW: 3908 err = restart_array(mddev); 3909 goto done_unlock; 3910 3911 case STOP_ARRAY: 3912 err = do_md_stop (mddev, 0); 3913 goto done_unlock; 3914 3915 case STOP_ARRAY_RO: 3916 err = do_md_stop (mddev, 1); 3917 goto done_unlock; 3918 3919 /* 3920 * We have a problem here : there is no easy way to give a CHS 3921 * virtual geometry. We currently pretend that we have a 2 heads 3922 * 4 sectors (with a BIG number of cylinders...). This drives 3923 * dosfs just mad... 
;-) 3924 */ 3925 } 3926 3927 /* 3928 * The remaining ioctls are changing the state of the 3929 * superblock, so we do not allow them on read-only arrays. 3930 * However non-MD ioctls (e.g. get-size) will still come through 3931 * here and hit the 'default' below, so only disallow 3932 * 'md' ioctls, and switch to rw mode if started auto-readonly. 3933 */ 3934 if (_IOC_TYPE(cmd) == MD_MAJOR && 3935 mddev->ro && mddev->pers) { 3936 if (mddev->ro == 2) { 3937 mddev->ro = 0; 3938 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3939 md_wakeup_thread(mddev->thread); 3940 3941 } else { 3942 err = -EROFS; 3943 goto abort_unlock; 3944 } 3945 } 3946 3947 switch (cmd) 3948 { 3949 case ADD_NEW_DISK: 3950 { 3951 mdu_disk_info_t info; 3952 if (copy_from_user(&info, argp, sizeof(info))) 3953 err = -EFAULT; 3954 else 3955 err = add_new_disk(mddev, &info); 3956 goto done_unlock; 3957 } 3958 3959 case HOT_REMOVE_DISK: 3960 err = hot_remove_disk(mddev, new_decode_dev(arg)); 3961 goto done_unlock; 3962 3963 case HOT_ADD_DISK: 3964 err = hot_add_disk(mddev, new_decode_dev(arg)); 3965 goto done_unlock; 3966 3967 case SET_DISK_FAULTY: 3968 err = set_disk_faulty(mddev, new_decode_dev(arg)); 3969 goto done_unlock; 3970 3971 case RUN_ARRAY: 3972 err = do_md_run (mddev); 3973 goto done_unlock; 3974 3975 case SET_BITMAP_FILE: 3976 err = set_bitmap_file(mddev, (int)arg); 3977 goto done_unlock; 3978 3979 default: 3980 if (_IOC_TYPE(cmd) == MD_MAJOR) 3981 printk(KERN_WARNING "md: %s(pid %d) used" 3982 " obsolete MD ioctl, upgrade your" 3983 " software to use new ioctls.\n", 3984 current->comm, current->pid); 3985 err = -EINVAL; 3986 goto abort_unlock; 3987 } 3988 3989 done_unlock: 3990 abort_unlock: 3991 mddev_unlock(mddev); 3992 3993 return err; 3994 done: 3995 if (err) 3996 MD_BUG(); 3997 abort: 3998 return err; 3999 } 4000 4001 static int md_open(struct inode *inode, struct file *file) 4002 { 4003 /* 4004 * Succeed if we can lock the mddev, which confirms that 4005 * it isn't being stopped right now. 4006 */ 4007 mddev_t *mddev = inode->i_bdev->bd_disk->private_data; 4008 int err; 4009 4010 if ((err = mddev_lock(mddev))) 4011 goto out; 4012 4013 err = 0; 4014 mddev_get(mddev); 4015 mddev_unlock(mddev); 4016 4017 check_disk_change(inode->i_bdev); 4018 out: 4019 return err; 4020 } 4021 4022 static int md_release(struct inode *inode, struct file * file) 4023 { 4024 mddev_t *mddev = inode->i_bdev->bd_disk->private_data; 4025 4026 if (!mddev) 4027 BUG(); 4028 mddev_put(mddev); 4029 4030 return 0; 4031 } 4032 4033 static int md_media_changed(struct gendisk *disk) 4034 { 4035 mddev_t *mddev = disk->private_data; 4036 4037 return mddev->changed; 4038 } 4039 4040 static int md_revalidate(struct gendisk *disk) 4041 { 4042 mddev_t *mddev = disk->private_data; 4043 4044 mddev->changed = 0; 4045 return 0; 4046 } 4047 static struct block_device_operations md_fops = 4048 { 4049 .owner = THIS_MODULE, 4050 .open = md_open, 4051 .release = md_release, 4052 .ioctl = md_ioctl, 4053 .getgeo = md_getgeo, 4054 .media_changed = md_media_changed, 4055 .revalidate_disk= md_revalidate, 4056 }; 4057 4058 static int md_thread(void * arg) 4059 { 4060 mdk_thread_t *thread = arg; 4061 4062 /* 4063 * md_thread is a 'system-thread', its priority should be very 4064 * high. We avoid resource deadlocks individually in each 4065 * raid personality. (RAID5 does preallocation) We also use RR and 4066 * the very same RT priority as kswapd, thus we will never get 4067 * into a priority inversion deadlock.
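 *
 * Personalities obtain such a thread from md_register_thread() below and
 * poke it with md_wakeup_thread(); every wakeup (or timeout) makes the
 * loop call thread->run(mddev).  A hypothetical personality would do
 * something along the lines of
 *
 *	mddev->thread = md_register_thread(myraidd, mddev, "%s_myraid");
 *	...
 *	md_wakeup_thread(mddev->thread);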
4068 * 4069 * we definitely have to have equal or higher priority than 4070 * bdflush, otherwise bdflush will deadlock if there are too 4071 * many dirty RAID5 blocks. 4072 */ 4073 4074 allow_signal(SIGKILL); 4075 while (!kthread_should_stop()) { 4076 4077 /* We need to wait INTERRUPTIBLE so that 4078 * we don't add to the load-average. 4079 * That means we need to be sure no signals are 4080 * pending 4081 */ 4082 if (signal_pending(current)) 4083 flush_signals(current); 4084 4085 wait_event_interruptible_timeout 4086 (thread->wqueue, 4087 test_bit(THREAD_WAKEUP, &thread->flags) 4088 || kthread_should_stop(), 4089 thread->timeout); 4090 try_to_freeze(); 4091 4092 clear_bit(THREAD_WAKEUP, &thread->flags); 4093 4094 thread->run(thread->mddev); 4095 } 4096 4097 return 0; 4098 } 4099 4100 void md_wakeup_thread(mdk_thread_t *thread) 4101 { 4102 if (thread) { 4103 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm); 4104 set_bit(THREAD_WAKEUP, &thread->flags); 4105 wake_up(&thread->wqueue); 4106 } 4107 } 4108 4109 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev, 4110 const char *name) 4111 { 4112 mdk_thread_t *thread; 4113 4114 thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL); 4115 if (!thread) 4116 return NULL; 4117 4118 init_waitqueue_head(&thread->wqueue); 4119 4120 thread->run = run; 4121 thread->mddev = mddev; 4122 thread->timeout = MAX_SCHEDULE_TIMEOUT; 4123 thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev)); 4124 if (IS_ERR(thread->tsk)) { 4125 kfree(thread); 4126 return NULL; 4127 } 4128 return thread; 4129 } 4130 4131 void md_unregister_thread(mdk_thread_t *thread) 4132 { 4133 dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid); 4134 4135 kthread_stop(thread->tsk); 4136 kfree(thread); 4137 } 4138 4139 void md_error(mddev_t *mddev, mdk_rdev_t *rdev) 4140 { 4141 if (!mddev) { 4142 MD_BUG(); 4143 return; 4144 } 4145 4146 if (!rdev || test_bit(Faulty, &rdev->flags)) 4147 return; 4148 /* 4149 dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n", 4150 mdname(mddev), 4151 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev), 4152 __builtin_return_address(0),__builtin_return_address(1), 4153 __builtin_return_address(2),__builtin_return_address(3)); 4154 */ 4155 if (!mddev->pers->error_handler) 4156 return; 4157 mddev->pers->error_handler(mddev,rdev); 4158 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4159 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4160 md_wakeup_thread(mddev->thread); 4161 md_new_event_inintr(mddev); 4162 } 4163 4164 /* seq_file implementation /proc/mdstat */ 4165 4166 static void status_unused(struct seq_file *seq) 4167 { 4168 int i = 0; 4169 mdk_rdev_t *rdev; 4170 struct list_head *tmp; 4171 4172 seq_printf(seq, "unused devices: "); 4173 4174 ITERATE_RDEV_PENDING(rdev,tmp) { 4175 char b[BDEVNAME_SIZE]; 4176 i++; 4177 seq_printf(seq, "%s ", 4178 bdevname(rdev->bdev,b)); 4179 } 4180 if (!i) 4181 seq_printf(seq, "<none>"); 4182 4183 seq_printf(seq, "\n"); 4184 } 4185 4186 4187 static void status_resync(struct seq_file *seq, mddev_t * mddev) 4188 { 4189 sector_t max_blocks, resync, res; 4190 unsigned long dt, db, rt; 4191 int scale; 4192 unsigned int per_milli; 4193 4194 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2; 4195 4196 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 4197 max_blocks = mddev->resync_max_sectors >> 1; 4198 else 4199 max_blocks = mddev->size; 4200 4201 /* 4202 * Should not happen. 
4203 */ 4204 if (!max_blocks) { 4205 MD_BUG(); 4206 return; 4207 } 4208 /* Pick 'scale' such that (resync>>scale)*1000 will fit 4209 * in a sector_t, and (max_blocks>>scale) will fit in a 4210 * u32, as those are the requirements for sector_div. 4211 * Thus 'scale' must be at least 10 4212 */ 4213 scale = 10; 4214 if (sizeof(sector_t) > sizeof(unsigned long)) { 4215 while ( max_blocks/2 > (1ULL<<(scale+32))) 4216 scale++; 4217 } 4218 res = (resync>>scale)*1000; 4219 sector_div(res, (u32)((max_blocks>>scale)+1)); 4220 4221 per_milli = res; 4222 { 4223 int i, x = per_milli/50, y = 20-x; 4224 seq_printf(seq, "["); 4225 for (i = 0; i < x; i++) 4226 seq_printf(seq, "="); 4227 seq_printf(seq, ">"); 4228 for (i = 0; i < y; i++) 4229 seq_printf(seq, "."); 4230 seq_printf(seq, "] "); 4231 } 4232 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)", 4233 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? 4234 "reshape" : 4235 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? 4236 "resync" : "recovery")), 4237 per_milli/10, per_milli % 10, 4238 (unsigned long long) resync, 4239 (unsigned long long) max_blocks); 4240 4241 /* 4242 * We do not want to overflow, so the order of operands and 4243 * the * 100 / 100 trick are important. We do a +1 to be 4244 * safe against division by zero. We only estimate anyway. 4245 * 4246 * dt: time from mark until now 4247 * db: blocks written from mark until now 4248 * rt: remaining time 4249 */ 4250 dt = ((jiffies - mddev->resync_mark) / HZ); 4251 if (!dt) dt++; 4252 db = resync - (mddev->resync_mark_cnt/2); 4253 rt = (dt * ((unsigned long)(max_blocks-resync) / (db/100+1)))/100; 4254 4255 seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6); 4256 4257 seq_printf(seq, " speed=%ldK/sec", db/dt); 4258 } 4259 4260 static void *md_seq_start(struct seq_file *seq, loff_t *pos) 4261 { 4262 struct list_head *tmp; 4263 loff_t l = *pos; 4264 mddev_t *mddev; 4265 4266 if (l >= 0x10000) 4267 return NULL; 4268 if (!l--) 4269 /* header */ 4270 return (void*)1; 4271 4272 spin_lock(&all_mddevs_lock); 4273 list_for_each(tmp,&all_mddevs) 4274 if (!l--) { 4275 mddev = list_entry(tmp, mddev_t, all_mddevs); 4276 mddev_get(mddev); 4277 spin_unlock(&all_mddevs_lock); 4278 return mddev; 4279 } 4280 spin_unlock(&all_mddevs_lock); 4281 if (!l--) 4282 return (void*)2;/* tail */ 4283 return NULL; 4284 } 4285 4286 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) 4287 { 4288 struct list_head *tmp; 4289 mddev_t *next_mddev, *mddev = v; 4290 4291 ++*pos; 4292 if (v == (void*)2) 4293 return NULL; 4294 4295 spin_lock(&all_mddevs_lock); 4296 if (v == (void*)1) 4297 tmp = all_mddevs.next; 4298 else 4299 tmp = mddev->all_mddevs.next; 4300 if (tmp != &all_mddevs) 4301 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs)); 4302 else { 4303 next_mddev = (void*)2; 4304 *pos = 0x10000; 4305 } 4306 spin_unlock(&all_mddevs_lock); 4307 4308 if (v != (void*)1) 4309 mddev_put(mddev); 4310 return next_mddev; 4311 4312 } 4313 4314 static void md_seq_stop(struct seq_file *seq, void *v) 4315 { 4316 mddev_t *mddev = v; 4317 4318 if (mddev && v != (void*)1 && v != (void*)2) 4319 mddev_put(mddev); 4320 } 4321 4322 struct mdstat_info { 4323 int event; 4324 }; 4325 4326 static int md_seq_show(struct seq_file *seq, void *v) 4327 { 4328 mddev_t *mddev = v; 4329 sector_t size; 4330 struct list_head *tmp2; 4331 mdk_rdev_t *rdev; 4332 struct mdstat_info *mi = seq->private; 4333 struct bitmap *bitmap; 4334 4335 if (v == (void*)1) { 4336 struct mdk_personality *pers; 4337 seq_printf(seq, "Personalities : "); 
4338 spin_lock(&pers_lock); 4339 list_for_each_entry(pers, &pers_list, list) 4340 seq_printf(seq, "[%s] ", pers->name); 4341 4342 spin_unlock(&pers_lock); 4343 seq_printf(seq, "\n"); 4344 mi->event = atomic_read(&md_event_count); 4345 return 0; 4346 } 4347 if (v == (void*)2) { 4348 status_unused(seq); 4349 return 0; 4350 } 4351 4352 if (mddev_lock(mddev) < 0) 4353 return -EINTR; 4354 4355 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { 4356 seq_printf(seq, "%s : %sactive", mdname(mddev), 4357 mddev->pers ? "" : "in"); 4358 if (mddev->pers) { 4359 if (mddev->ro==1) 4360 seq_printf(seq, " (read-only)"); 4361 if (mddev->ro==2) 4362 seq_printf(seq, "(auto-read-only)"); 4363 seq_printf(seq, " %s", mddev->pers->name); 4364 } 4365 4366 size = 0; 4367 ITERATE_RDEV(mddev,rdev,tmp2) { 4368 char b[BDEVNAME_SIZE]; 4369 seq_printf(seq, " %s[%d]", 4370 bdevname(rdev->bdev,b), rdev->desc_nr); 4371 if (test_bit(WriteMostly, &rdev->flags)) 4372 seq_printf(seq, "(W)"); 4373 if (test_bit(Faulty, &rdev->flags)) { 4374 seq_printf(seq, "(F)"); 4375 continue; 4376 } else if (rdev->raid_disk < 0) 4377 seq_printf(seq, "(S)"); /* spare */ 4378 size += rdev->size; 4379 } 4380 4381 if (!list_empty(&mddev->disks)) { 4382 if (mddev->pers) 4383 seq_printf(seq, "\n %llu blocks", 4384 (unsigned long long)mddev->array_size); 4385 else 4386 seq_printf(seq, "\n %llu blocks", 4387 (unsigned long long)size); 4388 } 4389 if (mddev->persistent) { 4390 if (mddev->major_version != 0 || 4391 mddev->minor_version != 90) { 4392 seq_printf(seq," super %d.%d", 4393 mddev->major_version, 4394 mddev->minor_version); 4395 } 4396 } else 4397 seq_printf(seq, " super non-persistent"); 4398 4399 if (mddev->pers) { 4400 mddev->pers->status (seq, mddev); 4401 seq_printf(seq, "\n "); 4402 if (mddev->pers->sync_request) { 4403 if (mddev->curr_resync > 2) { 4404 status_resync (seq, mddev); 4405 seq_printf(seq, "\n "); 4406 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2) 4407 seq_printf(seq, "\tresync=DELAYED\n "); 4408 else if (mddev->recovery_cp < MaxSector) 4409 seq_printf(seq, "\tresync=PENDING\n "); 4410 } 4411 } else 4412 seq_printf(seq, "\n "); 4413 4414 if ((bitmap = mddev->bitmap)) { 4415 unsigned long chunk_kb; 4416 unsigned long flags; 4417 spin_lock_irqsave(&bitmap->lock, flags); 4418 chunk_kb = bitmap->chunksize >> 10; 4419 seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], " 4420 "%lu%s chunk", 4421 bitmap->pages - bitmap->missing_pages, 4422 bitmap->pages, 4423 (bitmap->pages - bitmap->missing_pages) 4424 << (PAGE_SHIFT - 10), 4425 chunk_kb ? chunk_kb : bitmap->chunksize, 4426 chunk_kb ? 
"KB" : "B"); 4427 if (bitmap->file) { 4428 seq_printf(seq, ", file: "); 4429 seq_path(seq, bitmap->file->f_vfsmnt, 4430 bitmap->file->f_dentry," \t\n"); 4431 } 4432 4433 seq_printf(seq, "\n"); 4434 spin_unlock_irqrestore(&bitmap->lock, flags); 4435 } 4436 4437 seq_printf(seq, "\n"); 4438 } 4439 mddev_unlock(mddev); 4440 4441 return 0; 4442 } 4443 4444 static struct seq_operations md_seq_ops = { 4445 .start = md_seq_start, 4446 .next = md_seq_next, 4447 .stop = md_seq_stop, 4448 .show = md_seq_show, 4449 }; 4450 4451 static int md_seq_open(struct inode *inode, struct file *file) 4452 { 4453 int error; 4454 struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL); 4455 if (mi == NULL) 4456 return -ENOMEM; 4457 4458 error = seq_open(file, &md_seq_ops); 4459 if (error) 4460 kfree(mi); 4461 else { 4462 struct seq_file *p = file->private_data; 4463 p->private = mi; 4464 mi->event = atomic_read(&md_event_count); 4465 } 4466 return error; 4467 } 4468 4469 static int md_seq_release(struct inode *inode, struct file *file) 4470 { 4471 struct seq_file *m = file->private_data; 4472 struct mdstat_info *mi = m->private; 4473 m->private = NULL; 4474 kfree(mi); 4475 return seq_release(inode, file); 4476 } 4477 4478 static unsigned int mdstat_poll(struct file *filp, poll_table *wait) 4479 { 4480 struct seq_file *m = filp->private_data; 4481 struct mdstat_info *mi = m->private; 4482 int mask; 4483 4484 poll_wait(filp, &md_event_waiters, wait); 4485 4486 /* always allow read */ 4487 mask = POLLIN | POLLRDNORM; 4488 4489 if (mi->event != atomic_read(&md_event_count)) 4490 mask |= POLLERR | POLLPRI; 4491 return mask; 4492 } 4493 4494 static struct file_operations md_seq_fops = { 4495 .open = md_seq_open, 4496 .read = seq_read, 4497 .llseek = seq_lseek, 4498 .release = md_seq_release, 4499 .poll = mdstat_poll, 4500 }; 4501 4502 int register_md_personality(struct mdk_personality *p) 4503 { 4504 spin_lock(&pers_lock); 4505 list_add_tail(&p->list, &pers_list); 4506 printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level); 4507 spin_unlock(&pers_lock); 4508 return 0; 4509 } 4510 4511 int unregister_md_personality(struct mdk_personality *p) 4512 { 4513 printk(KERN_INFO "md: %s personality unregistered\n", p->name); 4514 spin_lock(&pers_lock); 4515 list_del_init(&p->list); 4516 spin_unlock(&pers_lock); 4517 return 0; 4518 } 4519 4520 static int is_mddev_idle(mddev_t *mddev) 4521 { 4522 mdk_rdev_t * rdev; 4523 struct list_head *tmp; 4524 int idle; 4525 unsigned long curr_events; 4526 4527 idle = 1; 4528 ITERATE_RDEV(mddev,rdev,tmp) { 4529 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; 4530 curr_events = disk_stat_read(disk, sectors[0]) + 4531 disk_stat_read(disk, sectors[1]) - 4532 atomic_read(&disk->sync_io); 4533 /* The difference between curr_events and last_events 4534 * will be affected by any new non-sync IO (making 4535 * curr_events bigger) and any difference in the amount of 4536 * in-flight syncio (making current_events bigger or smaller) 4537 * The amount in-flight is currently limited to 4538 * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6 4539 * which is at most 4096 sectors. 4540 * These numbers are fairly fragile and should be made 4541 * more robust, probably by enforcing the 4542 * 'window size' that md_do_sync sort-of uses. 4543 * 4544 * Note: the following is an unsigned comparison. 
4545 */ 4546 if ((curr_events - rdev->last_events + 4096) > 8192) { 4547 rdev->last_events = curr_events; 4548 idle = 0; 4549 } 4550 } 4551 return idle; 4552 } 4553 4554 void md_done_sync(mddev_t *mddev, int blocks, int ok) 4555 { 4556 /* another "blocks" (512-byte) blocks have been synced */ 4557 atomic_sub(blocks, &mddev->recovery_active); 4558 wake_up(&mddev->recovery_wait); 4559 if (!ok) { 4560 set_bit(MD_RECOVERY_ERR, &mddev->recovery); 4561 md_wakeup_thread(mddev->thread); 4562 // stop recovery, signal do_sync .... 4563 } 4564 } 4565 4566 4567 /* md_write_start(mddev, bi) 4568 * If we need to update some array metadata (e.g. 'active' flag 4569 * in superblock) before writing, schedule a superblock update 4570 * and wait for it to complete. 4571 */ 4572 void md_write_start(mddev_t *mddev, struct bio *bi) 4573 { 4574 if (bio_data_dir(bi) != WRITE) 4575 return; 4576 4577 BUG_ON(mddev->ro == 1); 4578 if (mddev->ro == 2) { 4579 /* need to switch to read/write */ 4580 mddev->ro = 0; 4581 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4582 md_wakeup_thread(mddev->thread); 4583 } 4584 atomic_inc(&mddev->writes_pending); 4585 if (mddev->in_sync) { 4586 spin_lock_irq(&mddev->write_lock); 4587 if (mddev->in_sync) { 4588 mddev->in_sync = 0; 4589 mddev->sb_dirty = 1; 4590 md_wakeup_thread(mddev->thread); 4591 } 4592 spin_unlock_irq(&mddev->write_lock); 4593 } 4594 wait_event(mddev->sb_wait, mddev->sb_dirty==0); 4595 } 4596 4597 void md_write_end(mddev_t *mddev) 4598 { 4599 if (atomic_dec_and_test(&mddev->writes_pending)) { 4600 if (mddev->safemode == 2) 4601 md_wakeup_thread(mddev->thread); 4602 else 4603 mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay); 4604 } 4605 } 4606 4607 static DECLARE_WAIT_QUEUE_HEAD(resync_wait); 4608 4609 #define SYNC_MARKS 10 4610 #define SYNC_MARK_STEP (3*HZ) 4611 void md_do_sync(mddev_t *mddev) 4612 { 4613 mddev_t *mddev2; 4614 unsigned int currspeed = 0, 4615 window; 4616 sector_t max_sectors,j, io_sectors; 4617 unsigned long mark[SYNC_MARKS]; 4618 sector_t mark_cnt[SYNC_MARKS]; 4619 int last_mark,m; 4620 struct list_head *tmp; 4621 sector_t last_check; 4622 int skipped = 0; 4623 4624 /* just in case the thread restarts... */ 4625 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) 4626 return; 4627 4628 /* we overload curr_resync somewhat here. 4629 * 0 == not engaged in resync at all 4630 * 2 == checking that there is no conflict with another sync 4631 * 1 == like 2, but have yielded to allow conflicting resync to 4632 * commence 4633 * other == active in resync - this many blocks 4634 * 4635 * Before starting a resync we must have set curr_resync to 4636 * 2, and then checked that every "conflicting" array has curr_resync 4637 * less than ours. When we find one that is the same or higher 4638 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync 4639 * to 1 if we choose to yield (based arbitrarily on address of mddev structure). 4640 * This will mean we have to start checking from the beginning again.
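	 *
	 * Editorial illustration: if md0 and md1 share a physical device and
	 * both reach curr_resync == 2, the mddev with the lower address
	 * arbitrarily drops back to 1 and waits on resync_wait until the
	 * other array is no longer resyncing.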
4641 * 4642 */ 4643 4644 do { 4645 mddev->curr_resync = 2; 4646 4647 try_again: 4648 if (kthread_should_stop()) { 4649 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4650 goto skip; 4651 } 4652 ITERATE_MDDEV(mddev2,tmp) { 4653 if (mddev2 == mddev) 4654 continue; 4655 if (mddev2->curr_resync && 4656 match_mddev_units(mddev,mddev2)) { 4657 DEFINE_WAIT(wq); 4658 if (mddev < mddev2 && mddev->curr_resync == 2) { 4659 /* arbitrarily yield */ 4660 mddev->curr_resync = 1; 4661 wake_up(&resync_wait); 4662 } 4663 if (mddev > mddev2 && mddev->curr_resync == 1) 4664 /* no need to wait here, we can wait the next 4665 * time 'round when curr_resync == 2 4666 */ 4667 continue; 4668 prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE); 4669 if (!kthread_should_stop() && 4670 mddev2->curr_resync >= mddev->curr_resync) { 4671 printk(KERN_INFO "md: delaying resync of %s" 4672 " until %s has finished resync (they" 4673 " share one or more physical units)\n", 4674 mdname(mddev), mdname(mddev2)); 4675 mddev_put(mddev2); 4676 schedule(); 4677 finish_wait(&resync_wait, &wq); 4678 goto try_again; 4679 } 4680 finish_wait(&resync_wait, &wq); 4681 } 4682 } 4683 } while (mddev->curr_resync < 2); 4684 4685 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 4686 /* resync follows the size requested by the personality, 4687 * which defaults to physical size, but can be virtual size 4688 */ 4689 max_sectors = mddev->resync_max_sectors; 4690 mddev->resync_mismatches = 0; 4691 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 4692 max_sectors = mddev->size << 1; 4693 else 4694 /* recovery follows the physical size of devices */ 4695 max_sectors = mddev->size << 1; 4696 4697 printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev)); 4698 printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:" 4699 " %d KB/sec/disc.\n", speed_min(mddev)); 4700 printk(KERN_INFO "md: using maximum available idle IO bandwidth " 4701 "(but not more than %d KB/sec) for reconstruction.\n", 4702 speed_max(mddev)); 4703 4704 is_mddev_idle(mddev); /* this also initializes IO event counters */ 4705 /* we don't use the checkpoint if there's a bitmap */ 4706 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && !mddev->bitmap 4707 && ! 
test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 4708 j = mddev->recovery_cp; 4709 else 4710 j = 0; 4711 io_sectors = 0; 4712 for (m = 0; m < SYNC_MARKS; m++) { 4713 mark[m] = jiffies; 4714 mark_cnt[m] = io_sectors; 4715 } 4716 last_mark = 0; 4717 mddev->resync_mark = mark[last_mark]; 4718 mddev->resync_mark_cnt = mark_cnt[last_mark]; 4719 4720 /* 4721 * Tune reconstruction: 4722 */ 4723 window = 32*(PAGE_SIZE/512); 4724 printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n", 4725 window/2,(unsigned long long) max_sectors/2); 4726 4727 atomic_set(&mddev->recovery_active, 0); 4728 init_waitqueue_head(&mddev->recovery_wait); 4729 last_check = 0; 4730 4731 if (j>2) { 4732 printk(KERN_INFO 4733 "md: resuming recovery of %s from checkpoint.\n", 4734 mdname(mddev)); 4735 mddev->curr_resync = j; 4736 } 4737 4738 while (j < max_sectors) { 4739 sector_t sectors; 4740 4741 skipped = 0; 4742 sectors = mddev->pers->sync_request(mddev, j, &skipped, 4743 currspeed < speed_min(mddev)); 4744 if (sectors == 0) { 4745 set_bit(MD_RECOVERY_ERR, &mddev->recovery); 4746 goto out; 4747 } 4748 4749 if (!skipped) { /* actual IO requested */ 4750 io_sectors += sectors; 4751 atomic_add(sectors, &mddev->recovery_active); 4752 } 4753 4754 j += sectors; 4755 if (j>1) mddev->curr_resync = j; 4756 if (last_check == 0) 4757 /* this is the earliest that the rebuild will be 4758 * visible in /proc/mdstat 4759 */ 4760 md_new_event(mddev); 4761 4762 if (last_check + window > io_sectors || j == max_sectors) 4763 continue; 4764 4765 last_check = io_sectors; 4766 4767 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) || 4768 test_bit(MD_RECOVERY_ERR, &mddev->recovery)) 4769 break; 4770 4771 repeat: 4772 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) { 4773 /* step marks */ 4774 int next = (last_mark+1) % SYNC_MARKS; 4775 4776 mddev->resync_mark = mark[next]; 4777 mddev->resync_mark_cnt = mark_cnt[next]; 4778 mark[next] = jiffies; 4779 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active); 4780 last_mark = next; 4781 } 4782 4783 4784 if (kthread_should_stop()) { 4785 /* 4786 * got a signal, exit. 4787 */ 4788 printk(KERN_INFO 4789 "md: md_do_sync() got signal ... exiting\n"); 4790 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4791 goto out; 4792 } 4793 4794 /* 4795 * this loop exits only if we are either slower than 4796 * the 'hard' speed limit, or the system was IO-idle for 4797 * a jiffy. 4798 * the system might be non-idle CPU-wise, but we only care 4799 * about not overloading the IO subsystem.
(things like an 4800 * e2fsck being done on the RAID array should execute fast) 4801 */ 4802 mddev->queue->unplug_fn(mddev->queue); 4803 cond_resched(); 4804 4805 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2 4806 /((jiffies-mddev->resync_mark)/HZ +1) +1; 4807 4808 if (currspeed > speed_min(mddev)) { 4809 if ((currspeed > speed_max(mddev)) || 4810 !is_mddev_idle(mddev)) { 4811 msleep(500); 4812 goto repeat; 4813 } 4814 } 4815 } 4816 printk(KERN_INFO "md: %s: sync done.\n",mdname(mddev)); 4817 /* 4818 * this also signals 'finished resyncing' to md_stop 4819 */ 4820 out: 4821 mddev->queue->unplug_fn(mddev->queue); 4822 4823 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); 4824 4825 /* tell personality that we are finished */ 4826 mddev->pers->sync_request(mddev, max_sectors, &skipped, 1); 4827 4828 if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) && 4829 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && 4830 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && 4831 mddev->curr_resync > 2 && 4832 mddev->curr_resync >= mddev->recovery_cp) { 4833 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 4834 printk(KERN_INFO 4835 "md: checkpointing recovery of %s.\n", 4836 mdname(mddev)); 4837 mddev->recovery_cp = mddev->curr_resync; 4838 } else 4839 mddev->recovery_cp = MaxSector; 4840 } 4841 4842 skip: 4843 mddev->curr_resync = 0; 4844 wake_up(&resync_wait); 4845 set_bit(MD_RECOVERY_DONE, &mddev->recovery); 4846 md_wakeup_thread(mddev->thread); 4847 } 4848 EXPORT_SYMBOL_GPL(md_do_sync); 4849 4850 4851 /* 4852 * This routine is regularly called by all per-raid-array threads to 4853 * deal with generic issues like resync and super-block update. 4854 * Raid personalities that don't have a thread (linear/raid0) do not 4855 * need this as they never do any recovery or update the superblock. 4856 * 4857 * It does not do any resync itself, but rather "forks" off other threads 4858 * to do that as needed. 4859 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in 4860 * "->recovery" and create a thread at ->sync_thread. 4861 * When the thread finishes it sets MD_RECOVERY_DONE (and might set MD_RECOVERY_ERR) 4862 * and wakes up this thread which will reap the thread and finish up. 4863 * This thread also removes any faulty devices (with nr_pending == 0). 4864 * 4865 * The overall approach is: 4866 * 1/ If the superblock needs updating, update it. 4867 * 2/ If a recovery thread is running, don't do anything else. 4868 * 3/ If recovery has finished, clean up, possibly marking spares active. 4869 * 4/ If there are any faulty devices, remove them. 4870 * 5/ If the array is degraded, try to add spare devices. 4871 * 6/ If the array has spares or is not in-sync, start a resync thread. 4872 */ 4873 void md_check_recovery(mddev_t *mddev) 4874 { 4875 mdk_rdev_t *rdev; 4876 struct list_head *rtmp; 4877 4878 4879 if (mddev->bitmap) 4880 bitmap_daemon_work(mddev->bitmap); 4881 4882 if (mddev->ro) 4883 return; 4884 4885 if (signal_pending(current)) { 4886 if (mddev->pers->sync_request) { 4887 printk(KERN_INFO "md: %s in immediate safe mode\n", 4888 mdname(mddev)); 4889 mddev->safemode = 2; 4890 } 4891 flush_signals(current); 4892 } 4893 4894 if ( ! ( 4895 mddev->sb_dirty || 4896 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || 4897 test_bit(MD_RECOVERY_DONE, &mddev->recovery) || 4898 (mddev->safemode == 1) || 4899 (mddev->safemode == 2 && !
atomic_read(&mddev->writes_pending) 4900 && !mddev->in_sync && mddev->recovery_cp == MaxSector) 4901 )) 4902 return; 4903 4904 if (mddev_trylock(mddev)) { 4905 int spares =0; 4906 4907 spin_lock_irq(&mddev->write_lock); 4908 if (mddev->safemode && !atomic_read(&mddev->writes_pending) && 4909 !mddev->in_sync && mddev->recovery_cp == MaxSector) { 4910 mddev->in_sync = 1; 4911 mddev->sb_dirty = 1; 4912 } 4913 if (mddev->safemode == 1) 4914 mddev->safemode = 0; 4915 spin_unlock_irq(&mddev->write_lock); 4916 4917 if (mddev->sb_dirty) 4918 md_update_sb(mddev); 4919 4920 4921 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && 4922 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) { 4923 /* resync/recovery still happening */ 4924 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4925 goto unlock; 4926 } 4927 if (mddev->sync_thread) { 4928 /* resync has finished, collect result */ 4929 md_unregister_thread(mddev->sync_thread); 4930 mddev->sync_thread = NULL; 4931 if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) && 4932 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 4933 /* success...*/ 4934 /* activate any spares */ 4935 mddev->pers->spare_active(mddev); 4936 } 4937 md_update_sb(mddev); 4938 4939 /* if the array is no longer degraded, then any saved_raid_disk 4940 * information must be scrapped 4941 */ 4942 if (!mddev->degraded) 4943 ITERATE_RDEV(mddev,rdev,rtmp) 4944 rdev->saved_raid_disk = -1; 4945 4946 mddev->recovery = 0; 4947 /* flag recovery needed just to double check */ 4948 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4949 md_new_event(mddev); 4950 goto unlock; 4951 } 4952 /* Clear some bits that don't mean anything, but 4953 * might be left set 4954 */ 4955 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4956 clear_bit(MD_RECOVERY_ERR, &mddev->recovery); 4957 clear_bit(MD_RECOVERY_INTR, &mddev->recovery); 4958 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 4959 4960 /* no recovery is running. 4961 * remove any failed drives, then 4962 * add spares if possible. 4963 * Spares are also removed and re-added, to allow 4964 * the personality to fail the re-add. 4965 */ 4966 ITERATE_RDEV(mddev,rdev,rtmp) 4967 if (rdev->raid_disk >= 0 && 4968 (test_bit(Faulty, &rdev->flags) || ! test_bit(In_sync, &rdev->flags)) && 4969 atomic_read(&rdev->nr_pending)==0) { 4970 if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0) { 4971 char nm[20]; 4972 sprintf(nm,"rd%d", rdev->raid_disk); 4973 sysfs_remove_link(&mddev->kobj, nm); 4974 rdev->raid_disk = -1; 4975 } 4976 } 4977 4978 if (mddev->degraded) { 4979 ITERATE_RDEV(mddev,rdev,rtmp) 4980 if (rdev->raid_disk < 0 4981 && !test_bit(Faulty, &rdev->flags)) { 4982 if (mddev->pers->hot_add_disk(mddev,rdev)) { 4983 char nm[20]; 4984 sprintf(nm, "rd%d", rdev->raid_disk); 4985 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); 4986 spares++; 4987 md_new_event(mddev); 4988 } else 4989 break; 4990 } 4991 } 4992 4993 if (spares) { 4994 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4995 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4996 } else if (mddev->recovery_cp < MaxSector) { 4997 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4998 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 4999 /* nothing to be done ... */ 5000 goto unlock; 5001 5002 if (mddev->pers->sync_request) { 5003 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 5004 if (spares && mddev->bitmap && ! mddev->bitmap->file) { 5005 /* We are adding a device or devices to an array 5006 * which has the bitmap stored on all devices.
5007 * So make sure all bitmap pages get written 5008 */ 5009 bitmap_write_all(mddev->bitmap); 5010 } 5011 mddev->sync_thread = md_register_thread(md_do_sync, 5012 mddev, 5013 "%s_resync"); 5014 if (!mddev->sync_thread) { 5015 printk(KERN_ERR "%s: could not start resync" 5016 " thread...\n", 5017 mdname(mddev)); 5018 /* leave the spares where they are, it shouldn't hurt */ 5019 mddev->recovery = 0; 5020 } else 5021 md_wakeup_thread(mddev->sync_thread); 5022 md_new_event(mddev); 5023 } 5024 unlock: 5025 mddev_unlock(mddev); 5026 } 5027 } 5028 5029 static int md_notify_reboot(struct notifier_block *this, 5030 unsigned long code, void *x) 5031 { 5032 struct list_head *tmp; 5033 mddev_t *mddev; 5034 5035 if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) { 5036 5037 printk(KERN_INFO "md: stopping all md devices.\n"); 5038 5039 ITERATE_MDDEV(mddev,tmp) 5040 if (mddev_trylock(mddev)) { 5041 do_md_stop (mddev, 1); 5042 mddev_unlock(mddev); 5043 } 5044 /* 5045 * certain more exotic SCSI devices are known to be 5046 * volatile wrt too early system reboots. While the 5047 * right place to handle this issue is the given 5048 * driver, we do want to have a safe RAID driver ... 5049 */ 5050 mdelay(1000*1); 5051 } 5052 return NOTIFY_DONE; 5053 } 5054 5055 static struct notifier_block md_notifier = { 5056 .notifier_call = md_notify_reboot, 5057 .next = NULL, 5058 .priority = INT_MAX, /* before any real devices */ 5059 }; 5060 5061 static void md_geninit(void) 5062 { 5063 struct proc_dir_entry *p; 5064 5065 dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t)); 5066 5067 p = create_proc_entry("mdstat", S_IRUGO, NULL); 5068 if (p) 5069 p->proc_fops = &md_seq_fops; 5070 } 5071 5072 static int __init md_init(void) 5073 { 5074 int minor; 5075 5076 printk(KERN_INFO "md: md driver %d.%d.%d MAX_MD_DEVS=%d," 5077 " MD_SB_DISKS=%d\n", 5078 MD_MAJOR_VERSION, MD_MINOR_VERSION, 5079 MD_PATCHLEVEL_VERSION, MAX_MD_DEVS, MD_SB_DISKS); 5080 printk(KERN_INFO "md: bitmap version %d.%d\n", BITMAP_MAJOR_HI, 5081 BITMAP_MINOR); 5082 5083 if (register_blkdev(MAJOR_NR, "md")) 5084 return -1; 5085 if ((mdp_major=register_blkdev(0, "mdp"))<=0) { 5086 unregister_blkdev(MAJOR_NR, "md"); 5087 return -1; 5088 } 5089 devfs_mk_dir("md"); 5090 blk_register_region(MKDEV(MAJOR_NR, 0), MAX_MD_DEVS, THIS_MODULE, 5091 md_probe, NULL, NULL); 5092 blk_register_region(MKDEV(mdp_major, 0), MAX_MD_DEVS<<MdpMinorShift, THIS_MODULE, 5093 md_probe, NULL, NULL); 5094 5095 for (minor=0; minor < MAX_MD_DEVS; ++minor) 5096 devfs_mk_bdev(MKDEV(MAJOR_NR, minor), 5097 S_IFBLK|S_IRUSR|S_IWUSR, 5098 "md/%d", minor); 5099 5100 for (minor=0; minor < MAX_MD_DEVS; ++minor) 5101 devfs_mk_bdev(MKDEV(mdp_major, minor<<MdpMinorShift), 5102 S_IFBLK|S_IRUSR|S_IWUSR, 5103 "md/mdp%d", minor); 5104 5105 5106 register_reboot_notifier(&md_notifier); 5107 raid_table_header = register_sysctl_table(raid_root_table, 1); 5108 5109 md_geninit(); 5110 return (0); 5111 } 5112 5113 5114 #ifndef MODULE 5115 5116 /* 5117 * Searches all registered partitions for autorun RAID arrays 5118 * at boot time. 
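 *
 * (md_autodetect_dev() below is normally called by the partition-table
 * parsing code for partitions tagged with the Linux raid autodetect
 * type, 0xfd, when md is built into the kernel.)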
5119 */ 5120 static dev_t detected_devices[128]; 5121 static int dev_cnt; 5122 5123 void md_autodetect_dev(dev_t dev) 5124 { 5125 if (dev_cnt >= 0 && dev_cnt < 127) 5126 detected_devices[dev_cnt++] = dev; 5127 } 5128 5129 5130 static void autostart_arrays(int part) 5131 { 5132 mdk_rdev_t *rdev; 5133 int i; 5134 5135 printk(KERN_INFO "md: Autodetecting RAID arrays.\n"); 5136 5137 for (i = 0; i < dev_cnt; i++) { 5138 dev_t dev = detected_devices[i]; 5139 5140 rdev = md_import_device(dev,0, 0); 5141 if (IS_ERR(rdev)) 5142 continue; 5143 5144 if (test_bit(Faulty, &rdev->flags)) { 5145 MD_BUG(); 5146 continue; 5147 } 5148 list_add(&rdev->same_set, &pending_raid_disks); 5149 } 5150 dev_cnt = 0; 5151 5152 autorun_devices(part); 5153 } 5154 5155 #endif 5156 5157 static __exit void md_exit(void) 5158 { 5159 mddev_t *mddev; 5160 struct list_head *tmp; 5161 int i; 5162 blk_unregister_region(MKDEV(MAJOR_NR,0), MAX_MD_DEVS); 5163 blk_unregister_region(MKDEV(mdp_major,0), MAX_MD_DEVS << MdpMinorShift); 5164 for (i=0; i < MAX_MD_DEVS; i++) 5165 devfs_remove("md/%d", i); 5166 for (i=0; i < MAX_MD_DEVS; i++) 5167 devfs_remove("md/d%d", i); 5168 5169 devfs_remove("md"); 5170 5171 unregister_blkdev(MAJOR_NR,"md"); 5172 unregister_blkdev(mdp_major, "mdp"); 5173 unregister_reboot_notifier(&md_notifier); 5174 unregister_sysctl_table(raid_table_header); 5175 remove_proc_entry("mdstat", NULL); 5176 ITERATE_MDDEV(mddev,tmp) { 5177 struct gendisk *disk = mddev->gendisk; 5178 if (!disk) 5179 continue; 5180 export_array(mddev); 5181 del_gendisk(disk); 5182 put_disk(disk); 5183 mddev->gendisk = NULL; 5184 mddev_put(mddev); 5185 } 5186 } 5187 5188 module_init(md_init) 5189 module_exit(md_exit) 5190 5191 static int get_ro(char *buffer, struct kernel_param *kp) 5192 { 5193 return sprintf(buffer, "%d", start_readonly); 5194 } 5195 static int set_ro(const char *val, struct kernel_param *kp) 5196 { 5197 char *e; 5198 int num = simple_strtoul(val, &e, 10); 5199 if (*val && (*e == '\0' || *e == '\n')) { 5200 start_readonly = num; 5201 return 0; 5202 } 5203 return -EINVAL; 5204 } 5205 5206 module_param_call(start_ro, set_ro, get_ro, NULL, 0600); 5207 module_param(start_dirty_degraded, int, 0644); 5208 5209 5210 EXPORT_SYMBOL(register_md_personality); 5211 EXPORT_SYMBOL(unregister_md_personality); 5212 EXPORT_SYMBOL(md_error); 5213 EXPORT_SYMBOL(md_done_sync); 5214 EXPORT_SYMBOL(md_write_start); 5215 EXPORT_SYMBOL(md_write_end); 5216 EXPORT_SYMBOL(md_register_thread); 5217 EXPORT_SYMBOL(md_unregister_thread); 5218 EXPORT_SYMBOL(md_wakeup_thread); 5219 EXPORT_SYMBOL(md_print_devices); 5220 EXPORT_SYMBOL(md_check_recovery); 5221 MODULE_LICENSE("GPL"); 5222 MODULE_ALIAS("md"); 5223 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR); 5224
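/*
 * Editorial sketch (not part of the original driver): a minimal example of
 * how a RAID personality module would plug into register_md_personality()
 * and unregister_md_personality() above.  The personality name "example"
 * and its level value are hypothetical, and the mandatory method pointers
 * (make_request, run, stop, status, ...) that a real personality must
 * provide are omitted for brevity; the snippet is kept under #if 0 so it
 * remains illustration only.
 */
#if 0
static struct mdk_personality example_personality = {
	.name	= "example",	/* hypothetical personality name */
	.level	= 99,		/* hypothetical, unused level number */
	.owner	= THIS_MODULE,
	/* .make_request, .run, .stop, .status, ... required in practice */
};

static int __init example_init(void)
{
	/* adds the personality to pers_list under pers_lock */
	return register_md_personality(&example_personality);
}

static void __exit example_exit(void)
{
	unregister_md_personality(&example_personality);
}

module_init(example_init);
module_exit(example_exit);
#endif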