/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/module.h>
#include <linux/config.h>
#include <linux/kthread.h>
#include <linux/linkage.h>
#include <linux/raid/md.h>
#include <linux/raid/bitmap.h>
#include <linux/sysctl.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/suspend.h>
#include <linux/poll.h>
#include <linux/mutex.h>

#include <linux/init.h>

#include <linux/file.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif

#include <asm/unaligned.h>

#define MAJOR_NR MD_MAJOR
#define MD_DRIVER

/* 63 partitions with the alternate major number (mdp) */
#define MdpMinorShift 6

#define DEBUG 0
#define dprintk(x...) ((void)(DEBUG && printk(x)))


#ifndef MODULE
static void autostart_arrays (int part);
#endif

static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
 * or /sys/block/mdX/md/sync_speed_{min,max}
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(mddev_t *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(mddev_t *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}

static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MIN,
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MAX,
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_dir_table[] = {
	{
		.ctl_name	= DEV_RAID,
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_table,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_root_table[] = {
	{
		.ctl_name	= CTL_DEV,
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ .ctl_name = 0 }
};

static struct block_device_operations md_fops;

static int start_readonly;

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/*
 * Enables iteration over all existing md arrays;
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);


/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while holding
 * a reference to the current mddev must mddev_put it.
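 *
 * An illustrative usage sketch (added note, not in the original source;
 * 'work_on' is a hypothetical helper):
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *
 *	ITERATE_MDDEV(mddev, tmp) {
 *		work_on(mddev);		(a reference to mddev is held
 *					 here and all_mddevs_lock is
 *					 dropped, so the body may sleep)
 *	}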
 */
#define ITERATE_MDDEV(mddev,tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		tmp = all_mddevs.next;					\
		mddev = NULL;});					\
	     ({ if (tmp != &all_mddevs)					\
			mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (mddev) mddev_put(mddev);				\
		mddev = list_entry(tmp, mddev_t, all_mddevs);		\
		tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		tmp = tmp->next;})					\
		)


static int md_fail_request (request_queue_t *q, struct bio *bio)
{
	bio_io_error(bio, bio->bi_size);
	return 0;
}

static inline mddev_t *mddev_get(mddev_t *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_put(mddev_t *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks)) {
		list_del(&mddev->all_mddevs);
		spin_unlock(&all_mddevs_lock);
		blk_cleanup_queue(mddev->queue);
		kobject_unregister(&mddev->kobj);
	} else
		spin_unlock(&all_mddevs_lock);
}

static mddev_t * mddev_find(dev_t unit)
{
	mddev_t *mddev, *new = NULL;

 retry:
	spin_lock(&all_mddevs_lock);
	list_for_each_entry(mddev, &all_mddevs, all_mddevs)
		if (mddev->unit == unit) {
			mddev_get(mddev);
			spin_unlock(&all_mddevs_lock);
			kfree(new);
			return mddev;
		}

	if (new) {
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mutex_init(&new->reconfig_mutex);
	INIT_LIST_HEAD(&new->disks);
	INIT_LIST_HEAD(&new->all_mddevs);
	init_timer(&new->safemode_timer);
	atomic_set(&new->active, 1);
	spin_lock_init(&new->write_lock);
	init_waitqueue_head(&new->sb_wait);

	new->queue = blk_alloc_queue(GFP_KERNEL);
	if (!new->queue) {
		kfree(new);
		return NULL;
	}
	set_bit(QUEUE_FLAG_CLUSTER, &new->queue->queue_flags);

	blk_queue_make_request(new->queue, md_fail_request);

	goto retry;
}

static inline int mddev_lock(mddev_t * mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

static inline void mddev_lock_uninterruptible(mddev_t * mddev)
{
	mutex_lock(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(mddev_t * mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}

static inline void mddev_unlock(mddev_t * mddev)
{
	mutex_unlock(&mddev->reconfig_mutex);

	md_wakeup_thread(mddev->thread);
}

static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
	mdk_rdev_t * rdev;
	struct list_head *tmp;

	ITERATE_RDEV(mddev,rdev,tmp) {
		if (rdev->desc_nr == nr)
			return rdev;
	}
	return NULL;
}

static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;

	ITERATE_RDEV(mddev,rdev,tmp) {
		if (rdev->bdev->bd_dev == dev)
			return rdev;
	}
	return NULL;
}

static struct mdk_personality *find_pers(int level, char *clevel)
{
	struct mdk_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
328 if (strcmp(pers->name, clevel)==0) 329 return pers; 330 } 331 return NULL; 332 } 333 334 static inline sector_t calc_dev_sboffset(struct block_device *bdev) 335 { 336 sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 337 return MD_NEW_SIZE_BLOCKS(size); 338 } 339 340 static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size) 341 { 342 sector_t size; 343 344 size = rdev->sb_offset; 345 346 if (chunk_size) 347 size &= ~((sector_t)chunk_size/1024 - 1); 348 return size; 349 } 350 351 static int alloc_disk_sb(mdk_rdev_t * rdev) 352 { 353 if (rdev->sb_page) 354 MD_BUG(); 355 356 rdev->sb_page = alloc_page(GFP_KERNEL); 357 if (!rdev->sb_page) { 358 printk(KERN_ALERT "md: out of memory.\n"); 359 return -EINVAL; 360 } 361 362 return 0; 363 } 364 365 static void free_disk_sb(mdk_rdev_t * rdev) 366 { 367 if (rdev->sb_page) { 368 put_page(rdev->sb_page); 369 rdev->sb_loaded = 0; 370 rdev->sb_page = NULL; 371 rdev->sb_offset = 0; 372 rdev->size = 0; 373 } 374 } 375 376 377 static int super_written(struct bio *bio, unsigned int bytes_done, int error) 378 { 379 mdk_rdev_t *rdev = bio->bi_private; 380 mddev_t *mddev = rdev->mddev; 381 if (bio->bi_size) 382 return 1; 383 384 if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) 385 md_error(mddev, rdev); 386 387 if (atomic_dec_and_test(&mddev->pending_writes)) 388 wake_up(&mddev->sb_wait); 389 bio_put(bio); 390 return 0; 391 } 392 393 static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error) 394 { 395 struct bio *bio2 = bio->bi_private; 396 mdk_rdev_t *rdev = bio2->bi_private; 397 mddev_t *mddev = rdev->mddev; 398 if (bio->bi_size) 399 return 1; 400 401 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && 402 error == -EOPNOTSUPP) { 403 unsigned long flags; 404 /* barriers don't appear to be supported :-( */ 405 set_bit(BarriersNotsupp, &rdev->flags); 406 mddev->barriers_work = 0; 407 spin_lock_irqsave(&mddev->write_lock, flags); 408 bio2->bi_next = mddev->biolist; 409 mddev->biolist = bio2; 410 spin_unlock_irqrestore(&mddev->write_lock, flags); 411 wake_up(&mddev->sb_wait); 412 bio_put(bio); 413 return 0; 414 } 415 bio_put(bio2); 416 bio->bi_private = rdev; 417 return super_written(bio, bytes_done, error); 418 } 419 420 void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, 421 sector_t sector, int size, struct page *page) 422 { 423 /* write first size bytes of page to sector of rdev 424 * Increment mddev->pending_writes before returning 425 * and decrement it on completion, waking up sb_wait 426 * if zero is reached. 427 * If an error occurred, call md_error 428 * 429 * As we might need to resubmit the request if BIO_RW_BARRIER 430 * causes ENOTSUPP, we allocate a spare bio... 431 */ 432 struct bio *bio = bio_alloc(GFP_NOIO, 1); 433 int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC); 434 435 bio->bi_bdev = rdev->bdev; 436 bio->bi_sector = sector; 437 bio_add_page(bio, page, size, 0); 438 bio->bi_private = rdev; 439 bio->bi_end_io = super_written; 440 bio->bi_rw = rw; 441 442 atomic_inc(&mddev->pending_writes); 443 if (!test_bit(BarriersNotsupp, &rdev->flags)) { 444 struct bio *rbio; 445 rw |= (1<<BIO_RW_BARRIER); 446 rbio = bio_clone(bio, GFP_NOIO); 447 rbio->bi_private = bio; 448 rbio->bi_end_io = super_written_barrier; 449 submit_bio(rw, rbio); 450 } else 451 submit_bio(rw, bio); 452 } 453 454 void md_super_wait(mddev_t *mddev) 455 { 456 /* wait for all superblock writes that were scheduled to complete. 
457 * if any had to be retried (due to BARRIER problems), retry them 458 */ 459 DEFINE_WAIT(wq); 460 for(;;) { 461 prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE); 462 if (atomic_read(&mddev->pending_writes)==0) 463 break; 464 while (mddev->biolist) { 465 struct bio *bio; 466 spin_lock_irq(&mddev->write_lock); 467 bio = mddev->biolist; 468 mddev->biolist = bio->bi_next ; 469 bio->bi_next = NULL; 470 spin_unlock_irq(&mddev->write_lock); 471 submit_bio(bio->bi_rw, bio); 472 } 473 schedule(); 474 } 475 finish_wait(&mddev->sb_wait, &wq); 476 } 477 478 static int bi_complete(struct bio *bio, unsigned int bytes_done, int error) 479 { 480 if (bio->bi_size) 481 return 1; 482 483 complete((struct completion*)bio->bi_private); 484 return 0; 485 } 486 487 int sync_page_io(struct block_device *bdev, sector_t sector, int size, 488 struct page *page, int rw) 489 { 490 struct bio *bio = bio_alloc(GFP_NOIO, 1); 491 struct completion event; 492 int ret; 493 494 rw |= (1 << BIO_RW_SYNC); 495 496 bio->bi_bdev = bdev; 497 bio->bi_sector = sector; 498 bio_add_page(bio, page, size, 0); 499 init_completion(&event); 500 bio->bi_private = &event; 501 bio->bi_end_io = bi_complete; 502 submit_bio(rw, bio); 503 wait_for_completion(&event); 504 505 ret = test_bit(BIO_UPTODATE, &bio->bi_flags); 506 bio_put(bio); 507 return ret; 508 } 509 EXPORT_SYMBOL_GPL(sync_page_io); 510 511 static int read_disk_sb(mdk_rdev_t * rdev, int size) 512 { 513 char b[BDEVNAME_SIZE]; 514 if (!rdev->sb_page) { 515 MD_BUG(); 516 return -EINVAL; 517 } 518 if (rdev->sb_loaded) 519 return 0; 520 521 522 if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ)) 523 goto fail; 524 rdev->sb_loaded = 1; 525 return 0; 526 527 fail: 528 printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n", 529 bdevname(rdev->bdev,b)); 530 return -EINVAL; 531 } 532 533 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2) 534 { 535 if ( (sb1->set_uuid0 == sb2->set_uuid0) && 536 (sb1->set_uuid1 == sb2->set_uuid1) && 537 (sb1->set_uuid2 == sb2->set_uuid2) && 538 (sb1->set_uuid3 == sb2->set_uuid3)) 539 540 return 1; 541 542 return 0; 543 } 544 545 546 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2) 547 { 548 int ret; 549 mdp_super_t *tmp1, *tmp2; 550 551 tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL); 552 tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL); 553 554 if (!tmp1 || !tmp2) { 555 ret = 0; 556 printk(KERN_INFO "md.c: sb1 is not equal to sb2!\n"); 557 goto abort; 558 } 559 560 *tmp1 = *sb1; 561 *tmp2 = *sb2; 562 563 /* 564 * nr_disks is not constant 565 */ 566 tmp1->nr_disks = 0; 567 tmp2->nr_disks = 0; 568 569 if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4)) 570 ret = 0; 571 else 572 ret = 1; 573 574 abort: 575 kfree(tmp1); 576 kfree(tmp2); 577 return ret; 578 } 579 580 static unsigned int calc_sb_csum(mdp_super_t * sb) 581 { 582 unsigned int disk_csum, csum; 583 584 disk_csum = sb->sb_csum; 585 sb->sb_csum = 0; 586 csum = csum_partial((void *)sb, MD_SB_BYTES, 0); 587 sb->sb_csum = disk_csum; 588 return csum; 589 } 590 591 592 /* 593 * Handle superblock details. 594 * We want to be able to handle multiple superblock formats 595 * so we have a common interface to them all, and an array of 596 * different handlers. 597 * We rely on user-space to write the initial superblock, and support 598 * reading and updating of superblocks. 599 * Interface methods are: 600 * int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version) 601 * loads and validates a superblock on dev. 
602 * if refdev != NULL, compare superblocks on both devices 603 * Return: 604 * 0 - dev has a superblock that is compatible with refdev 605 * 1 - dev has a superblock that is compatible and newer than refdev 606 * so dev should be used as the refdev in future 607 * -EINVAL superblock incompatible or invalid 608 * -othererror e.g. -EIO 609 * 610 * int validate_super(mddev_t *mddev, mdk_rdev_t *dev) 611 * Verify that dev is acceptable into mddev. 612 * The first time, mddev->raid_disks will be 0, and data from 613 * dev should be merged in. Subsequent calls check that dev 614 * is new enough. Return 0 or -EINVAL 615 * 616 * void sync_super(mddev_t *mddev, mdk_rdev_t *dev) 617 * Update the superblock for rdev with data in mddev 618 * This does not write to disc. 619 * 620 */ 621 622 struct super_type { 623 char *name; 624 struct module *owner; 625 int (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version); 626 int (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev); 627 void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev); 628 }; 629 630 /* 631 * load_super for 0.90.0 632 */ 633 static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) 634 { 635 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 636 mdp_super_t *sb; 637 int ret; 638 sector_t sb_offset; 639 640 /* 641 * Calculate the position of the superblock, 642 * it's at the end of the disk. 643 * 644 * It also happens to be a multiple of 4Kb. 645 */ 646 sb_offset = calc_dev_sboffset(rdev->bdev); 647 rdev->sb_offset = sb_offset; 648 649 ret = read_disk_sb(rdev, MD_SB_BYTES); 650 if (ret) return ret; 651 652 ret = -EINVAL; 653 654 bdevname(rdev->bdev, b); 655 sb = (mdp_super_t*)page_address(rdev->sb_page); 656 657 if (sb->md_magic != MD_SB_MAGIC) { 658 printk(KERN_ERR "md: invalid raid superblock magic on %s\n", 659 b); 660 goto abort; 661 } 662 663 if (sb->major_version != 0 || 664 sb->minor_version < 90 || 665 sb->minor_version > 91) { 666 printk(KERN_WARNING "Bad version number %d.%d on %s\n", 667 sb->major_version, sb->minor_version, 668 b); 669 goto abort; 670 } 671 672 if (sb->raid_disks <= 0) 673 goto abort; 674 675 if (csum_fold(calc_sb_csum(sb)) != csum_fold(sb->sb_csum)) { 676 printk(KERN_WARNING "md: invalid superblock checksum on %s\n", 677 b); 678 goto abort; 679 } 680 681 rdev->preferred_minor = sb->md_minor; 682 rdev->data_offset = 0; 683 rdev->sb_size = MD_SB_BYTES; 684 685 if (sb->level == LEVEL_MULTIPATH) 686 rdev->desc_nr = -1; 687 else 688 rdev->desc_nr = sb->this_disk.number; 689 690 if (refdev == 0) 691 ret = 1; 692 else { 693 __u64 ev1, ev2; 694 mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page); 695 if (!uuid_equal(refsb, sb)) { 696 printk(KERN_WARNING "md: %s has different UUID to %s\n", 697 b, bdevname(refdev->bdev,b2)); 698 goto abort; 699 } 700 if (!sb_equal(refsb, sb)) { 701 printk(KERN_WARNING "md: %s has same UUID" 702 " but different superblock to %s\n", 703 b, bdevname(refdev->bdev, b2)); 704 goto abort; 705 } 706 ev1 = md_event(sb); 707 ev2 = md_event(refsb); 708 if (ev1 > ev2) 709 ret = 1; 710 else 711 ret = 0; 712 } 713 rdev->size = calc_dev_size(rdev, sb->chunk_size); 714 715 if (rdev->size < sb->size && sb->level > 1) 716 /* "this cannot possibly happen" ... 
*/ 717 ret = -EINVAL; 718 719 abort: 720 return ret; 721 } 722 723 /* 724 * validate_super for 0.90.0 725 */ 726 static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) 727 { 728 mdp_disk_t *desc; 729 mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page); 730 731 rdev->raid_disk = -1; 732 rdev->flags = 0; 733 if (mddev->raid_disks == 0) { 734 mddev->major_version = 0; 735 mddev->minor_version = sb->minor_version; 736 mddev->patch_version = sb->patch_version; 737 mddev->persistent = ! sb->not_persistent; 738 mddev->chunk_size = sb->chunk_size; 739 mddev->ctime = sb->ctime; 740 mddev->utime = sb->utime; 741 mddev->level = sb->level; 742 mddev->clevel[0] = 0; 743 mddev->layout = sb->layout; 744 mddev->raid_disks = sb->raid_disks; 745 mddev->size = sb->size; 746 mddev->events = md_event(sb); 747 mddev->bitmap_offset = 0; 748 mddev->default_bitmap_offset = MD_SB_BYTES >> 9; 749 750 if (mddev->minor_version >= 91) { 751 mddev->reshape_position = sb->reshape_position; 752 mddev->delta_disks = sb->delta_disks; 753 mddev->new_level = sb->new_level; 754 mddev->new_layout = sb->new_layout; 755 mddev->new_chunk = sb->new_chunk; 756 } else { 757 mddev->reshape_position = MaxSector; 758 mddev->delta_disks = 0; 759 mddev->new_level = mddev->level; 760 mddev->new_layout = mddev->layout; 761 mddev->new_chunk = mddev->chunk_size; 762 } 763 764 if (sb->state & (1<<MD_SB_CLEAN)) 765 mddev->recovery_cp = MaxSector; 766 else { 767 if (sb->events_hi == sb->cp_events_hi && 768 sb->events_lo == sb->cp_events_lo) { 769 mddev->recovery_cp = sb->recovery_cp; 770 } else 771 mddev->recovery_cp = 0; 772 } 773 774 memcpy(mddev->uuid+0, &sb->set_uuid0, 4); 775 memcpy(mddev->uuid+4, &sb->set_uuid1, 4); 776 memcpy(mddev->uuid+8, &sb->set_uuid2, 4); 777 memcpy(mddev->uuid+12,&sb->set_uuid3, 4); 778 779 mddev->max_disks = MD_SB_DISKS; 780 781 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) && 782 mddev->bitmap_file == NULL) { 783 if (mddev->level != 1 && mddev->level != 4 784 && mddev->level != 5 && mddev->level != 6 785 && mddev->level != 10) { 786 /* FIXME use a better test */ 787 printk(KERN_WARNING "md: bitmaps not supported for this level.\n"); 788 return -EINVAL; 789 } 790 mddev->bitmap_offset = mddev->default_bitmap_offset; 791 } 792 793 } else if (mddev->pers == NULL) { 794 /* Insist on good event counter while assembling */ 795 __u64 ev1 = md_event(sb); 796 ++ev1; 797 if (ev1 < mddev->events) 798 return -EINVAL; 799 } else if (mddev->bitmap) { 800 /* if adding to array with a bitmap, then we can accept an 801 * older device ... but not too old. 802 */ 803 __u64 ev1 = md_event(sb); 804 if (ev1 < mddev->bitmap->events_cleared) 805 return 0; 806 } else /* just a hot-add of a new device, leave raid_disk at -1 */ 807 return 0; 808 809 if (mddev->level != LEVEL_MULTIPATH) { 810 desc = sb->disks + rdev->desc_nr; 811 812 if (desc->state & (1<<MD_DISK_FAULTY)) 813 set_bit(Faulty, &rdev->flags); 814 else if (desc->state & (1<<MD_DISK_SYNC) && 815 desc->raid_disk < mddev->raid_disks) { 816 set_bit(In_sync, &rdev->flags); 817 rdev->raid_disk = desc->raid_disk; 818 } 819 if (desc->state & (1<<MD_DISK_WRITEMOSTLY)) 820 set_bit(WriteMostly, &rdev->flags); 821 } else /* MULTIPATH are always insync */ 822 set_bit(In_sync, &rdev->flags); 823 return 0; 824 } 825 826 /* 827 * sync_super for 0.90.0 828 */ 829 static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) 830 { 831 mdp_super_t *sb; 832 struct list_head *tmp; 833 mdk_rdev_t *rdev2; 834 int next_spare = mddev->raid_disks; 835 836 837 /* make rdev->sb match mddev data.. 
838 * 839 * 1/ zero out disks 840 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare); 841 * 3/ any empty disks < next_spare become removed 842 * 843 * disks[0] gets initialised to REMOVED because 844 * we cannot be sure from other fields if it has 845 * been initialised or not. 846 */ 847 int i; 848 int active=0, working=0,failed=0,spare=0,nr_disks=0; 849 850 rdev->sb_size = MD_SB_BYTES; 851 852 sb = (mdp_super_t*)page_address(rdev->sb_page); 853 854 memset(sb, 0, sizeof(*sb)); 855 856 sb->md_magic = MD_SB_MAGIC; 857 sb->major_version = mddev->major_version; 858 sb->patch_version = mddev->patch_version; 859 sb->gvalid_words = 0; /* ignored */ 860 memcpy(&sb->set_uuid0, mddev->uuid+0, 4); 861 memcpy(&sb->set_uuid1, mddev->uuid+4, 4); 862 memcpy(&sb->set_uuid2, mddev->uuid+8, 4); 863 memcpy(&sb->set_uuid3, mddev->uuid+12,4); 864 865 sb->ctime = mddev->ctime; 866 sb->level = mddev->level; 867 sb->size = mddev->size; 868 sb->raid_disks = mddev->raid_disks; 869 sb->md_minor = mddev->md_minor; 870 sb->not_persistent = !mddev->persistent; 871 sb->utime = mddev->utime; 872 sb->state = 0; 873 sb->events_hi = (mddev->events>>32); 874 sb->events_lo = (u32)mddev->events; 875 876 if (mddev->reshape_position == MaxSector) 877 sb->minor_version = 90; 878 else { 879 sb->minor_version = 91; 880 sb->reshape_position = mddev->reshape_position; 881 sb->new_level = mddev->new_level; 882 sb->delta_disks = mddev->delta_disks; 883 sb->new_layout = mddev->new_layout; 884 sb->new_chunk = mddev->new_chunk; 885 } 886 mddev->minor_version = sb->minor_version; 887 if (mddev->in_sync) 888 { 889 sb->recovery_cp = mddev->recovery_cp; 890 sb->cp_events_hi = (mddev->events>>32); 891 sb->cp_events_lo = (u32)mddev->events; 892 if (mddev->recovery_cp == MaxSector) 893 sb->state = (1<< MD_SB_CLEAN); 894 } else 895 sb->recovery_cp = 0; 896 897 sb->layout = mddev->layout; 898 sb->chunk_size = mddev->chunk_size; 899 900 if (mddev->bitmap && mddev->bitmap_file == NULL) 901 sb->state |= (1<<MD_SB_BITMAP_PRESENT); 902 903 sb->disks[0].state = (1<<MD_DISK_REMOVED); 904 ITERATE_RDEV(mddev,rdev2,tmp) { 905 mdp_disk_t *d; 906 int desc_nr; 907 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags) 908 && !test_bit(Faulty, &rdev2->flags)) 909 desc_nr = rdev2->raid_disk; 910 else 911 desc_nr = next_spare++; 912 rdev2->desc_nr = desc_nr; 913 d = &sb->disks[rdev2->desc_nr]; 914 nr_disks++; 915 d->number = rdev2->desc_nr; 916 d->major = MAJOR(rdev2->bdev->bd_dev); 917 d->minor = MINOR(rdev2->bdev->bd_dev); 918 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags) 919 && !test_bit(Faulty, &rdev2->flags)) 920 d->raid_disk = rdev2->raid_disk; 921 else 922 d->raid_disk = rdev2->desc_nr; /* compatibility */ 923 if (test_bit(Faulty, &rdev2->flags)) 924 d->state = (1<<MD_DISK_FAULTY); 925 else if (test_bit(In_sync, &rdev2->flags)) { 926 d->state = (1<<MD_DISK_ACTIVE); 927 d->state |= (1<<MD_DISK_SYNC); 928 active++; 929 working++; 930 } else { 931 d->state = 0; 932 spare++; 933 working++; 934 } 935 if (test_bit(WriteMostly, &rdev2->flags)) 936 d->state |= (1<<MD_DISK_WRITEMOSTLY); 937 } 938 /* now set the "removed" and "faulty" bits on any missing devices */ 939 for (i=0 ; i < mddev->raid_disks ; i++) { 940 mdp_disk_t *d = &sb->disks[i]; 941 if (d->state == 0 && d->number == 0) { 942 d->number = i; 943 d->raid_disk = i; 944 d->state = (1<<MD_DISK_REMOVED); 945 d->state |= (1<<MD_DISK_FAULTY); 946 failed++; 947 } 948 } 949 sb->nr_disks = nr_disks; 950 sb->active_disks = active; 951 sb->working_disks = working; 
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}

/*
 * version 1 superblock
 */

static unsigned int calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
	unsigned int disk_csum, csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	unsigned int *isuper = (unsigned int*)sb;
	int i;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (i=0; size>=4; size -= 4 )
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(unsigned short*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}

static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_offset;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch(minor_version) {
	case 0:
		sb_offset = rdev->bdev->bd_inode->i_size >> 9;
		sb_offset -= 8*2;
		sb_offset &= ~(sector_t)(4*2-1);
		/* convert from sectors to K */
		sb_offset /= 2;
		break;
	case 1:
		sb_offset = 0;
		break;
	case 2:
		sb_offset = 4;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_offset = sb_offset;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;


	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask)+1;

	if (refdev == 0)
		ret = 1;
	else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb =
			(struct mdp_superblock_1*)page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
				" superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version)
		rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
	else
		rdev->size = rdev->sb_offset;
	if (rdev->size < le64_to_cpu(sb->data_size)/2)
		return -EINVAL;
	rdev->size = le64_to_cpu(sb->data_size)/2;
	if (le32_to_cpu(sb->chunksize))
		rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);

	if (le32_to_cpu(sb->size) > rdev->size*2)
		return -EINVAL;
	return ret;
}

static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	rdev->raid_disk = -1;
	rdev->flags = 0;
	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->persistent = 1;
		mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->size = le64_to_cpu(sb->size)/2;
		mddev->events = le64_to_cpu(sb->events);
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = 1024 >> 9;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks =  (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_file == NULL ) {
			if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
			    && mddev->level != 10) {
				printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
				return -EINVAL;
			}
			mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
		}
		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk = mddev->chunk_size;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling */
		__u64 ev1 = le64_to_cpu(sb->events);
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
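		 * (Illustrative note, added, not in the original: if the
		 * bitmap's events_cleared is, say, 100, a device whose own
		 * superblock records only 96 events takes the early return
		 * below and keeps raid_disk == -1, i.e. it is treated like
		 * a freshly added spare, because the bitmap no longer
		 * remembers every write made since event 96.  A device at
		 * 100 or more falls through, gets its old role back, and
		 * only the blocks flagged in the bitmap need resyncing.)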
1154 */ 1155 __u64 ev1 = le64_to_cpu(sb->events); 1156 if (ev1 < mddev->bitmap->events_cleared) 1157 return 0; 1158 } else /* just a hot-add of a new device, leave raid_disk at -1 */ 1159 return 0; 1160 1161 if (mddev->level != LEVEL_MULTIPATH) { 1162 int role; 1163 rdev->desc_nr = le32_to_cpu(sb->dev_number); 1164 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); 1165 switch(role) { 1166 case 0xffff: /* spare */ 1167 break; 1168 case 0xfffe: /* faulty */ 1169 set_bit(Faulty, &rdev->flags); 1170 break; 1171 default: 1172 set_bit(In_sync, &rdev->flags); 1173 rdev->raid_disk = role; 1174 break; 1175 } 1176 if (sb->devflags & WriteMostly1) 1177 set_bit(WriteMostly, &rdev->flags); 1178 } else /* MULTIPATH are always insync */ 1179 set_bit(In_sync, &rdev->flags); 1180 1181 return 0; 1182 } 1183 1184 static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) 1185 { 1186 struct mdp_superblock_1 *sb; 1187 struct list_head *tmp; 1188 mdk_rdev_t *rdev2; 1189 int max_dev, i; 1190 /* make rdev->sb match mddev and rdev data. */ 1191 1192 sb = (struct mdp_superblock_1*)page_address(rdev->sb_page); 1193 1194 sb->feature_map = 0; 1195 sb->pad0 = 0; 1196 memset(sb->pad1, 0, sizeof(sb->pad1)); 1197 memset(sb->pad2, 0, sizeof(sb->pad2)); 1198 memset(sb->pad3, 0, sizeof(sb->pad3)); 1199 1200 sb->utime = cpu_to_le64((__u64)mddev->utime); 1201 sb->events = cpu_to_le64(mddev->events); 1202 if (mddev->in_sync) 1203 sb->resync_offset = cpu_to_le64(mddev->recovery_cp); 1204 else 1205 sb->resync_offset = cpu_to_le64(0); 1206 1207 sb->cnt_corrected_read = atomic_read(&rdev->corrected_errors); 1208 1209 sb->raid_disks = cpu_to_le32(mddev->raid_disks); 1210 sb->size = cpu_to_le64(mddev->size<<1); 1211 1212 if (mddev->bitmap && mddev->bitmap_file == NULL) { 1213 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset); 1214 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); 1215 } 1216 if (mddev->reshape_position != MaxSector) { 1217 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE); 1218 sb->reshape_position = cpu_to_le64(mddev->reshape_position); 1219 sb->new_layout = cpu_to_le32(mddev->new_layout); 1220 sb->delta_disks = cpu_to_le32(mddev->delta_disks); 1221 sb->new_level = cpu_to_le32(mddev->new_level); 1222 sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9); 1223 } 1224 1225 max_dev = 0; 1226 ITERATE_RDEV(mddev,rdev2,tmp) 1227 if (rdev2->desc_nr+1 > max_dev) 1228 max_dev = rdev2->desc_nr+1; 1229 1230 sb->max_dev = cpu_to_le32(max_dev); 1231 for (i=0; i<max_dev;i++) 1232 sb->dev_roles[i] = cpu_to_le16(0xfffe); 1233 1234 ITERATE_RDEV(mddev,rdev2,tmp) { 1235 i = rdev2->desc_nr; 1236 if (test_bit(Faulty, &rdev2->flags)) 1237 sb->dev_roles[i] = cpu_to_le16(0xfffe); 1238 else if (test_bit(In_sync, &rdev2->flags)) 1239 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); 1240 else 1241 sb->dev_roles[i] = cpu_to_le16(0xffff); 1242 } 1243 1244 sb->recovery_offset = cpu_to_le64(0); /* not supported yet */ 1245 sb->sb_csum = calc_sb_1_csum(sb); 1246 } 1247 1248 1249 static struct super_type super_types[] = { 1250 [0] = { 1251 .name = "0.90.0", 1252 .owner = THIS_MODULE, 1253 .load_super = super_90_load, 1254 .validate_super = super_90_validate, 1255 .sync_super = super_90_sync, 1256 }, 1257 [1] = { 1258 .name = "md-1", 1259 .owner = THIS_MODULE, 1260 .load_super = super_1_load, 1261 .validate_super = super_1_validate, 1262 .sync_super = super_1_sync, 1263 }, 1264 }; 1265 1266 static mdk_rdev_t * match_dev_unit(mddev_t *mddev, mdk_rdev_t *dev) 1267 { 1268 struct list_head *tmp; 1269 mdk_rdev_t *rdev; 1270 1271 
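	/*
	 * Illustrative note (added, not in the original source): bd_contains
	 * points at the block_device of the whole disk, so two rdevs built
	 * from, say, /dev/sda1 and /dev/sda2 compare equal in the loop below
	 * and are reported as sharing one physical device.
	 */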
	ITERATE_RDEV(mddev,rdev,tmp)
		if (rdev->bdev->bd_contains == dev->bdev->bd_contains)
			return rdev;

	return NULL;
}

static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;

	ITERATE_RDEV(mddev1,rdev,tmp)
		if (match_dev_unit(mddev2, rdev))
			return 1;

	return 0;
}

static LIST_HEAD(pending_raid_disks);

static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
{
	mdk_rdev_t *same_pdev;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	struct kobject *ko;
	char *s;

	if (rdev->mddev) {
		MD_BUG();
		return -EINVAL;
	}
	/* make sure rdev->size exceeds mddev->size */
	if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
		if (mddev->pers)
			/* Cannot change size, so fail */
			return -ENOSPC;
		else
			mddev->size = rdev->size;
	}
	same_pdev = match_dev_unit(mddev, rdev);
	if (same_pdev)
		printk(KERN_WARNING
			"%s: WARNING: %s appears to be on the same physical"
			" disk as %s.\n True protection against single-disk"
			" failure might be compromised.\n",
			mdname(mddev), bdevname(rdev->bdev,b),
			bdevname(same_pdev->bdev,b2));

	/* Verify rdev->desc_nr is unique.
	 * If it is -1, assign a free number, else
	 * check number is not in use
	 */
	if (rdev->desc_nr < 0) {
		int choice = 0;
		if (mddev->pers) choice = mddev->raid_disks;
		while (find_rdev_nr(mddev, choice))
			choice++;
		rdev->desc_nr = choice;
	} else {
		if (find_rdev_nr(mddev, rdev->desc_nr))
			return -EBUSY;
	}
	bdevname(rdev->bdev,b);
	if (kobject_set_name(&rdev->kobj, "dev-%s", b) < 0)
		return -ENOMEM;
	while ( (s=strchr(rdev->kobj.k_name, '/')) != NULL)
		*s = '!';

	list_add(&rdev->same_set, &mddev->disks);
	rdev->mddev = mddev;
	printk(KERN_INFO "md: bind<%s>\n", b);

	rdev->kobj.parent = &mddev->kobj;
	kobject_add(&rdev->kobj);

	if (rdev->bdev->bd_part)
		ko = &rdev->bdev->bd_part->kobj;
	else
		ko = &rdev->bdev->bd_disk->kobj;
	sysfs_create_link(&rdev->kobj, ko, "block");
	bd_claim_by_disk(rdev->bdev, rdev, mddev->gendisk);
	return 0;
}

static void unbind_rdev_from_array(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->mddev) {
		MD_BUG();
		return;
	}
	bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
	list_del_init(&rdev->same_set);
	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
	rdev->mddev = NULL;
	sysfs_remove_link(&rdev->kobj, "block");
	kobject_del(&rdev->kobj);
}

/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
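 *
 * (Added note, not in the original.)  lock_rdev() below pairs
 * open_by_devnum() with bd_claim(), and unlock_rdev() undoes both with
 * bd_release() plus blkdev_put(); while the claim is held, mounting the
 * device or claiming it again with a different holder fails with -EBUSY.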
1375 */ 1376 static int lock_rdev(mdk_rdev_t *rdev, dev_t dev) 1377 { 1378 int err = 0; 1379 struct block_device *bdev; 1380 char b[BDEVNAME_SIZE]; 1381 1382 bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE); 1383 if (IS_ERR(bdev)) { 1384 printk(KERN_ERR "md: could not open %s.\n", 1385 __bdevname(dev, b)); 1386 return PTR_ERR(bdev); 1387 } 1388 err = bd_claim(bdev, rdev); 1389 if (err) { 1390 printk(KERN_ERR "md: could not bd_claim %s.\n", 1391 bdevname(bdev, b)); 1392 blkdev_put(bdev); 1393 return err; 1394 } 1395 rdev->bdev = bdev; 1396 return err; 1397 } 1398 1399 static void unlock_rdev(mdk_rdev_t *rdev) 1400 { 1401 struct block_device *bdev = rdev->bdev; 1402 rdev->bdev = NULL; 1403 if (!bdev) 1404 MD_BUG(); 1405 bd_release(bdev); 1406 blkdev_put(bdev); 1407 } 1408 1409 void md_autodetect_dev(dev_t dev); 1410 1411 static void export_rdev(mdk_rdev_t * rdev) 1412 { 1413 char b[BDEVNAME_SIZE]; 1414 printk(KERN_INFO "md: export_rdev(%s)\n", 1415 bdevname(rdev->bdev,b)); 1416 if (rdev->mddev) 1417 MD_BUG(); 1418 free_disk_sb(rdev); 1419 list_del_init(&rdev->same_set); 1420 #ifndef MODULE 1421 md_autodetect_dev(rdev->bdev->bd_dev); 1422 #endif 1423 unlock_rdev(rdev); 1424 kobject_put(&rdev->kobj); 1425 } 1426 1427 static void kick_rdev_from_array(mdk_rdev_t * rdev) 1428 { 1429 unbind_rdev_from_array(rdev); 1430 export_rdev(rdev); 1431 } 1432 1433 static void export_array(mddev_t *mddev) 1434 { 1435 struct list_head *tmp; 1436 mdk_rdev_t *rdev; 1437 1438 ITERATE_RDEV(mddev,rdev,tmp) { 1439 if (!rdev->mddev) { 1440 MD_BUG(); 1441 continue; 1442 } 1443 kick_rdev_from_array(rdev); 1444 } 1445 if (!list_empty(&mddev->disks)) 1446 MD_BUG(); 1447 mddev->raid_disks = 0; 1448 mddev->major_version = 0; 1449 } 1450 1451 static void print_desc(mdp_disk_t *desc) 1452 { 1453 printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number, 1454 desc->major,desc->minor,desc->raid_disk,desc->state); 1455 } 1456 1457 static void print_sb(mdp_super_t *sb) 1458 { 1459 int i; 1460 1461 printk(KERN_INFO 1462 "md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n", 1463 sb->major_version, sb->minor_version, sb->patch_version, 1464 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3, 1465 sb->ctime); 1466 printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n", 1467 sb->level, sb->size, sb->nr_disks, sb->raid_disks, 1468 sb->md_minor, sb->layout, sb->chunk_size); 1469 printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d" 1470 " FD:%d SD:%d CSUM:%08x E:%08lx\n", 1471 sb->utime, sb->state, sb->active_disks, sb->working_disks, 1472 sb->failed_disks, sb->spare_disks, 1473 sb->sb_csum, (unsigned long)sb->events_lo); 1474 1475 printk(KERN_INFO); 1476 for (i = 0; i < MD_SB_DISKS; i++) { 1477 mdp_disk_t *desc; 1478 1479 desc = sb->disks + i; 1480 if (desc->number || desc->major || desc->minor || 1481 desc->raid_disk || (desc->state && (desc->state != 4))) { 1482 printk(" D %2d: ", i); 1483 print_desc(desc); 1484 } 1485 } 1486 printk(KERN_INFO "md: THIS: "); 1487 print_desc(&sb->this_disk); 1488 1489 } 1490 1491 static void print_rdev(mdk_rdev_t *rdev) 1492 { 1493 char b[BDEVNAME_SIZE]; 1494 printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n", 1495 bdevname(rdev->bdev,b), (unsigned long long)rdev->size, 1496 test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags), 1497 rdev->desc_nr); 1498 if (rdev->sb_loaded) { 1499 printk(KERN_INFO "md: rdev superblock:\n"); 1500 print_sb((mdp_super_t*)page_address(rdev->sb_page)); 1501 } else 1502 printk(KERN_INFO "md: no rdev superblock!\n"); 1503 } 1504 1505 void 
md_print_devices(void)
{
	struct list_head *tmp, *tmp2;
	mdk_rdev_t *rdev;
	mddev_t *mddev;
	char b[BDEVNAME_SIZE];

	printk("\n");
	printk("md: **********************************\n");
	printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n");
	printk("md: **********************************\n");
	ITERATE_MDDEV(mddev,tmp) {

		if (mddev->bitmap)
			bitmap_print_sb(mddev->bitmap);
		else
			printk("%s: ", mdname(mddev));
		ITERATE_RDEV(mddev,rdev,tmp2)
			printk("<%s>", bdevname(rdev->bdev,b));
		printk("\n");

		ITERATE_RDEV(mddev,rdev,tmp2)
			print_rdev(rdev);
	}
	printk("md: **********************************\n");
	printk("\n");
}


static void sync_sbs(mddev_t * mddev)
{
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	ITERATE_RDEV(mddev,rdev,tmp) {
		super_types[mddev->major_version].
			sync_super(mddev, rdev);
		rdev->sb_loaded = 1;
	}
}

void md_update_sb(mddev_t * mddev)
{
	int err;
	struct list_head *tmp;
	mdk_rdev_t *rdev;
	int sync_req;

repeat:
	spin_lock_irq(&mddev->write_lock);
	sync_req = mddev->in_sync;
	mddev->utime = get_seconds();
	mddev->events ++;

	if (!mddev->events) {
		/*
		 * oops, this 64-bit counter should never wrap.
		 * Either we are in around ~1 trillion A.C., assuming
		 * 1 reboot per second, or we have a bug:
		 */
		MD_BUG();
		mddev->events --;
	}
	mddev->sb_dirty = 2;
	sync_sbs(mddev);

	/*
	 * do not write anything to disk if using
	 * nonpersistent superblocks
	 */
	if (!mddev->persistent) {
		mddev->sb_dirty = 0;
		spin_unlock_irq(&mddev->write_lock);
		wake_up(&mddev->sb_wait);
		return;
	}
	spin_unlock_irq(&mddev->write_lock);

	dprintk(KERN_INFO
		"md: updating %s RAID superblock on device (in sync %d)\n",
		mdname(mddev),mddev->in_sync);

	err = bitmap_update_sb(mddev->bitmap);
	ITERATE_RDEV(mddev,rdev,tmp) {
		char b[BDEVNAME_SIZE];
		dprintk(KERN_INFO "md: ");
		if (test_bit(Faulty, &rdev->flags))
			dprintk("(skipping faulty ");

		dprintk("%s ", bdevname(rdev->bdev,b));
		if (!test_bit(Faulty, &rdev->flags)) {
			md_super_write(mddev,rdev,
				       rdev->sb_offset<<1, rdev->sb_size,
				       rdev->sb_page);
			dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
				bdevname(rdev->bdev,b),
				(unsigned long long)rdev->sb_offset);

		} else
			dprintk(")\n");
		if (mddev->level == LEVEL_MULTIPATH)
			/* only need to write one superblock... */
			break;
	}
	md_super_wait(mddev);
	/* if there was a failure, sb_dirty was set to 1, and we re-write super */

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync != sync_req || mddev->sb_dirty == 1) {
		/* have to write it out again */
		spin_unlock_irq(&mddev->write_lock);
		goto repeat;
	}
	mddev->sb_dirty = 0;
	spin_unlock_irq(&mddev->write_lock);
	wake_up(&mddev->sb_wait);

}
EXPORT_SYMBOL_GPL(md_update_sb);

/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either case. For this we use cmd_match.
 */
static int cmd_match(const char *cmd, const char *str)
{
	/* See if cmd, written into a sysfs file, matches
	 * str.
They must either be the same, or cmd can 1632 * have a trailing newline 1633 */ 1634 while (*cmd && *str && *cmd == *str) { 1635 cmd++; 1636 str++; 1637 } 1638 if (*cmd == '\n') 1639 cmd++; 1640 if (*str || *cmd) 1641 return 0; 1642 return 1; 1643 } 1644 1645 struct rdev_sysfs_entry { 1646 struct attribute attr; 1647 ssize_t (*show)(mdk_rdev_t *, char *); 1648 ssize_t (*store)(mdk_rdev_t *, const char *, size_t); 1649 }; 1650 1651 static ssize_t 1652 state_show(mdk_rdev_t *rdev, char *page) 1653 { 1654 char *sep = ""; 1655 int len=0; 1656 1657 if (test_bit(Faulty, &rdev->flags)) { 1658 len+= sprintf(page+len, "%sfaulty",sep); 1659 sep = ","; 1660 } 1661 if (test_bit(In_sync, &rdev->flags)) { 1662 len += sprintf(page+len, "%sin_sync",sep); 1663 sep = ","; 1664 } 1665 if (!test_bit(Faulty, &rdev->flags) && 1666 !test_bit(In_sync, &rdev->flags)) { 1667 len += sprintf(page+len, "%sspare", sep); 1668 sep = ","; 1669 } 1670 return len+sprintf(page+len, "\n"); 1671 } 1672 1673 static struct rdev_sysfs_entry 1674 rdev_state = __ATTR_RO(state); 1675 1676 static ssize_t 1677 super_show(mdk_rdev_t *rdev, char *page) 1678 { 1679 if (rdev->sb_loaded && rdev->sb_size) { 1680 memcpy(page, page_address(rdev->sb_page), rdev->sb_size); 1681 return rdev->sb_size; 1682 } else 1683 return 0; 1684 } 1685 static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super); 1686 1687 static ssize_t 1688 errors_show(mdk_rdev_t *rdev, char *page) 1689 { 1690 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); 1691 } 1692 1693 static ssize_t 1694 errors_store(mdk_rdev_t *rdev, const char *buf, size_t len) 1695 { 1696 char *e; 1697 unsigned long n = simple_strtoul(buf, &e, 10); 1698 if (*buf && (*e == 0 || *e == '\n')) { 1699 atomic_set(&rdev->corrected_errors, n); 1700 return len; 1701 } 1702 return -EINVAL; 1703 } 1704 static struct rdev_sysfs_entry rdev_errors = 1705 __ATTR(errors, 0644, errors_show, errors_store); 1706 1707 static ssize_t 1708 slot_show(mdk_rdev_t *rdev, char *page) 1709 { 1710 if (rdev->raid_disk < 0) 1711 return sprintf(page, "none\n"); 1712 else 1713 return sprintf(page, "%d\n", rdev->raid_disk); 1714 } 1715 1716 static ssize_t 1717 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) 1718 { 1719 char *e; 1720 int slot = simple_strtoul(buf, &e, 10); 1721 if (strncmp(buf, "none", 4)==0) 1722 slot = -1; 1723 else if (e==buf || (*e && *e!= '\n')) 1724 return -EINVAL; 1725 if (rdev->mddev->pers) 1726 /* Cannot set slot in active array (yet) */ 1727 return -EBUSY; 1728 if (slot >= rdev->mddev->raid_disks) 1729 return -ENOSPC; 1730 rdev->raid_disk = slot; 1731 /* assume it is working */ 1732 rdev->flags = 0; 1733 set_bit(In_sync, &rdev->flags); 1734 return len; 1735 } 1736 1737 1738 static struct rdev_sysfs_entry rdev_slot = 1739 __ATTR(slot, 0644, slot_show, slot_store); 1740 1741 static ssize_t 1742 offset_show(mdk_rdev_t *rdev, char *page) 1743 { 1744 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); 1745 } 1746 1747 static ssize_t 1748 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len) 1749 { 1750 char *e; 1751 unsigned long long offset = simple_strtoull(buf, &e, 10); 1752 if (e==buf || (*e && *e != '\n')) 1753 return -EINVAL; 1754 if (rdev->mddev->pers) 1755 return -EBUSY; 1756 rdev->data_offset = offset; 1757 return len; 1758 } 1759 1760 static struct rdev_sysfs_entry rdev_offset = 1761 __ATTR(offset, 0644, offset_show, offset_store); 1762 1763 static ssize_t 1764 rdev_size_show(mdk_rdev_t *rdev, char *page) 1765 { 1766 return sprintf(page, "%llu\n", 
(unsigned long long)rdev->size); 1767 } 1768 1769 static ssize_t 1770 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) 1771 { 1772 char *e; 1773 unsigned long long size = simple_strtoull(buf, &e, 10); 1774 if (e==buf || (*e && *e != '\n')) 1775 return -EINVAL; 1776 if (rdev->mddev->pers) 1777 return -EBUSY; 1778 rdev->size = size; 1779 if (size < rdev->mddev->size || rdev->mddev->size == 0) 1780 rdev->mddev->size = size; 1781 return len; 1782 } 1783 1784 static struct rdev_sysfs_entry rdev_size = 1785 __ATTR(size, 0644, rdev_size_show, rdev_size_store); 1786 1787 static struct attribute *rdev_default_attrs[] = { 1788 &rdev_state.attr, 1789 &rdev_super.attr, 1790 &rdev_errors.attr, 1791 &rdev_slot.attr, 1792 &rdev_offset.attr, 1793 &rdev_size.attr, 1794 NULL, 1795 }; 1796 static ssize_t 1797 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 1798 { 1799 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 1800 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); 1801 1802 if (!entry->show) 1803 return -EIO; 1804 return entry->show(rdev, page); 1805 } 1806 1807 static ssize_t 1808 rdev_attr_store(struct kobject *kobj, struct attribute *attr, 1809 const char *page, size_t length) 1810 { 1811 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 1812 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); 1813 1814 if (!entry->store) 1815 return -EIO; 1816 return entry->store(rdev, page, length); 1817 } 1818 1819 static void rdev_free(struct kobject *ko) 1820 { 1821 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj); 1822 kfree(rdev); 1823 } 1824 static struct sysfs_ops rdev_sysfs_ops = { 1825 .show = rdev_attr_show, 1826 .store = rdev_attr_store, 1827 }; 1828 static struct kobj_type rdev_ktype = { 1829 .release = rdev_free, 1830 .sysfs_ops = &rdev_sysfs_ops, 1831 .default_attrs = rdev_default_attrs, 1832 }; 1833 1834 /* 1835 * Import a device. If 'super_format' >= 0, then sanity check the superblock 1836 * 1837 * mark the device faulty if: 1838 * 1839 * - the device is nonexistent (zero size) 1840 * - the device has no valid superblock 1841 * 1842 * a faulty rdev _never_ has rdev->sb set. 1843 */ 1844 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor) 1845 { 1846 char b[BDEVNAME_SIZE]; 1847 int err; 1848 mdk_rdev_t *rdev; 1849 sector_t size; 1850 1851 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); 1852 if (!rdev) { 1853 printk(KERN_ERR "md: could not alloc mem for new device!\n"); 1854 return ERR_PTR(-ENOMEM); 1855 } 1856 1857 if ((err = alloc_disk_sb(rdev))) 1858 goto abort_free; 1859 1860 err = lock_rdev(rdev, newdev); 1861 if (err) 1862 goto abort_free; 1863 1864 rdev->kobj.parent = NULL; 1865 rdev->kobj.ktype = &rdev_ktype; 1866 kobject_init(&rdev->kobj); 1867 1868 rdev->desc_nr = -1; 1869 rdev->flags = 0; 1870 rdev->data_offset = 0; 1871 atomic_set(&rdev->nr_pending, 0); 1872 atomic_set(&rdev->read_errors, 0); 1873 atomic_set(&rdev->corrected_errors, 0); 1874 1875 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 1876 if (!size) { 1877 printk(KERN_WARNING 1878 "md: %s has zero or unknown size, marking faulty!\n", 1879 bdevname(rdev->bdev,b)); 1880 err = -EINVAL; 1881 goto abort_free; 1882 } 1883 1884 if (super_format >= 0) { 1885 err = super_types[super_format]. 
1886 load_super(rdev, NULL, super_minor); 1887 if (err == -EINVAL) { 1888 printk(KERN_WARNING 1889 "md: %s has invalid sb, not importing!\n", 1890 bdevname(rdev->bdev,b)); 1891 goto abort_free; 1892 } 1893 if (err < 0) { 1894 printk(KERN_WARNING 1895 "md: could not read %s's sb, not importing!\n", 1896 bdevname(rdev->bdev,b)); 1897 goto abort_free; 1898 } 1899 } 1900 INIT_LIST_HEAD(&rdev->same_set); 1901 1902 return rdev; 1903 1904 abort_free: 1905 if (rdev->sb_page) { 1906 if (rdev->bdev) 1907 unlock_rdev(rdev); 1908 free_disk_sb(rdev); 1909 } 1910 kfree(rdev); 1911 return ERR_PTR(err); 1912 } 1913 1914 /* 1915 * Check a full RAID array for plausibility 1916 */ 1917 1918 1919 static void analyze_sbs(mddev_t * mddev) 1920 { 1921 int i; 1922 struct list_head *tmp; 1923 mdk_rdev_t *rdev, *freshest; 1924 char b[BDEVNAME_SIZE]; 1925 1926 freshest = NULL; 1927 ITERATE_RDEV(mddev,rdev,tmp) 1928 switch (super_types[mddev->major_version]. 1929 load_super(rdev, freshest, mddev->minor_version)) { 1930 case 1: 1931 freshest = rdev; 1932 break; 1933 case 0: 1934 break; 1935 default: 1936 printk( KERN_ERR \ 1937 "md: fatal superblock inconsistency in %s" 1938 " -- removing from array\n", 1939 bdevname(rdev->bdev,b)); 1940 kick_rdev_from_array(rdev); 1941 } 1942 1943 1944 super_types[mddev->major_version]. 1945 validate_super(mddev, freshest); 1946 1947 i = 0; 1948 ITERATE_RDEV(mddev,rdev,tmp) { 1949 if (rdev != freshest) 1950 if (super_types[mddev->major_version]. 1951 validate_super(mddev, rdev)) { 1952 printk(KERN_WARNING "md: kicking non-fresh %s" 1953 " from array!\n", 1954 bdevname(rdev->bdev,b)); 1955 kick_rdev_from_array(rdev); 1956 continue; 1957 } 1958 if (mddev->level == LEVEL_MULTIPATH) { 1959 rdev->desc_nr = i++; 1960 rdev->raid_disk = rdev->desc_nr; 1961 set_bit(In_sync, &rdev->flags); 1962 } 1963 } 1964 1965 1966 1967 if (mddev->recovery_cp != MaxSector && 1968 mddev->level >= 1) 1969 printk(KERN_ERR "md: %s: raid array is not clean" 1970 " -- starting background reconstruction\n", 1971 mdname(mddev)); 1972 1973 } 1974 1975 static ssize_t 1976 level_show(mddev_t *mddev, char *page) 1977 { 1978 struct mdk_personality *p = mddev->pers; 1979 if (p) 1980 return sprintf(page, "%s\n", p->name); 1981 else if (mddev->clevel[0]) 1982 return sprintf(page, "%s\n", mddev->clevel); 1983 else if (mddev->level != LEVEL_NONE) 1984 return sprintf(page, "%d\n", mddev->level); 1985 else 1986 return 0; 1987 } 1988 1989 static ssize_t 1990 level_store(mddev_t *mddev, const char *buf, size_t len) 1991 { 1992 int rv = len; 1993 if (mddev->pers) 1994 return -EBUSY; 1995 if (len == 0) 1996 return 0; 1997 if (len >= sizeof(mddev->clevel)) 1998 return -ENOSPC; 1999 strncpy(mddev->clevel, buf, len); 2000 if (mddev->clevel[len-1] == '\n') 2001 len--; 2002 mddev->clevel[len] = 0; 2003 mddev->level = LEVEL_NONE; 2004 return rv; 2005 } 2006 2007 static struct md_sysfs_entry md_level = 2008 __ATTR(level, 0644, level_show, level_store); 2009 2010 static ssize_t 2011 raid_disks_show(mddev_t *mddev, char *page) 2012 { 2013 if (mddev->raid_disks == 0) 2014 return 0; 2015 return sprintf(page, "%d\n", mddev->raid_disks); 2016 } 2017 2018 static int update_raid_disks(mddev_t *mddev, int raid_disks); 2019 2020 static ssize_t 2021 raid_disks_store(mddev_t *mddev, const char *buf, size_t len) 2022 { 2023 /* can only set raid_disks if array is not yet active */ 2024 char *e; 2025 int rv = 0; 2026 unsigned long n = simple_strtoul(buf, &e, 10); 2027 2028 if (!*buf || (*e && *e != '\n')) 2029 return -EINVAL; 2030 2031 if (mddev->pers) 
2032 rv = update_raid_disks(mddev, n); 2033 else 2034 mddev->raid_disks = n; 2035 return rv ? rv : len; 2036 } 2037 static struct md_sysfs_entry md_raid_disks = 2038 __ATTR(raid_disks, 0644, raid_disks_show, raid_disks_store); 2039 2040 static ssize_t 2041 chunk_size_show(mddev_t *mddev, char *page) 2042 { 2043 return sprintf(page, "%d\n", mddev->chunk_size); 2044 } 2045 2046 static ssize_t 2047 chunk_size_store(mddev_t *mddev, const char *buf, size_t len) 2048 { 2049 /* can only set chunk_size if array is not yet active */ 2050 char *e; 2051 unsigned long n = simple_strtoul(buf, &e, 10); 2052 2053 if (mddev->pers) 2054 return -EBUSY; 2055 if (!*buf || (*e && *e != '\n')) 2056 return -EINVAL; 2057 2058 mddev->chunk_size = n; 2059 return len; 2060 } 2061 static struct md_sysfs_entry md_chunk_size = 2062 __ATTR(chunk_size, 0644, chunk_size_show, chunk_size_store); 2063 2064 static ssize_t 2065 null_show(mddev_t *mddev, char *page) 2066 { 2067 return -EINVAL; 2068 } 2069 2070 static ssize_t 2071 new_dev_store(mddev_t *mddev, const char *buf, size_t len) 2072 { 2073 /* buf must be %d:%d\n? giving major and minor numbers */ 2074 /* The new device is added to the array. 2075 * If the array has a persistent superblock, we read the 2076 * superblock to initialise info and check validity. 2077 * Otherwise, only checking done is that in bind_rdev_to_array, 2078 * which mainly checks size. 2079 */ 2080 char *e; 2081 int major = simple_strtoul(buf, &e, 10); 2082 int minor; 2083 dev_t dev; 2084 mdk_rdev_t *rdev; 2085 int err; 2086 2087 if (!*buf || *e != ':' || !e[1] || e[1] == '\n') 2088 return -EINVAL; 2089 minor = simple_strtoul(e+1, &e, 10); 2090 if (*e && *e != '\n') 2091 return -EINVAL; 2092 dev = MKDEV(major, minor); 2093 if (major != MAJOR(dev) || 2094 minor != MINOR(dev)) 2095 return -EOVERFLOW; 2096 2097 2098 if (mddev->persistent) { 2099 rdev = md_import_device(dev, mddev->major_version, 2100 mddev->minor_version); 2101 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { 2102 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, 2103 mdk_rdev_t, same_set); 2104 err = super_types[mddev->major_version] 2105 .load_super(rdev, rdev0, mddev->minor_version); 2106 if (err < 0) 2107 goto out; 2108 } 2109 } else 2110 rdev = md_import_device(dev, -1, -1); 2111 2112 if (IS_ERR(rdev)) 2113 return PTR_ERR(rdev); 2114 err = bind_rdev_to_array(rdev, mddev); 2115 out: 2116 if (err) 2117 export_rdev(rdev); 2118 return err ? err : len; 2119 } 2120 2121 static struct md_sysfs_entry md_new_device = 2122 __ATTR(new_dev, 0200, null_show, new_dev_store); 2123 2124 static ssize_t 2125 size_show(mddev_t *mddev, char *page) 2126 { 2127 return sprintf(page, "%llu\n", (unsigned long long)mddev->size); 2128 } 2129 2130 static int update_size(mddev_t *mddev, unsigned long size); 2131 2132 static ssize_t 2133 size_store(mddev_t *mddev, const char *buf, size_t len) 2134 { 2135 /* If array is inactive, we can reduce the component size, but 2136 * not increase it (except from 0). 2137 * If array is active, we can try an on-line resize 2138 */ 2139 char *e; 2140 int err = 0; 2141 unsigned long long size = simple_strtoull(buf, &e, 10); 2142 if (!*buf || *buf == '\n' || 2143 (*e && *e != '\n')) 2144 return -EINVAL; 2145 2146 if (mddev->pers) { 2147 err = update_size(mddev, size); 2148 md_update_sb(mddev); 2149 } else { 2150 if (mddev->size == 0 || 2151 mddev->size > size) 2152 mddev->size = size; 2153 else 2154 err = -ENOSPC; 2155 } 2156 return err ? 
err : len; 2157 } 2158 2159 static struct md_sysfs_entry md_size = 2160 __ATTR(component_size, 0644, size_show, size_store); 2161 2162 2163 /* Metadata version. 2164 * This is either 'none' for arrays with externally managed metadata, 2165 * or N.M for internally known formats 2166 */ 2167 static ssize_t 2168 metadata_show(mddev_t *mddev, char *page) 2169 { 2170 if (mddev->persistent) 2171 return sprintf(page, "%d.%d\n", 2172 mddev->major_version, mddev->minor_version); 2173 else 2174 return sprintf(page, "none\n"); 2175 } 2176 2177 static ssize_t 2178 metadata_store(mddev_t *mddev, const char *buf, size_t len) 2179 { 2180 int major, minor; 2181 char *e; 2182 if (!list_empty(&mddev->disks)) 2183 return -EBUSY; 2184 2185 if (cmd_match(buf, "none")) { 2186 mddev->persistent = 0; 2187 mddev->major_version = 0; 2188 mddev->minor_version = 90; 2189 return len; 2190 } 2191 major = simple_strtoul(buf, &e, 10); 2192 if (e==buf || *e != '.') 2193 return -EINVAL; 2194 buf = e+1; 2195 minor = simple_strtoul(buf, &e, 10); 2196 if (e==buf || *e != '\n') 2197 return -EINVAL; 2198 if (major >= sizeof(super_types)/sizeof(super_types[0]) || 2199 super_types[major].name == NULL) 2200 return -ENOENT; 2201 mddev->major_version = major; 2202 mddev->minor_version = minor; 2203 mddev->persistent = 1; 2204 return len; 2205 } 2206 2207 static struct md_sysfs_entry md_metadata = 2208 __ATTR(metadata_version, 0644, metadata_show, metadata_store); 2209 2210 static ssize_t 2211 action_show(mddev_t *mddev, char *page) 2212 { 2213 char *type = "idle"; 2214 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 2215 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) { 2216 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 2217 type = "reshape"; 2218 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 2219 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 2220 type = "resync"; 2221 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) 2222 type = "check"; 2223 else 2224 type = "repair"; 2225 } else 2226 type = "recover"; 2227 } 2228 return sprintf(page, "%s\n", type); 2229 } 2230 2231 static ssize_t 2232 action_store(mddev_t *mddev, const char *page, size_t len) 2233 { 2234 if (!mddev->pers || !mddev->pers->sync_request) 2235 return -EINVAL; 2236 2237 if (cmd_match(page, "idle")) { 2238 if (mddev->sync_thread) { 2239 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 2240 md_unregister_thread(mddev->sync_thread); 2241 mddev->sync_thread = NULL; 2242 mddev->recovery = 0; 2243 } 2244 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 2245 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 2246 return -EBUSY; 2247 else if (cmd_match(page, "resync") || cmd_match(page, "recover")) 2248 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2249 else if (cmd_match(page, "reshape")) { 2250 int err; 2251 if (mddev->pers->start_reshape == NULL) 2252 return -EINVAL; 2253 err = mddev->pers->start_reshape(mddev); 2254 if (err) 2255 return err; 2256 } else { 2257 if (cmd_match(page, "check")) 2258 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); 2259 else if (!cmd_match(page, "repair")) 2260 return -EINVAL; 2261 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 2262 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 2263 } 2264 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2265 md_wakeup_thread(mddev->thread); 2266 return len; 2267 } 2268 2269 static ssize_t 2270 mismatch_cnt_show(mddev_t *mddev, char *page) 2271 { 2272 return sprintf(page, "%llu\n", 2273 (unsigned long long) mddev->resync_mismatches); 2274 } 2275 2276 static struct
md_sysfs_entry 2277 md_scan_mode = __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); 2278 2279 2280 static struct md_sysfs_entry 2281 md_mismatches = __ATTR_RO(mismatch_cnt); 2282 2283 static ssize_t 2284 sync_min_show(mddev_t *mddev, char *page) 2285 { 2286 return sprintf(page, "%d (%s)\n", speed_min(mddev), 2287 mddev->sync_speed_min ? "local": "system"); 2288 } 2289 2290 static ssize_t 2291 sync_min_store(mddev_t *mddev, const char *buf, size_t len) 2292 { 2293 int min; 2294 char *e; 2295 if (strncmp(buf, "system", 6)==0) { 2296 mddev->sync_speed_min = 0; 2297 return len; 2298 } 2299 min = simple_strtoul(buf, &e, 10); 2300 if (buf == e || (*e && *e != '\n') || min <= 0) 2301 return -EINVAL; 2302 mddev->sync_speed_min = min; 2303 return len; 2304 } 2305 2306 static struct md_sysfs_entry md_sync_min = 2307 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); 2308 2309 static ssize_t 2310 sync_max_show(mddev_t *mddev, char *page) 2311 { 2312 return sprintf(page, "%d (%s)\n", speed_max(mddev), 2313 mddev->sync_speed_max ? "local": "system"); 2314 } 2315 2316 static ssize_t 2317 sync_max_store(mddev_t *mddev, const char *buf, size_t len) 2318 { 2319 int max; 2320 char *e; 2321 if (strncmp(buf, "system", 6)==0) { 2322 mddev->sync_speed_max = 0; 2323 return len; 2324 } 2325 max = simple_strtoul(buf, &e, 10); 2326 if (buf == e || (*e && *e != '\n') || max <= 0) 2327 return -EINVAL; 2328 mddev->sync_speed_max = max; 2329 return len; 2330 } 2331 2332 static struct md_sysfs_entry md_sync_max = 2333 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); 2334 2335 2336 static ssize_t 2337 sync_speed_show(mddev_t *mddev, char *page) 2338 { 2339 unsigned long resync, dt, db; 2340 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active)); 2341 dt = ((jiffies - mddev->resync_mark) / HZ); 2342 if (!dt) dt++; 2343 db = resync - (mddev->resync_mark_cnt); 2344 return sprintf(page, "%ld\n", db/dt/2); /* K/sec */ 2345 } 2346 2347 static struct md_sysfs_entry 2348 md_sync_speed = __ATTR_RO(sync_speed); 2349 2350 static ssize_t 2351 sync_completed_show(mddev_t *mddev, char *page) 2352 { 2353 unsigned long max_blocks, resync; 2354 2355 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 2356 max_blocks = mddev->resync_max_sectors; 2357 else 2358 max_blocks = mddev->size << 1; 2359 2360 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active)); 2361 return sprintf(page, "%lu / %lu\n", resync, max_blocks); 2362 } 2363 2364 static struct md_sysfs_entry 2365 md_sync_completed = __ATTR_RO(sync_completed); 2366 2367 static ssize_t 2368 suspend_lo_show(mddev_t *mddev, char *page) 2369 { 2370 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); 2371 } 2372 2373 static ssize_t 2374 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len) 2375 { 2376 char *e; 2377 unsigned long long new = simple_strtoull(buf, &e, 10); 2378 2379 if (mddev->pers->quiesce == NULL) 2380 return -EINVAL; 2381 if (buf == e || (*e && *e != '\n')) 2382 return -EINVAL; 2383 if (new >= mddev->suspend_hi || 2384 (new > mddev->suspend_lo && new < mddev->suspend_hi)) { 2385 mddev->suspend_lo = new; 2386 mddev->pers->quiesce(mddev, 2); 2387 return len; 2388 } else 2389 return -EINVAL; 2390 } 2391 static struct md_sysfs_entry md_suspend_lo = 2392 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store); 2393 2394 2395 static ssize_t 2396 suspend_hi_show(mddev_t *mddev, char *page) 2397 { 2398 return sprintf(page, "%llu\n", (unsigned long 
long)mddev->suspend_hi); 2399 } 2400 2401 static ssize_t 2402 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len) 2403 { 2404 char *e; 2405 unsigned long long new = simple_strtoull(buf, &e, 10); 2406 2407 if (mddev->pers->quiesce == NULL) 2408 return -EINVAL; 2409 if (buf == e || (*e && *e != '\n')) 2410 return -EINVAL; 2411 if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) || 2412 (new > mddev->suspend_lo && new > mddev->suspend_hi)) { 2413 mddev->suspend_hi = new; 2414 mddev->pers->quiesce(mddev, 1); 2415 mddev->pers->quiesce(mddev, 0); 2416 return len; 2417 } else 2418 return -EINVAL; 2419 } 2420 static struct md_sysfs_entry md_suspend_hi = 2421 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); 2422 2423 2424 static struct attribute *md_default_attrs[] = { 2425 &md_level.attr, 2426 &md_raid_disks.attr, 2427 &md_chunk_size.attr, 2428 &md_size.attr, 2429 &md_metadata.attr, 2430 &md_new_device.attr, 2431 NULL, 2432 }; 2433 2434 static struct attribute *md_redundancy_attrs[] = { 2435 &md_scan_mode.attr, 2436 &md_mismatches.attr, 2437 &md_sync_min.attr, 2438 &md_sync_max.attr, 2439 &md_sync_speed.attr, 2440 &md_sync_completed.attr, 2441 &md_suspend_lo.attr, 2442 &md_suspend_hi.attr, 2443 NULL, 2444 }; 2445 static struct attribute_group md_redundancy_group = { 2446 .name = NULL, 2447 .attrs = md_redundancy_attrs, 2448 }; 2449 2450 2451 static ssize_t 2452 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 2453 { 2454 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 2455 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); 2456 ssize_t rv; 2457 2458 if (!entry->show) 2459 return -EIO; 2460 mddev_lock(mddev); 2461 rv = entry->show(mddev, page); 2462 mddev_unlock(mddev); 2463 return rv; 2464 } 2465 2466 static ssize_t 2467 md_attr_store(struct kobject *kobj, struct attribute *attr, 2468 const char *page, size_t length) 2469 { 2470 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 2471 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); 2472 ssize_t rv; 2473 2474 if (!entry->store) 2475 return -EIO; 2476 mddev_lock(mddev); 2477 rv = entry->store(mddev, page, length); 2478 mddev_unlock(mddev); 2479 return rv; 2480 } 2481 2482 static void md_free(struct kobject *ko) 2483 { 2484 mddev_t *mddev = container_of(ko, mddev_t, kobj); 2485 kfree(mddev); 2486 } 2487 2488 static struct sysfs_ops md_sysfs_ops = { 2489 .show = md_attr_show, 2490 .store = md_attr_store, 2491 }; 2492 static struct kobj_type md_ktype = { 2493 .release = md_free, 2494 .sysfs_ops = &md_sysfs_ops, 2495 .default_attrs = md_default_attrs, 2496 }; 2497 2498 int mdp_major = 0; 2499 2500 static struct kobject *md_probe(dev_t dev, int *part, void *data) 2501 { 2502 static DEFINE_MUTEX(disks_mutex); 2503 mddev_t *mddev = mddev_find(dev); 2504 struct gendisk *disk; 2505 int partitioned = (MAJOR(dev) != MD_MAJOR); 2506 int shift = partitioned ? 
MdpMinorShift : 0; 2507 int unit = MINOR(dev) >> shift; 2508 2509 if (!mddev) 2510 return NULL; 2511 2512 mutex_lock(&disks_mutex); 2513 if (mddev->gendisk) { 2514 mutex_unlock(&disks_mutex); 2515 mddev_put(mddev); 2516 return NULL; 2517 } 2518 disk = alloc_disk(1 << shift); 2519 if (!disk) { 2520 mutex_unlock(&disks_mutex); 2521 mddev_put(mddev); 2522 return NULL; 2523 } 2524 disk->major = MAJOR(dev); 2525 disk->first_minor = unit << shift; 2526 if (partitioned) { 2527 sprintf(disk->disk_name, "md_d%d", unit); 2528 sprintf(disk->devfs_name, "md/d%d", unit); 2529 } else { 2530 sprintf(disk->disk_name, "md%d", unit); 2531 sprintf(disk->devfs_name, "md/%d", unit); 2532 } 2533 disk->fops = &md_fops; 2534 disk->private_data = mddev; 2535 disk->queue = mddev->queue; 2536 add_disk(disk); 2537 mddev->gendisk = disk; 2538 mutex_unlock(&disks_mutex); 2539 mddev->kobj.parent = &disk->kobj; 2540 mddev->kobj.k_name = NULL; 2541 snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md"); 2542 mddev->kobj.ktype = &md_ktype; 2543 kobject_register(&mddev->kobj); 2544 return NULL; 2545 } 2546 2547 void md_wakeup_thread(mdk_thread_t *thread); 2548 2549 static void md_safemode_timeout(unsigned long data) 2550 { 2551 mddev_t *mddev = (mddev_t *) data; 2552 2553 mddev->safemode = 1; 2554 md_wakeup_thread(mddev->thread); 2555 } 2556 2557 static int start_dirty_degraded; 2558 2559 static int do_md_run(mddev_t * mddev) 2560 { 2561 int err; 2562 int chunk_size; 2563 struct list_head *tmp; 2564 mdk_rdev_t *rdev; 2565 struct gendisk *disk; 2566 struct mdk_personality *pers; 2567 char b[BDEVNAME_SIZE]; 2568 2569 if (list_empty(&mddev->disks)) 2570 /* cannot run an array with no devices.. */ 2571 return -EINVAL; 2572 2573 if (mddev->pers) 2574 return -EBUSY; 2575 2576 /* 2577 * Analyze all RAID superblock(s) 2578 */ 2579 if (!mddev->raid_disks) 2580 analyze_sbs(mddev); 2581 2582 chunk_size = mddev->chunk_size; 2583 2584 if (chunk_size) { 2585 if (chunk_size > MAX_CHUNK_SIZE) { 2586 printk(KERN_ERR "too big chunk_size: %d > %d\n", 2587 chunk_size, MAX_CHUNK_SIZE); 2588 return -EINVAL; 2589 } 2590 /* 2591 * chunk-size has to be a power of 2 and multiples of PAGE_SIZE 2592 */ 2593 if ( (1 << ffz(~chunk_size)) != chunk_size) { 2594 printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size); 2595 return -EINVAL; 2596 } 2597 if (chunk_size < PAGE_SIZE) { 2598 printk(KERN_ERR "too small chunk_size: %d < %ld\n", 2599 chunk_size, PAGE_SIZE); 2600 return -EINVAL; 2601 } 2602 2603 /* devices must have minimum size of one chunk */ 2604 ITERATE_RDEV(mddev,rdev,tmp) { 2605 if (test_bit(Faulty, &rdev->flags)) 2606 continue; 2607 if (rdev->size < chunk_size / 1024) { 2608 printk(KERN_WARNING 2609 "md: Dev %s smaller than chunk_size:" 2610 " %lluk < %dk\n", 2611 bdevname(rdev->bdev,b), 2612 (unsigned long long)rdev->size, 2613 chunk_size / 1024); 2614 return -EINVAL; 2615 } 2616 } 2617 } 2618 2619 #ifdef CONFIG_KMOD 2620 if (mddev->level != LEVEL_NONE) 2621 request_module("md-level-%d", mddev->level); 2622 else if (mddev->clevel[0]) 2623 request_module("md-%s", mddev->clevel); 2624 #endif 2625 2626 /* 2627 * Drop all container device buffers, from now on 2628 * the only valid external interface is through the md 2629 * device. 
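 * (the loop below calls sync_blockdev() and invalidate_bdev() on each
 * non-faulty member device, writing out and then dropping any pages
 * still cached for it)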
2630 * Also find largest hardsector size 2631 */ 2632 ITERATE_RDEV(mddev,rdev,tmp) { 2633 if (test_bit(Faulty, &rdev->flags)) 2634 continue; 2635 sync_blockdev(rdev->bdev); 2636 invalidate_bdev(rdev->bdev, 0); 2637 } 2638 2639 md_probe(mddev->unit, NULL, NULL); 2640 disk = mddev->gendisk; 2641 if (!disk) 2642 return -ENOMEM; 2643 2644 spin_lock(&pers_lock); 2645 pers = find_pers(mddev->level, mddev->clevel); 2646 if (!pers || !try_module_get(pers->owner)) { 2647 spin_unlock(&pers_lock); 2648 if (mddev->level != LEVEL_NONE) 2649 printk(KERN_WARNING "md: personality for level %d is not loaded!\n", 2650 mddev->level); 2651 else 2652 printk(KERN_WARNING "md: personality for level %s is not loaded!\n", 2653 mddev->clevel); 2654 return -EINVAL; 2655 } 2656 mddev->pers = pers; 2657 spin_unlock(&pers_lock); 2658 mddev->level = pers->level; 2659 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 2660 2661 if (mddev->reshape_position != MaxSector && 2662 pers->start_reshape == NULL) { 2663 /* This personality cannot handle reshaping... */ 2664 mddev->pers = NULL; 2665 module_put(pers->owner); 2666 return -EINVAL; 2667 } 2668 2669 mddev->recovery = 0; 2670 mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */ 2671 mddev->barriers_work = 1; 2672 mddev->ok_start_degraded = start_dirty_degraded; 2673 2674 if (start_readonly) 2675 mddev->ro = 2; /* read-only, but switch on first write */ 2676 2677 err = mddev->pers->run(mddev); 2678 if (!err && mddev->pers->sync_request) { 2679 err = bitmap_create(mddev); 2680 if (err) { 2681 printk(KERN_ERR "%s: failed to create bitmap (%d)\n", 2682 mdname(mddev), err); 2683 mddev->pers->stop(mddev); 2684 } 2685 } 2686 if (err) { 2687 printk(KERN_ERR "md: pers->run() failed ...\n"); 2688 module_put(mddev->pers->owner); 2689 mddev->pers = NULL; 2690 bitmap_destroy(mddev); 2691 return err; 2692 } 2693 if (mddev->pers->sync_request) 2694 sysfs_create_group(&mddev->kobj, &md_redundancy_group); 2695 else if (mddev->ro == 2) /* auto-readonly not meaningful */ 2696 mddev->ro = 0; 2697 2698 atomic_set(&mddev->writes_pending,0); 2699 mddev->safemode = 0; 2700 mddev->safemode_timer.function = md_safemode_timeout; 2701 mddev->safemode_timer.data = (unsigned long) mddev; 2702 mddev->safemode_delay = (20 * HZ)/1000 +1; /* 20 msec delay */ 2703 mddev->in_sync = 1; 2704 2705 ITERATE_RDEV(mddev,rdev,tmp) 2706 if (rdev->raid_disk >= 0) { 2707 char nm[20]; 2708 sprintf(nm, "rd%d", rdev->raid_disk); 2709 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); 2710 } 2711 2712 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2713 md_wakeup_thread(mddev->thread); 2714 2715 if (mddev->sb_dirty) 2716 md_update_sb(mddev); 2717 2718 set_capacity(disk, mddev->array_size<<1); 2719 2720 /* If we call blk_queue_make_request here, it will 2721 * re-initialise max_sectors etc which may have been 2722 * refined inside -> run. So just set the bits we need to set. 2723 * Most initialisation happened when we called 2724 * blk_queue_make_request(..., md_fail_request) 2725 * earlier.
2726 */ 2727 mddev->queue->queuedata = mddev; 2728 mddev->queue->make_request_fn = mddev->pers->make_request; 2729 2730 mddev->changed = 1; 2731 md_new_event(mddev); 2732 return 0; 2733 } 2734 2735 static int restart_array(mddev_t *mddev) 2736 { 2737 struct gendisk *disk = mddev->gendisk; 2738 int err; 2739 2740 /* 2741 * Complain if it has no devices 2742 */ 2743 err = -ENXIO; 2744 if (list_empty(&mddev->disks)) 2745 goto out; 2746 2747 if (mddev->pers) { 2748 err = -EBUSY; 2749 if (!mddev->ro) 2750 goto out; 2751 2752 mddev->safemode = 0; 2753 mddev->ro = 0; 2754 set_disk_ro(disk, 0); 2755 2756 printk(KERN_INFO "md: %s switched to read-write mode.\n", 2757 mdname(mddev)); 2758 /* 2759 * Kick recovery or resync if necessary 2760 */ 2761 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2762 md_wakeup_thread(mddev->thread); 2763 err = 0; 2764 } else { 2765 printk(KERN_ERR "md: %s has no personality assigned.\n", 2766 mdname(mddev)); 2767 err = -EINVAL; 2768 } 2769 2770 out: 2771 return err; 2772 } 2773 2774 static int do_md_stop(mddev_t * mddev, int ro) 2775 { 2776 int err = 0; 2777 struct gendisk *disk = mddev->gendisk; 2778 2779 if (mddev->pers) { 2780 if (atomic_read(&mddev->active)>2) { 2781 printk("md: %s still in use.\n",mdname(mddev)); 2782 return -EBUSY; 2783 } 2784 2785 if (mddev->sync_thread) { 2786 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 2787 md_unregister_thread(mddev->sync_thread); 2788 mddev->sync_thread = NULL; 2789 } 2790 2791 del_timer_sync(&mddev->safemode_timer); 2792 2793 invalidate_partition(disk, 0); 2794 2795 if (ro) { 2796 err = -ENXIO; 2797 if (mddev->ro==1) 2798 goto out; 2799 mddev->ro = 1; 2800 } else { 2801 bitmap_flush(mddev); 2802 md_super_wait(mddev); 2803 if (mddev->ro) 2804 set_disk_ro(disk, 0); 2805 blk_queue_make_request(mddev->queue, md_fail_request); 2806 mddev->pers->stop(mddev); 2807 if (mddev->pers->sync_request) 2808 sysfs_remove_group(&mddev->kobj, &md_redundancy_group); 2809 2810 module_put(mddev->pers->owner); 2811 mddev->pers = NULL; 2812 if (mddev->ro) 2813 mddev->ro = 0; 2814 } 2815 if (!mddev->in_sync) { 2816 /* mark array as shutdown cleanly */ 2817 mddev->in_sync = 1; 2818 md_update_sb(mddev); 2819 } 2820 if (ro) 2821 set_disk_ro(disk, 1); 2822 } 2823 2824 /* 2825 * Free resources if final stop 2826 */ 2827 if (!ro) { 2828 mdk_rdev_t *rdev; 2829 struct list_head *tmp; 2830 struct gendisk *disk; 2831 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); 2832 2833 bitmap_destroy(mddev); 2834 if (mddev->bitmap_file) { 2835 atomic_set(&mddev->bitmap_file->f_dentry->d_inode->i_writecount, 1); 2836 fput(mddev->bitmap_file); 2837 mddev->bitmap_file = NULL; 2838 } 2839 mddev->bitmap_offset = 0; 2840 2841 ITERATE_RDEV(mddev,rdev,tmp) 2842 if (rdev->raid_disk >= 0) { 2843 char nm[20]; 2844 sprintf(nm, "rd%d", rdev->raid_disk); 2845 sysfs_remove_link(&mddev->kobj, nm); 2846 } 2847 2848 export_array(mddev); 2849 2850 mddev->array_size = 0; 2851 disk = mddev->gendisk; 2852 if (disk) 2853 set_capacity(disk, 0); 2854 mddev->changed = 1; 2855 } else 2856 printk(KERN_INFO "md: %s switched to read-only mode.\n", 2857 mdname(mddev)); 2858 err = 0; 2859 md_new_event(mddev); 2860 out: 2861 return err; 2862 } 2863 2864 static void autorun_array(mddev_t *mddev) 2865 { 2866 mdk_rdev_t *rdev; 2867 struct list_head *tmp; 2868 int err; 2869 2870 if (list_empty(&mddev->disks)) 2871 return; 2872 2873 printk(KERN_INFO "md: running: "); 2874 2875 ITERATE_RDEV(mddev,rdev,tmp) { 2876 char b[BDEVNAME_SIZE]; 2877 printk("<%s>", bdevname(rdev->bdev,b)); 2878 } 2879 
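/* the loop above builds a single log line such as
 * "md: running: <sdb1><sdc1><sdd1>"; the printk below terminates it */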
printk("\n"); 2880 2881 err = do_md_run (mddev); 2882 if (err) { 2883 printk(KERN_WARNING "md: do_md_run() returned %d\n", err); 2884 do_md_stop (mddev, 0); 2885 } 2886 } 2887 2888 /* 2889 * lets try to run arrays based on all disks that have arrived 2890 * until now. (those are in pending_raid_disks) 2891 * 2892 * the method: pick the first pending disk, collect all disks with 2893 * the same UUID, remove all from the pending list and put them into 2894 * the 'same_array' list. Then order this list based on superblock 2895 * update time (freshest comes first), kick out 'old' disks and 2896 * compare superblocks. If everything's fine then run it. 2897 * 2898 * If "unit" is allocated, then bump its reference count 2899 */ 2900 static void autorun_devices(int part) 2901 { 2902 struct list_head *tmp; 2903 mdk_rdev_t *rdev0, *rdev; 2904 mddev_t *mddev; 2905 char b[BDEVNAME_SIZE]; 2906 2907 printk(KERN_INFO "md: autorun ...\n"); 2908 while (!list_empty(&pending_raid_disks)) { 2909 dev_t dev; 2910 LIST_HEAD(candidates); 2911 rdev0 = list_entry(pending_raid_disks.next, 2912 mdk_rdev_t, same_set); 2913 2914 printk(KERN_INFO "md: considering %s ...\n", 2915 bdevname(rdev0->bdev,b)); 2916 INIT_LIST_HEAD(&candidates); 2917 ITERATE_RDEV_PENDING(rdev,tmp) 2918 if (super_90_load(rdev, rdev0, 0) >= 0) { 2919 printk(KERN_INFO "md: adding %s ...\n", 2920 bdevname(rdev->bdev,b)); 2921 list_move(&rdev->same_set, &candidates); 2922 } 2923 /* 2924 * now we have a set of devices, with all of them having 2925 * mostly sane superblocks. It's time to allocate the 2926 * mddev. 2927 */ 2928 if (rdev0->preferred_minor < 0 || rdev0->preferred_minor >= MAX_MD_DEVS) { 2929 printk(KERN_INFO "md: unit number in %s is bad: %d\n", 2930 bdevname(rdev0->bdev, b), rdev0->preferred_minor); 2931 break; 2932 } 2933 if (part) 2934 dev = MKDEV(mdp_major, 2935 rdev0->preferred_minor << MdpMinorShift); 2936 else 2937 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor); 2938 2939 md_probe(dev, NULL, NULL); 2940 mddev = mddev_find(dev); 2941 if (!mddev) { 2942 printk(KERN_ERR 2943 "md: cannot allocate memory for md drive.\n"); 2944 break; 2945 } 2946 if (mddev_lock(mddev)) 2947 printk(KERN_WARNING "md: %s locked, cannot run\n", 2948 mdname(mddev)); 2949 else if (mddev->raid_disks || mddev->major_version 2950 || !list_empty(&mddev->disks)) { 2951 printk(KERN_WARNING 2952 "md: %s already running, cannot run %s\n", 2953 mdname(mddev), bdevname(rdev0->bdev,b)); 2954 mddev_unlock(mddev); 2955 } else { 2956 printk(KERN_INFO "md: created %s\n", mdname(mddev)); 2957 ITERATE_RDEV_GENERIC(candidates,rdev,tmp) { 2958 list_del_init(&rdev->same_set); 2959 if (bind_rdev_to_array(rdev, mddev)) 2960 export_rdev(rdev); 2961 } 2962 autorun_array(mddev); 2963 mddev_unlock(mddev); 2964 } 2965 /* on success, candidates will be empty, on error 2966 * it won't... 2967 */ 2968 ITERATE_RDEV_GENERIC(candidates,rdev,tmp) 2969 export_rdev(rdev); 2970 mddev_put(mddev); 2971 } 2972 printk(KERN_INFO "md: ... autorun DONE.\n"); 2973 } 2974 2975 /* 2976 * import RAID devices based on one partition 2977 * if possible, the array gets run as well. 
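 * (this is the path behind the deprecated START_ARRAY ioctl: the 0.90
 * superblock on 'startdev' lists the other members, which are imported
 * below and handed to autorun_devices())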
2978 */ 2979 2980 static int autostart_array(dev_t startdev) 2981 { 2982 char b[BDEVNAME_SIZE]; 2983 int err = -EINVAL, i; 2984 mdp_super_t *sb = NULL; 2985 mdk_rdev_t *start_rdev = NULL, *rdev; 2986 2987 start_rdev = md_import_device(startdev, 0, 0); 2988 if (IS_ERR(start_rdev)) 2989 return err; 2990 2991 2992 /* NOTE: this can only work for 0.90.0 superblocks */ 2993 sb = (mdp_super_t*)page_address(start_rdev->sb_page); 2994 if (sb->major_version != 0 || 2995 sb->minor_version != 90 ) { 2996 printk(KERN_WARNING "md: can only autostart 0.90.0 arrays\n"); 2997 export_rdev(start_rdev); 2998 return err; 2999 } 3000 3001 if (test_bit(Faulty, &start_rdev->flags)) { 3002 printk(KERN_WARNING 3003 "md: can not autostart based on faulty %s!\n", 3004 bdevname(start_rdev->bdev,b)); 3005 export_rdev(start_rdev); 3006 return err; 3007 } 3008 list_add(&start_rdev->same_set, &pending_raid_disks); 3009 3010 for (i = 0; i < MD_SB_DISKS; i++) { 3011 mdp_disk_t *desc = sb->disks + i; 3012 dev_t dev = MKDEV(desc->major, desc->minor); 3013 3014 if (!dev) 3015 continue; 3016 if (dev == startdev) 3017 continue; 3018 if (MAJOR(dev) != desc->major || MINOR(dev) != desc->minor) 3019 continue; 3020 rdev = md_import_device(dev, 0, 0); 3021 if (IS_ERR(rdev)) 3022 continue; 3023 3024 list_add(&rdev->same_set, &pending_raid_disks); 3025 } 3026 3027 /* 3028 * possibly return codes 3029 */ 3030 autorun_devices(0); 3031 return 0; 3032 3033 } 3034 3035 3036 static int get_version(void __user * arg) 3037 { 3038 mdu_version_t ver; 3039 3040 ver.major = MD_MAJOR_VERSION; 3041 ver.minor = MD_MINOR_VERSION; 3042 ver.patchlevel = MD_PATCHLEVEL_VERSION; 3043 3044 if (copy_to_user(arg, &ver, sizeof(ver))) 3045 return -EFAULT; 3046 3047 return 0; 3048 } 3049 3050 static int get_array_info(mddev_t * mddev, void __user * arg) 3051 { 3052 mdu_array_info_t info; 3053 int nr,working,active,failed,spare; 3054 mdk_rdev_t *rdev; 3055 struct list_head *tmp; 3056 3057 nr=working=active=failed=spare=0; 3058 ITERATE_RDEV(mddev,rdev,tmp) { 3059 nr++; 3060 if (test_bit(Faulty, &rdev->flags)) 3061 failed++; 3062 else { 3063 working++; 3064 if (test_bit(In_sync, &rdev->flags)) 3065 active++; 3066 else 3067 spare++; 3068 } 3069 } 3070 3071 info.major_version = mddev->major_version; 3072 info.minor_version = mddev->minor_version; 3073 info.patch_version = MD_PATCHLEVEL_VERSION; 3074 info.ctime = mddev->ctime; 3075 info.level = mddev->level; 3076 info.size = mddev->size; 3077 if (info.size != mddev->size) /* overflow */ 3078 info.size = -1; 3079 info.nr_disks = nr; 3080 info.raid_disks = mddev->raid_disks; 3081 info.md_minor = mddev->md_minor; 3082 info.not_persistent= !mddev->persistent; 3083 3084 info.utime = mddev->utime; 3085 info.state = 0; 3086 if (mddev->in_sync) 3087 info.state = (1<<MD_SB_CLEAN); 3088 if (mddev->bitmap && mddev->bitmap_offset) 3089 info.state = (1<<MD_SB_BITMAP_PRESENT); 3090 info.active_disks = active; 3091 info.working_disks = working; 3092 info.failed_disks = failed; 3093 info.spare_disks = spare; 3094 3095 info.layout = mddev->layout; 3096 info.chunk_size = mddev->chunk_size; 3097 3098 if (copy_to_user(arg, &info, sizeof(info))) 3099 return -EFAULT; 3100 3101 return 0; 3102 } 3103 3104 static int get_bitmap_file(mddev_t * mddev, void __user * arg) 3105 { 3106 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ 3107 char *ptr, *buf = NULL; 3108 int err = -ENOMEM; 3109 3110 file = kmalloc(sizeof(*file), GFP_KERNEL); 3111 if (!file) 3112 goto out; 3113 3114 /* bitmap disabled, zero the first byte and copy out 
*/ 3115 if (!mddev->bitmap || !mddev->bitmap->file) { 3116 file->pathname[0] = '\0'; 3117 goto copy_out; 3118 } 3119 3120 buf = kmalloc(sizeof(file->pathname), GFP_KERNEL); 3121 if (!buf) 3122 goto out; 3123 3124 ptr = file_path(mddev->bitmap->file, buf, sizeof(file->pathname)); 3125 if (!ptr) 3126 goto out; 3127 3128 strcpy(file->pathname, ptr); 3129 3130 copy_out: 3131 err = 0; 3132 if (copy_to_user(arg, file, sizeof(*file))) 3133 err = -EFAULT; 3134 out: 3135 kfree(buf); 3136 kfree(file); 3137 return err; 3138 } 3139 3140 static int get_disk_info(mddev_t * mddev, void __user * arg) 3141 { 3142 mdu_disk_info_t info; 3143 unsigned int nr; 3144 mdk_rdev_t *rdev; 3145 3146 if (copy_from_user(&info, arg, sizeof(info))) 3147 return -EFAULT; 3148 3149 nr = info.number; 3150 3151 rdev = find_rdev_nr(mddev, nr); 3152 if (rdev) { 3153 info.major = MAJOR(rdev->bdev->bd_dev); 3154 info.minor = MINOR(rdev->bdev->bd_dev); 3155 info.raid_disk = rdev->raid_disk; 3156 info.state = 0; 3157 if (test_bit(Faulty, &rdev->flags)) 3158 info.state |= (1<<MD_DISK_FAULTY); 3159 else if (test_bit(In_sync, &rdev->flags)) { 3160 info.state |= (1<<MD_DISK_ACTIVE); 3161 info.state |= (1<<MD_DISK_SYNC); 3162 } 3163 if (test_bit(WriteMostly, &rdev->flags)) 3164 info.state |= (1<<MD_DISK_WRITEMOSTLY); 3165 } else { 3166 info.major = info.minor = 0; 3167 info.raid_disk = -1; 3168 info.state = (1<<MD_DISK_REMOVED); 3169 } 3170 3171 if (copy_to_user(arg, &info, sizeof(info))) 3172 return -EFAULT; 3173 3174 return 0; 3175 } 3176 3177 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) 3178 { 3179 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 3180 mdk_rdev_t *rdev; 3181 dev_t dev = MKDEV(info->major,info->minor); 3182 3183 if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) 3184 return -EOVERFLOW; 3185 3186 if (!mddev->raid_disks) { 3187 int err; 3188 /* expecting a device which has a superblock */ 3189 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); 3190 if (IS_ERR(rdev)) { 3191 printk(KERN_WARNING 3192 "md: md_import_device returned %ld\n", 3193 PTR_ERR(rdev)); 3194 return PTR_ERR(rdev); 3195 } 3196 if (!list_empty(&mddev->disks)) { 3197 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, 3198 mdk_rdev_t, same_set); 3199 int err = super_types[mddev->major_version] 3200 .load_super(rdev, rdev0, mddev->minor_version); 3201 if (err < 0) { 3202 printk(KERN_WARNING 3203 "md: %s has different UUID to %s\n", 3204 bdevname(rdev->bdev,b), 3205 bdevname(rdev0->bdev,b2)); 3206 export_rdev(rdev); 3207 return -EINVAL; 3208 } 3209 } 3210 err = bind_rdev_to_array(rdev, mddev); 3211 if (err) 3212 export_rdev(rdev); 3213 return err; 3214 } 3215 3216 /* 3217 * add_new_disk can be used once the array is assembled 3218 * to add "hot spares". 
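 * (reached through the ADD_NEW_DISK ioctl, e.g. from "mdadm --add")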
They must already have a superblock 3219 * written 3220 */ 3221 if (mddev->pers) { 3222 int err; 3223 if (!mddev->pers->hot_add_disk) { 3224 printk(KERN_WARNING 3225 "%s: personality does not support diskops!\n", 3226 mdname(mddev)); 3227 return -EINVAL; 3228 } 3229 if (mddev->persistent) 3230 rdev = md_import_device(dev, mddev->major_version, 3231 mddev->minor_version); 3232 else 3233 rdev = md_import_device(dev, -1, -1); 3234 if (IS_ERR(rdev)) { 3235 printk(KERN_WARNING 3236 "md: md_import_device returned %ld\n", 3237 PTR_ERR(rdev)); 3238 return PTR_ERR(rdev); 3239 } 3240 /* set save_raid_disk if appropriate */ 3241 if (!mddev->persistent) { 3242 if (info->state & (1<<MD_DISK_SYNC) && 3243 info->raid_disk < mddev->raid_disks) 3244 rdev->raid_disk = info->raid_disk; 3245 else 3246 rdev->raid_disk = -1; 3247 } else 3248 super_types[mddev->major_version]. 3249 validate_super(mddev, rdev); 3250 rdev->saved_raid_disk = rdev->raid_disk; 3251 3252 clear_bit(In_sync, &rdev->flags); /* just to be sure */ 3253 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 3254 set_bit(WriteMostly, &rdev->flags); 3255 3256 rdev->raid_disk = -1; 3257 err = bind_rdev_to_array(rdev, mddev); 3258 if (err) 3259 export_rdev(rdev); 3260 3261 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3262 md_wakeup_thread(mddev->thread); 3263 return err; 3264 } 3265 3266 /* otherwise, add_new_disk is only allowed 3267 * for major_version==0 superblocks 3268 */ 3269 if (mddev->major_version != 0) { 3270 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n", 3271 mdname(mddev)); 3272 return -EINVAL; 3273 } 3274 3275 if (!(info->state & (1<<MD_DISK_FAULTY))) { 3276 int err; 3277 rdev = md_import_device (dev, -1, 0); 3278 if (IS_ERR(rdev)) { 3279 printk(KERN_WARNING 3280 "md: error, md_import_device() returned %ld\n", 3281 PTR_ERR(rdev)); 3282 return PTR_ERR(rdev); 3283 } 3284 rdev->desc_nr = info->number; 3285 if (info->raid_disk < mddev->raid_disks) 3286 rdev->raid_disk = info->raid_disk; 3287 else 3288 rdev->raid_disk = -1; 3289 3290 rdev->flags = 0; 3291 3292 if (rdev->raid_disk < mddev->raid_disks) 3293 if (info->state & (1<<MD_DISK_SYNC)) 3294 set_bit(In_sync, &rdev->flags); 3295 3296 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 3297 set_bit(WriteMostly, &rdev->flags); 3298 3299 if (!mddev->persistent) { 3300 printk(KERN_INFO "md: nonpersistent superblock ...\n"); 3301 rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 3302 } else 3303 rdev->sb_offset = calc_dev_sboffset(rdev->bdev); 3304 rdev->size = calc_dev_size(rdev, mddev->chunk_size); 3305 3306 err = bind_rdev_to_array(rdev, mddev); 3307 if (err) { 3308 export_rdev(rdev); 3309 return err; 3310 } 3311 } 3312 3313 return 0; 3314 } 3315 3316 static int hot_remove_disk(mddev_t * mddev, dev_t dev) 3317 { 3318 char b[BDEVNAME_SIZE]; 3319 mdk_rdev_t *rdev; 3320 3321 if (!mddev->pers) 3322 return -ENODEV; 3323 3324 rdev = find_rdev(mddev, dev); 3325 if (!rdev) 3326 return -ENXIO; 3327 3328 if (rdev->raid_disk >= 0) 3329 goto busy; 3330 3331 kick_rdev_from_array(rdev); 3332 md_update_sb(mddev); 3333 md_new_event(mddev); 3334 3335 return 0; 3336 busy: 3337 printk(KERN_WARNING "md: cannot remove active disk %s from %s ... 
\n", 3338 bdevname(rdev->bdev,b), mdname(mddev)); 3339 return -EBUSY; 3340 } 3341 3342 static int hot_add_disk(mddev_t * mddev, dev_t dev) 3343 { 3344 char b[BDEVNAME_SIZE]; 3345 int err; 3346 unsigned int size; 3347 mdk_rdev_t *rdev; 3348 3349 if (!mddev->pers) 3350 return -ENODEV; 3351 3352 if (mddev->major_version != 0) { 3353 printk(KERN_WARNING "%s: HOT_ADD may only be used with" 3354 " version-0 superblocks.\n", 3355 mdname(mddev)); 3356 return -EINVAL; 3357 } 3358 if (!mddev->pers->hot_add_disk) { 3359 printk(KERN_WARNING 3360 "%s: personality does not support diskops!\n", 3361 mdname(mddev)); 3362 return -EINVAL; 3363 } 3364 3365 rdev = md_import_device (dev, -1, 0); 3366 if (IS_ERR(rdev)) { 3367 printk(KERN_WARNING 3368 "md: error, md_import_device() returned %ld\n", 3369 PTR_ERR(rdev)); 3370 return -EINVAL; 3371 } 3372 3373 if (mddev->persistent) 3374 rdev->sb_offset = calc_dev_sboffset(rdev->bdev); 3375 else 3376 rdev->sb_offset = 3377 rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 3378 3379 size = calc_dev_size(rdev, mddev->chunk_size); 3380 rdev->size = size; 3381 3382 if (test_bit(Faulty, &rdev->flags)) { 3383 printk(KERN_WARNING 3384 "md: can not hot-add faulty %s disk to %s!\n", 3385 bdevname(rdev->bdev,b), mdname(mddev)); 3386 err = -EINVAL; 3387 goto abort_export; 3388 } 3389 clear_bit(In_sync, &rdev->flags); 3390 rdev->desc_nr = -1; 3391 err = bind_rdev_to_array(rdev, mddev); 3392 if (err) 3393 goto abort_export; 3394 3395 /* 3396 * The rest should better be atomic, we can have disk failures 3397 * noticed in interrupt contexts ... 3398 */ 3399 3400 if (rdev->desc_nr == mddev->max_disks) { 3401 printk(KERN_WARNING "%s: can not hot-add to full array!\n", 3402 mdname(mddev)); 3403 err = -EBUSY; 3404 goto abort_unbind_export; 3405 } 3406 3407 rdev->raid_disk = -1; 3408 3409 md_update_sb(mddev); 3410 3411 /* 3412 * Kick recovery, maybe this spare has to be added to the 3413 * array immediately. 3414 */ 3415 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3416 md_wakeup_thread(mddev->thread); 3417 md_new_event(mddev); 3418 return 0; 3419 3420 abort_unbind_export: 3421 unbind_rdev_from_array(rdev); 3422 3423 abort_export: 3424 export_rdev(rdev); 3425 return err; 3426 } 3427 3428 /* similar to deny_write_access, but accounts for our holding a reference 3429 * to the file ourselves */ 3430 static int deny_bitmap_write_access(struct file * file) 3431 { 3432 struct inode *inode = file->f_mapping->host; 3433 3434 spin_lock(&inode->i_lock); 3435 if (atomic_read(&inode->i_writecount) > 1) { 3436 spin_unlock(&inode->i_lock); 3437 return -ETXTBSY; 3438 } 3439 atomic_set(&inode->i_writecount, -1); 3440 spin_unlock(&inode->i_lock); 3441 3442 return 0; 3443 } 3444 3445 static int set_bitmap_file(mddev_t *mddev, int fd) 3446 { 3447 int err; 3448 3449 if (mddev->pers) { 3450 if (!mddev->pers->quiesce) 3451 return -EBUSY; 3452 if (mddev->recovery || mddev->sync_thread) 3453 return -EBUSY; 3454 /* we should be able to change the bitmap.. 
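 * (the personality is quiesced around bitmap_create()/bitmap_destroy()
 * further down, so the bitmap is switched with no writes in flight)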
*/ 3455 } 3456 3457 3458 if (fd >= 0) { 3459 if (mddev->bitmap) 3460 return -EEXIST; /* cannot add when bitmap is present */ 3461 mddev->bitmap_file = fget(fd); 3462 3463 if (mddev->bitmap_file == NULL) { 3464 printk(KERN_ERR "%s: error: failed to get bitmap file\n", 3465 mdname(mddev)); 3466 return -EBADF; 3467 } 3468 3469 err = deny_bitmap_write_access(mddev->bitmap_file); 3470 if (err) { 3471 printk(KERN_ERR "%s: error: bitmap file is already in use\n", 3472 mdname(mddev)); 3473 fput(mddev->bitmap_file); 3474 mddev->bitmap_file = NULL; 3475 return err; 3476 } 3477 mddev->bitmap_offset = 0; /* file overrides offset */ 3478 } else if (mddev->bitmap == NULL) 3479 return -ENOENT; /* cannot remove what isn't there */ 3480 err = 0; 3481 if (mddev->pers) { 3482 mddev->pers->quiesce(mddev, 1); 3483 if (fd >= 0) 3484 err = bitmap_create(mddev); 3485 if (fd < 0 || err) 3486 bitmap_destroy(mddev); 3487 mddev->pers->quiesce(mddev, 0); 3488 } else if (fd < 0) { 3489 if (mddev->bitmap_file) 3490 fput(mddev->bitmap_file); 3491 mddev->bitmap_file = NULL; 3492 } 3493 3494 return err; 3495 } 3496 3497 /* 3498 * set_array_info is used in two different ways 3499 * The original usage is when creating a new array. 3500 * In this usage, raid_disks is > 0 and it together with 3501 * level, size, not_persistent,layout,chunksize determine the 3502 * shape of the array. 3503 * This will always create an array with a type-0.90.0 superblock. 3504 * The newer usage is when assembling an array. 3505 * In this case raid_disks will be 0, and the major_version field is 3506 * used to determine which style super-blocks are to be found on the devices. 3507 * The minor and patch _version numbers are also kept in case the 3508 * super_block handler wishes to interpret them. 3509 */ 3510 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) 3511 { 3512 3513 if (info->raid_disks == 0) { 3514 /* just setting version number for superblock loading */ 3515 if (info->major_version < 0 || 3516 info->major_version >= sizeof(super_types)/sizeof(super_types[0]) || 3517 super_types[info->major_version].name == NULL) { 3518 /* maybe try to auto-load a module? */ 3519 printk(KERN_INFO 3520 "md: superblock version %d not known\n", 3521 info->major_version); 3522 return -EINVAL; 3523 } 3524 mddev->major_version = info->major_version; 3525 mddev->minor_version = info->minor_version; 3526 mddev->patch_version = info->patch_version; 3527 return 0; 3528 } 3529 mddev->major_version = MD_MAJOR_VERSION; 3530 mddev->minor_version = MD_MINOR_VERSION; 3531 mddev->patch_version = MD_PATCHLEVEL_VERSION; 3532 mddev->ctime = get_seconds(); 3533 3534 mddev->level = info->level; 3535 mddev->clevel[0] = 0; 3536 mddev->size = info->size; 3537 mddev->raid_disks = info->raid_disks; 3538 /* don't set md_minor, it is determined by which /dev/md* was 3539 * opened 3540 */ 3541 if (info->state & (1<<MD_SB_CLEAN)) 3542 mddev->recovery_cp = MaxSector; 3543 else 3544 mddev->recovery_cp = 0; 3545 mddev->persistent = !
info->not_persistent; 3546 3547 mddev->layout = info->layout; 3548 mddev->chunk_size = info->chunk_size; 3549 3550 mddev->max_disks = MD_SB_DISKS; 3551 3552 mddev->sb_dirty = 1; 3553 3554 mddev->default_bitmap_offset = MD_SB_BYTES >> 9; 3555 mddev->bitmap_offset = 0; 3556 3557 mddev->reshape_position = MaxSector; 3558 3559 /* 3560 * Generate a 128 bit UUID 3561 */ 3562 get_random_bytes(mddev->uuid, 16); 3563 3564 mddev->new_level = mddev->level; 3565 mddev->new_chunk = mddev->chunk_size; 3566 mddev->new_layout = mddev->layout; 3567 mddev->delta_disks = 0; 3568 3569 return 0; 3570 } 3571 3572 static int update_size(mddev_t *mddev, unsigned long size) 3573 { 3574 mdk_rdev_t * rdev; 3575 int rv; 3576 struct list_head *tmp; 3577 int fit = (size == 0); 3578 3579 if (mddev->pers->resize == NULL) 3580 return -EINVAL; 3581 /* The "size" is the amount of each device that is used. 3582 * This can only make sense for arrays with redundancy. 3583 * linear and raid0 always use whatever space is available 3584 * We can only consider changing the size if no resync 3585 * or reconstruction is happening, and if the new size 3586 * is acceptable. It must fit before the sb_offset or, 3587 * if that is <data_offset, it must fit before the 3588 * size of each device. 3589 * If size is zero, we find the largest size that fits. 3590 */ 3591 if (mddev->sync_thread) 3592 return -EBUSY; 3593 ITERATE_RDEV(mddev,rdev,tmp) { 3594 sector_t avail; 3595 if (rdev->sb_offset > rdev->data_offset) 3596 avail = (rdev->sb_offset*2) - rdev->data_offset; 3597 else 3598 avail = get_capacity(rdev->bdev->bd_disk) 3599 - rdev->data_offset; 3600 if (fit && (size == 0 || size > avail/2)) 3601 size = avail/2; 3602 if (avail < ((sector_t)size << 1)) 3603 return -ENOSPC; 3604 } 3605 rv = mddev->pers->resize(mddev, (sector_t)size *2); 3606 if (!rv) { 3607 struct block_device *bdev; 3608 3609 bdev = bdget_disk(mddev->gendisk, 0); 3610 if (bdev) { 3611 mutex_lock(&bdev->bd_inode->i_mutex); 3612 i_size_write(bdev->bd_inode, (loff_t)mddev->array_size << 10); 3613 mutex_unlock(&bdev->bd_inode->i_mutex); 3614 bdput(bdev); 3615 } 3616 } 3617 return rv; 3618 } 3619 3620 static int update_raid_disks(mddev_t *mddev, int raid_disks) 3621 { 3622 int rv; 3623 /* change the number of raid disks */ 3624 if (mddev->pers->check_reshape == NULL) 3625 return -EINVAL; 3626 if (raid_disks <= 0 || 3627 raid_disks >= mddev->max_disks) 3628 return -EINVAL; 3629 if (mddev->sync_thread || mddev->reshape_position != MaxSector) 3630 return -EBUSY; 3631 mddev->delta_disks = raid_disks - mddev->raid_disks; 3632 3633 rv = mddev->pers->check_reshape(mddev); 3634 return rv; 3635 } 3636 3637 3638 /* 3639 * update_array_info is used to change the configuration of an 3640 * on-line array. 3641 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size 3642 * fields in the info are checked against the array. 3643 * Any differences that cannot be handled will cause an error. 3644 * Normally, only one change can be managed at a time. 
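 * For example, a SET_ARRAY_INFO call that matches the running array in
 * every field but toggles MD_SB_BITMAP_PRESENT in 'state' is the single
 * change that adds or removes the internal bitmap below.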
3645 */ 3646 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info) 3647 { 3648 int rv = 0; 3649 int cnt = 0; 3650 int state = 0; 3651 3652 /* calculate expected state,ignoring low bits */ 3653 if (mddev->bitmap && mddev->bitmap_offset) 3654 state |= (1 << MD_SB_BITMAP_PRESENT); 3655 3656 if (mddev->major_version != info->major_version || 3657 mddev->minor_version != info->minor_version || 3658 /* mddev->patch_version != info->patch_version || */ 3659 mddev->ctime != info->ctime || 3660 mddev->level != info->level || 3661 /* mddev->layout != info->layout || */ 3662 !mddev->persistent != info->not_persistent|| 3663 mddev->chunk_size != info->chunk_size || 3664 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ 3665 ((state^info->state) & 0xfffffe00) 3666 ) 3667 return -EINVAL; 3668 /* Check there is only one change */ 3669 if (info->size >= 0 && mddev->size != info->size) cnt++; 3670 if (mddev->raid_disks != info->raid_disks) cnt++; 3671 if (mddev->layout != info->layout) cnt++; 3672 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++; 3673 if (cnt == 0) return 0; 3674 if (cnt > 1) return -EINVAL; 3675 3676 if (mddev->layout != info->layout) { 3677 /* Change layout 3678 * we don't need to do anything at the md level, the 3679 * personality will take care of it all. 3680 */ 3681 if (mddev->pers->reconfig == NULL) 3682 return -EINVAL; 3683 else 3684 return mddev->pers->reconfig(mddev, info->layout, -1); 3685 } 3686 if (info->size >= 0 && mddev->size != info->size) 3687 rv = update_size(mddev, info->size); 3688 3689 if (mddev->raid_disks != info->raid_disks) 3690 rv = update_raid_disks(mddev, info->raid_disks); 3691 3692 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { 3693 if (mddev->pers->quiesce == NULL) 3694 return -EINVAL; 3695 if (mddev->recovery || mddev->sync_thread) 3696 return -EBUSY; 3697 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) { 3698 /* add the bitmap */ 3699 if (mddev->bitmap) 3700 return -EEXIST; 3701 if (mddev->default_bitmap_offset == 0) 3702 return -EINVAL; 3703 mddev->bitmap_offset = mddev->default_bitmap_offset; 3704 mddev->pers->quiesce(mddev, 1); 3705 rv = bitmap_create(mddev); 3706 if (rv) 3707 bitmap_destroy(mddev); 3708 mddev->pers->quiesce(mddev, 0); 3709 } else { 3710 /* remove the bitmap */ 3711 if (!mddev->bitmap) 3712 return -ENOENT; 3713 if (mddev->bitmap->file) 3714 return -EINVAL; 3715 mddev->pers->quiesce(mddev, 1); 3716 bitmap_destroy(mddev); 3717 mddev->pers->quiesce(mddev, 0); 3718 mddev->bitmap_offset = 0; 3719 } 3720 } 3721 md_update_sb(mddev); 3722 return rv; 3723 } 3724 3725 static int set_disk_faulty(mddev_t *mddev, dev_t dev) 3726 { 3727 mdk_rdev_t *rdev; 3728 3729 if (mddev->pers == NULL) 3730 return -ENODEV; 3731 3732 rdev = find_rdev(mddev, dev); 3733 if (!rdev) 3734 return -ENODEV; 3735 3736 md_error(mddev, rdev); 3737 return 0; 3738 } 3739 3740 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) 3741 { 3742 mddev_t *mddev = bdev->bd_disk->private_data; 3743 3744 geo->heads = 2; 3745 geo->sectors = 4; 3746 geo->cylinders = get_capacity(mddev->gendisk) / 8; 3747 return 0; 3748 } 3749 3750 static int md_ioctl(struct inode *inode, struct file *file, 3751 unsigned int cmd, unsigned long arg) 3752 { 3753 int err = 0; 3754 void __user *argp = (void __user *)arg; 3755 mddev_t *mddev = NULL; 3756 3757 if (!capable(CAP_SYS_ADMIN)) 3758 return -EACCES; 3759 3760 /* 3761 * Commands dealing with the RAID driver but not any 3762 * particular array: 3763 */ 3764 switch (cmd) 3765 { 
3766 case RAID_VERSION: 3767 err = get_version(argp); 3768 goto done; 3769 3770 case PRINT_RAID_DEBUG: 3771 err = 0; 3772 md_print_devices(); 3773 goto done; 3774 3775 #ifndef MODULE 3776 case RAID_AUTORUN: 3777 err = 0; 3778 autostart_arrays(arg); 3779 goto done; 3780 #endif 3781 default:; 3782 } 3783 3784 /* 3785 * Commands creating/starting a new array: 3786 */ 3787 3788 mddev = inode->i_bdev->bd_disk->private_data; 3789 3790 if (!mddev) { 3791 BUG(); 3792 goto abort; 3793 } 3794 3795 3796 if (cmd == START_ARRAY) { 3797 /* START_ARRAY doesn't need to lock the array as autostart_array 3798 * does the locking, and it could even be a different array 3799 */ 3800 static int cnt = 3; 3801 if (cnt > 0 ) { 3802 printk(KERN_WARNING 3803 "md: %s(pid %d) used deprecated START_ARRAY ioctl. " 3804 "This will not be supported beyond July 2006\n", 3805 current->comm, current->pid); 3806 cnt--; 3807 } 3808 err = autostart_array(new_decode_dev(arg)); 3809 if (err) { 3810 printk(KERN_WARNING "md: autostart failed!\n"); 3811 goto abort; 3812 } 3813 goto done; 3814 } 3815 3816 err = mddev_lock(mddev); 3817 if (err) { 3818 printk(KERN_INFO 3819 "md: ioctl lock interrupted, reason %d, cmd %d\n", 3820 err, cmd); 3821 goto abort; 3822 } 3823 3824 switch (cmd) 3825 { 3826 case SET_ARRAY_INFO: 3827 { 3828 mdu_array_info_t info; 3829 if (!arg) 3830 memset(&info, 0, sizeof(info)); 3831 else if (copy_from_user(&info, argp, sizeof(info))) { 3832 err = -EFAULT; 3833 goto abort_unlock; 3834 } 3835 if (mddev->pers) { 3836 err = update_array_info(mddev, &info); 3837 if (err) { 3838 printk(KERN_WARNING "md: couldn't update" 3839 " array info. %d\n", err); 3840 goto abort_unlock; 3841 } 3842 goto done_unlock; 3843 } 3844 if (!list_empty(&mddev->disks)) { 3845 printk(KERN_WARNING 3846 "md: array %s already has disks!\n", 3847 mdname(mddev)); 3848 err = -EBUSY; 3849 goto abort_unlock; 3850 } 3851 if (mddev->raid_disks) { 3852 printk(KERN_WARNING 3853 "md: array %s already initialised!\n", 3854 mdname(mddev)); 3855 err = -EBUSY; 3856 goto abort_unlock; 3857 } 3858 err = set_array_info(mddev, &info); 3859 if (err) { 3860 printk(KERN_WARNING "md: couldn't set" 3861 " array info. %d\n", err); 3862 goto abort_unlock; 3863 } 3864 } 3865 goto done_unlock; 3866 3867 default:; 3868 } 3869 3870 /* 3871 * Commands querying/configuring an existing array: 3872 */ 3873 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY, 3874 * RUN_ARRAY, and SET_BITMAP_FILE are allowed */ 3875 if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY 3876 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE) { 3877 err = -ENODEV; 3878 goto abort_unlock; 3879 } 3880 3881 /* 3882 * Commands even a read-only array can execute: 3883 */ 3884 switch (cmd) 3885 { 3886 case GET_ARRAY_INFO: 3887 err = get_array_info(mddev, argp); 3888 goto done_unlock; 3889 3890 case GET_BITMAP_FILE: 3891 err = get_bitmap_file(mddev, argp); 3892 goto done_unlock; 3893 3894 case GET_DISK_INFO: 3895 err = get_disk_info(mddev, argp); 3896 goto done_unlock; 3897 3898 case RESTART_ARRAY_RW: 3899 err = restart_array(mddev); 3900 goto done_unlock; 3901 3902 case STOP_ARRAY: 3903 err = do_md_stop (mddev, 0); 3904 goto done_unlock; 3905 3906 case STOP_ARRAY_RO: 3907 err = do_md_stop (mddev, 1); 3908 goto done_unlock; 3909 3910 /* 3911 * We have a problem here : there is no easy way to give a CHS 3912 * virtual geometry. We currently pretend that we have a 2 heads 3913 * 4 sectors (with a BIG number of cylinders...). This drives 3914 * dosfs just mad... 
;-) 3915 */ 3916 } 3917 3918 /* 3919 * The remaining ioctls are changing the state of the 3920 * superblock, so we do not allow them on read-only arrays. 3921 * However non-MD ioctls (e.g. get-size) will still come through 3922 * here and hit the 'default' below, so only disallow 3923 * 'md' ioctls, and switch to rw mode if started auto-readonly. 3924 */ 3925 if (_IOC_TYPE(cmd) == MD_MAJOR && 3926 mddev->ro && mddev->pers) { 3927 if (mddev->ro == 2) { 3928 mddev->ro = 0; 3929 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3930 md_wakeup_thread(mddev->thread); 3931 3932 } else { 3933 err = -EROFS; 3934 goto abort_unlock; 3935 } 3936 } 3937 3938 switch (cmd) 3939 { 3940 case ADD_NEW_DISK: 3941 { 3942 mdu_disk_info_t info; 3943 if (copy_from_user(&info, argp, sizeof(info))) 3944 err = -EFAULT; 3945 else 3946 err = add_new_disk(mddev, &info); 3947 goto done_unlock; 3948 } 3949 3950 case HOT_REMOVE_DISK: 3951 err = hot_remove_disk(mddev, new_decode_dev(arg)); 3952 goto done_unlock; 3953 3954 case HOT_ADD_DISK: 3955 err = hot_add_disk(mddev, new_decode_dev(arg)); 3956 goto done_unlock; 3957 3958 case SET_DISK_FAULTY: 3959 err = set_disk_faulty(mddev, new_decode_dev(arg)); 3960 goto done_unlock; 3961 3962 case RUN_ARRAY: 3963 err = do_md_run (mddev); 3964 goto done_unlock; 3965 3966 case SET_BITMAP_FILE: 3967 err = set_bitmap_file(mddev, (int)arg); 3968 goto done_unlock; 3969 3970 default: 3971 if (_IOC_TYPE(cmd) == MD_MAJOR) 3972 printk(KERN_WARNING "md: %s(pid %d) used" 3973 " obsolete MD ioctl, upgrade your" 3974 " software to use new ioctls.\n", 3975 current->comm, current->pid); 3976 err = -EINVAL; 3977 goto abort_unlock; 3978 } 3979 3980 done_unlock: 3981 abort_unlock: 3982 mddev_unlock(mddev); 3983 3984 return err; 3985 done: 3986 if (err) 3987 MD_BUG(); 3988 abort: 3989 return err; 3990 } 3991 3992 static int md_open(struct inode *inode, struct file *file) 3993 { 3994 /* 3995 * Succeed if we can lock the mddev, which confirms that 3996 * it isn't being stopped right now. 3997 */ 3998 mddev_t *mddev = inode->i_bdev->bd_disk->private_data; 3999 int err; 4000 4001 if ((err = mddev_lock(mddev))) 4002 goto out; 4003 4004 err = 0; 4005 mddev_get(mddev); 4006 mddev_unlock(mddev); 4007 4008 check_disk_change(inode->i_bdev); 4009 out: 4010 return err; 4011 } 4012 4013 static int md_release(struct inode *inode, struct file * file) 4014 { 4015 mddev_t *mddev = inode->i_bdev->bd_disk->private_data; 4016 4017 if (!mddev) 4018 BUG(); 4019 mddev_put(mddev); 4020 4021 return 0; 4022 } 4023 4024 static int md_media_changed(struct gendisk *disk) 4025 { 4026 mddev_t *mddev = disk->private_data; 4027 4028 return mddev->changed; 4029 } 4030 4031 static int md_revalidate(struct gendisk *disk) 4032 { 4033 mddev_t *mddev = disk->private_data; 4034 4035 mddev->changed = 0; 4036 return 0; 4037 } 4038 static struct block_device_operations md_fops = 4039 { 4040 .owner = THIS_MODULE, 4041 .open = md_open, 4042 .release = md_release, 4043 .ioctl = md_ioctl, 4044 .getgeo = md_getgeo, 4045 .media_changed = md_media_changed, 4046 .revalidate_disk= md_revalidate, 4047 }; 4048 4049 static int md_thread(void * arg) 4050 { 4051 mdk_thread_t *thread = arg; 4052 4053 /* 4054 * md_thread is a 'system-thread', its priority should be very 4055 * high. We avoid resource deadlocks individually in each 4056 * raid personality. (RAID5 does preallocation) We also use RR and 4057 * the very same RT priority as kswapd, thus we will never get 4058 * into a priority inversion deadlock.
4059 * 4060 * we definitely have to have equal or higher priority than 4061 * bdflush, otherwise bdflush will deadlock if there are too 4062 * many dirty RAID5 blocks. 4063 */ 4064 4065 allow_signal(SIGKILL); 4066 while (!kthread_should_stop()) { 4067 4068 /* We need to wait INTERRUPTIBLE so that 4069 * we don't add to the load-average. 4070 * That means we need to be sure no signals are 4071 * pending 4072 */ 4073 if (signal_pending(current)) 4074 flush_signals(current); 4075 4076 wait_event_interruptible_timeout 4077 (thread->wqueue, 4078 test_bit(THREAD_WAKEUP, &thread->flags) 4079 || kthread_should_stop(), 4080 thread->timeout); 4081 try_to_freeze(); 4082 4083 clear_bit(THREAD_WAKEUP, &thread->flags); 4084 4085 thread->run(thread->mddev); 4086 } 4087 4088 return 0; 4089 } 4090 4091 void md_wakeup_thread(mdk_thread_t *thread) 4092 { 4093 if (thread) { 4094 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm); 4095 set_bit(THREAD_WAKEUP, &thread->flags); 4096 wake_up(&thread->wqueue); 4097 } 4098 } 4099 4100 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev, 4101 const char *name) 4102 { 4103 mdk_thread_t *thread; 4104 4105 thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL); 4106 if (!thread) 4107 return NULL; 4108 4109 init_waitqueue_head(&thread->wqueue); 4110 4111 thread->run = run; 4112 thread->mddev = mddev; 4113 thread->timeout = MAX_SCHEDULE_TIMEOUT; 4114 thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev)); 4115 if (IS_ERR(thread->tsk)) { 4116 kfree(thread); 4117 return NULL; 4118 } 4119 return thread; 4120 } 4121 4122 void md_unregister_thread(mdk_thread_t *thread) 4123 { 4124 dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid); 4125 4126 kthread_stop(thread->tsk); 4127 kfree(thread); 4128 } 4129 4130 void md_error(mddev_t *mddev, mdk_rdev_t *rdev) 4131 { 4132 if (!mddev) { 4133 MD_BUG(); 4134 return; 4135 } 4136 4137 if (!rdev || test_bit(Faulty, &rdev->flags)) 4138 return; 4139 /* 4140 dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n", 4141 mdname(mddev), 4142 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev), 4143 __builtin_return_address(0),__builtin_return_address(1), 4144 __builtin_return_address(2),__builtin_return_address(3)); 4145 */ 4146 if (!mddev->pers->error_handler) 4147 return; 4148 mddev->pers->error_handler(mddev,rdev); 4149 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4150 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4151 md_wakeup_thread(mddev->thread); 4152 md_new_event(mddev); 4153 } 4154 4155 /* seq_file implementation /proc/mdstat */ 4156 4157 static void status_unused(struct seq_file *seq) 4158 { 4159 int i = 0; 4160 mdk_rdev_t *rdev; 4161 struct list_head *tmp; 4162 4163 seq_printf(seq, "unused devices: "); 4164 4165 ITERATE_RDEV_PENDING(rdev,tmp) { 4166 char b[BDEVNAME_SIZE]; 4167 i++; 4168 seq_printf(seq, "%s ", 4169 bdevname(rdev->bdev,b)); 4170 } 4171 if (!i) 4172 seq_printf(seq, "<none>"); 4173 4174 seq_printf(seq, "\n"); 4175 } 4176 4177 4178 static void status_resync(struct seq_file *seq, mddev_t * mddev) 4179 { 4180 sector_t max_blocks, resync, res; 4181 unsigned long dt, db, rt; 4182 int scale; 4183 unsigned int per_milli; 4184 4185 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2; 4186 4187 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 4188 max_blocks = mddev->resync_max_sectors >> 1; 4189 else 4190 max_blocks = mddev->size; 4191 4192 /* 4193 * Should not happen. 
4194 */ 4195 if (!max_blocks) { 4196 MD_BUG(); 4197 return; 4198 } 4199 /* Pick 'scale' such that (resync>>scale)*1000 will fit 4200 * in a sector_t, and (max_blocks>>scale) will fit in a 4201 * u32, as those are the requirements for sector_div. 4202 * Thus 'scale' must be at least 10 4203 */ 4204 scale = 10; 4205 if (sizeof(sector_t) > sizeof(unsigned long)) { 4206 while ( max_blocks/2 > (1ULL<<(scale+32))) 4207 scale++; 4208 } 4209 res = (resync>>scale)*1000; 4210 sector_div(res, (u32)((max_blocks>>scale)+1)); 4211 4212 per_milli = res; 4213 { 4214 int i, x = per_milli/50, y = 20-x; 4215 seq_printf(seq, "["); 4216 for (i = 0; i < x; i++) 4217 seq_printf(seq, "="); 4218 seq_printf(seq, ">"); 4219 for (i = 0; i < y; i++) 4220 seq_printf(seq, "."); 4221 seq_printf(seq, "] "); 4222 } 4223 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)", 4224 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? 4225 "reshape" : 4226 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? 4227 "resync" : "recovery")), 4228 per_milli/10, per_milli % 10, 4229 (unsigned long long) resync, 4230 (unsigned long long) max_blocks); 4231 4232 /* 4233 * We do not want to overflow, so the order of operands and 4234 * the * 100 / 100 trick are important. We do a +1 to be 4235 * safe against division by zero. We only estimate anyway. 4236 * 4237 * dt: time from mark until now 4238 * db: blocks written from mark until now 4239 * rt: remaining time 4240 */ 4241 dt = ((jiffies - mddev->resync_mark) / HZ); 4242 if (!dt) dt++; 4243 db = resync - (mddev->resync_mark_cnt/2); 4244 rt = (dt * ((unsigned long)(max_blocks-resync) / (db/100+1)))/100; 4245 4246 seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6); 4247 4248 seq_printf(seq, " speed=%ldK/sec", db/dt); 4249 } 4250 4251 static void *md_seq_start(struct seq_file *seq, loff_t *pos) 4252 { 4253 struct list_head *tmp; 4254 loff_t l = *pos; 4255 mddev_t *mddev; 4256 4257 if (l >= 0x10000) 4258 return NULL; 4259 if (!l--) 4260 /* header */ 4261 return (void*)1; 4262 4263 spin_lock(&all_mddevs_lock); 4264 list_for_each(tmp,&all_mddevs) 4265 if (!l--) { 4266 mddev = list_entry(tmp, mddev_t, all_mddevs); 4267 mddev_get(mddev); 4268 spin_unlock(&all_mddevs_lock); 4269 return mddev; 4270 } 4271 spin_unlock(&all_mddevs_lock); 4272 if (!l--) 4273 return (void*)2;/* tail */ 4274 return NULL; 4275 } 4276 4277 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) 4278 { 4279 struct list_head *tmp; 4280 mddev_t *next_mddev, *mddev = v; 4281 4282 ++*pos; 4283 if (v == (void*)2) 4284 return NULL; 4285 4286 spin_lock(&all_mddevs_lock); 4287 if (v == (void*)1) 4288 tmp = all_mddevs.next; 4289 else 4290 tmp = mddev->all_mddevs.next; 4291 if (tmp != &all_mddevs) 4292 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs)); 4293 else { 4294 next_mddev = (void*)2; 4295 *pos = 0x10000; 4296 } 4297 spin_unlock(&all_mddevs_lock); 4298 4299 if (v != (void*)1) 4300 mddev_put(mddev); 4301 return next_mddev; 4302 4303 } 4304 4305 static void md_seq_stop(struct seq_file *seq, void *v) 4306 { 4307 mddev_t *mddev = v; 4308 4309 if (mddev && v != (void*)1 && v != (void*)2) 4310 mddev_put(mddev); 4311 } 4312 4313 struct mdstat_info { 4314 int event; 4315 }; 4316 4317 static int md_seq_show(struct seq_file *seq, void *v) 4318 { 4319 mddev_t *mddev = v; 4320 sector_t size; 4321 struct list_head *tmp2; 4322 mdk_rdev_t *rdev; 4323 struct mdstat_info *mi = seq->private; 4324 struct bitmap *bitmap; 4325 4326 if (v == (void*)1) { 4327 struct mdk_personality *pers; 4328 seq_printf(seq, "Personalities : "); 
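/* header line of /proc/mdstat, e.g. "Personalities : [raid1] [raid5]" --
 * one entry for each personality currently registered in pers_list */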
4329 spin_lock(&pers_lock); 4330 list_for_each_entry(pers, &pers_list, list) 4331 seq_printf(seq, "[%s] ", pers->name); 4332 4333 spin_unlock(&pers_lock); 4334 seq_printf(seq, "\n"); 4335 mi->event = atomic_read(&md_event_count); 4336 return 0; 4337 } 4338 if (v == (void*)2) { 4339 status_unused(seq); 4340 return 0; 4341 } 4342 4343 if (mddev_lock(mddev)!=0) 4344 return -EINTR; 4345 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { 4346 seq_printf(seq, "%s : %sactive", mdname(mddev), 4347 mddev->pers ? "" : "in"); 4348 if (mddev->pers) { 4349 if (mddev->ro==1) 4350 seq_printf(seq, " (read-only)"); 4351 if (mddev->ro==2) 4352 seq_printf(seq, "(auto-read-only)"); 4353 seq_printf(seq, " %s", mddev->pers->name); 4354 } 4355 4356 size = 0; 4357 ITERATE_RDEV(mddev,rdev,tmp2) { 4358 char b[BDEVNAME_SIZE]; 4359 seq_printf(seq, " %s[%d]", 4360 bdevname(rdev->bdev,b), rdev->desc_nr); 4361 if (test_bit(WriteMostly, &rdev->flags)) 4362 seq_printf(seq, "(W)"); 4363 if (test_bit(Faulty, &rdev->flags)) { 4364 seq_printf(seq, "(F)"); 4365 continue; 4366 } else if (rdev->raid_disk < 0) 4367 seq_printf(seq, "(S)"); /* spare */ 4368 size += rdev->size; 4369 } 4370 4371 if (!list_empty(&mddev->disks)) { 4372 if (mddev->pers) 4373 seq_printf(seq, "\n %llu blocks", 4374 (unsigned long long)mddev->array_size); 4375 else 4376 seq_printf(seq, "\n %llu blocks", 4377 (unsigned long long)size); 4378 } 4379 if (mddev->persistent) { 4380 if (mddev->major_version != 0 || 4381 mddev->minor_version != 90) { 4382 seq_printf(seq," super %d.%d", 4383 mddev->major_version, 4384 mddev->minor_version); 4385 } 4386 } else 4387 seq_printf(seq, " super non-persistent"); 4388 4389 if (mddev->pers) { 4390 mddev->pers->status (seq, mddev); 4391 seq_printf(seq, "\n "); 4392 if (mddev->pers->sync_request) { 4393 if (mddev->curr_resync > 2) { 4394 status_resync (seq, mddev); 4395 seq_printf(seq, "\n "); 4396 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2) 4397 seq_printf(seq, "\tresync=DELAYED\n "); 4398 else if (mddev->recovery_cp < MaxSector) 4399 seq_printf(seq, "\tresync=PENDING\n "); 4400 } 4401 } else 4402 seq_printf(seq, "\n "); 4403 4404 if ((bitmap = mddev->bitmap)) { 4405 unsigned long chunk_kb; 4406 unsigned long flags; 4407 spin_lock_irqsave(&bitmap->lock, flags); 4408 chunk_kb = bitmap->chunksize >> 10; 4409 seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], " 4410 "%lu%s chunk", 4411 bitmap->pages - bitmap->missing_pages, 4412 bitmap->pages, 4413 (bitmap->pages - bitmap->missing_pages) 4414 << (PAGE_SHIFT - 10), 4415 chunk_kb ? chunk_kb : bitmap->chunksize, 4416 chunk_kb ? 
"KB" : "B"); 4417 if (bitmap->file) { 4418 seq_printf(seq, ", file: "); 4419 seq_path(seq, bitmap->file->f_vfsmnt, 4420 bitmap->file->f_dentry," \t\n"); 4421 } 4422 4423 seq_printf(seq, "\n"); 4424 spin_unlock_irqrestore(&bitmap->lock, flags); 4425 } 4426 4427 seq_printf(seq, "\n"); 4428 } 4429 mddev_unlock(mddev); 4430 4431 return 0; 4432 } 4433 4434 static struct seq_operations md_seq_ops = { 4435 .start = md_seq_start, 4436 .next = md_seq_next, 4437 .stop = md_seq_stop, 4438 .show = md_seq_show, 4439 }; 4440 4441 static int md_seq_open(struct inode *inode, struct file *file) 4442 { 4443 int error; 4444 struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL); 4445 if (mi == NULL) 4446 return -ENOMEM; 4447 4448 error = seq_open(file, &md_seq_ops); 4449 if (error) 4450 kfree(mi); 4451 else { 4452 struct seq_file *p = file->private_data; 4453 p->private = mi; 4454 mi->event = atomic_read(&md_event_count); 4455 } 4456 return error; 4457 } 4458 4459 static int md_seq_release(struct inode *inode, struct file *file) 4460 { 4461 struct seq_file *m = file->private_data; 4462 struct mdstat_info *mi = m->private; 4463 m->private = NULL; 4464 kfree(mi); 4465 return seq_release(inode, file); 4466 } 4467 4468 static unsigned int mdstat_poll(struct file *filp, poll_table *wait) 4469 { 4470 struct seq_file *m = filp->private_data; 4471 struct mdstat_info *mi = m->private; 4472 int mask; 4473 4474 poll_wait(filp, &md_event_waiters, wait); 4475 4476 /* always allow read */ 4477 mask = POLLIN | POLLRDNORM; 4478 4479 if (mi->event != atomic_read(&md_event_count)) 4480 mask |= POLLERR | POLLPRI; 4481 return mask; 4482 } 4483 4484 static struct file_operations md_seq_fops = { 4485 .open = md_seq_open, 4486 .read = seq_read, 4487 .llseek = seq_lseek, 4488 .release = md_seq_release, 4489 .poll = mdstat_poll, 4490 }; 4491 4492 int register_md_personality(struct mdk_personality *p) 4493 { 4494 spin_lock(&pers_lock); 4495 list_add_tail(&p->list, &pers_list); 4496 printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level); 4497 spin_unlock(&pers_lock); 4498 return 0; 4499 } 4500 4501 int unregister_md_personality(struct mdk_personality *p) 4502 { 4503 printk(KERN_INFO "md: %s personality unregistered\n", p->name); 4504 spin_lock(&pers_lock); 4505 list_del_init(&p->list); 4506 spin_unlock(&pers_lock); 4507 return 0; 4508 } 4509 4510 static int is_mddev_idle(mddev_t *mddev) 4511 { 4512 mdk_rdev_t * rdev; 4513 struct list_head *tmp; 4514 int idle; 4515 unsigned long curr_events; 4516 4517 idle = 1; 4518 ITERATE_RDEV(mddev,rdev,tmp) { 4519 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; 4520 curr_events = disk_stat_read(disk, sectors[0]) + 4521 disk_stat_read(disk, sectors[1]) - 4522 atomic_read(&disk->sync_io); 4523 /* The difference between curr_events and last_events 4524 * will be affected by any new non-sync IO (making 4525 * curr_events bigger) and any difference in the amount of 4526 * in-flight syncio (making current_events bigger or smaller) 4527 * The amount in-flight is currently limited to 4528 * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6 4529 * which is at most 4096 sectors. 4530 * These numbers are fairly fragile and should be made 4531 * more robust, probably by enforcing the 4532 * 'window size' that md_do_sync sort-of uses. 4533 * 4534 * Note: the following is an unsigned comparison. 
4535 */
4536 if ((curr_events - rdev->last_events + 4096) > 8192) {
4537 rdev->last_events = curr_events;
4538 idle = 0;
4539 }
4540 }
4541 return idle;
4542 }
4543
4544 void md_done_sync(mddev_t *mddev, int blocks, int ok)
4545 {
4546 /* another "blocks" (512-byte) blocks have been synced */
4547 atomic_sub(blocks, &mddev->recovery_active);
4548 wake_up(&mddev->recovery_wait);
4549 if (!ok) {
4550 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
4551 md_wakeup_thread(mddev->thread);
4552 // stop recovery, signal do_sync ....
4553 }
4554 }
4555
4556
4557 /* md_write_start(mddev, bi)
4558 * If we need to update some array metadata (e.g. 'active' flag
4559 * in superblock) before writing, schedule a superblock update
4560 * and wait for it to complete.
4561 */
4562 void md_write_start(mddev_t *mddev, struct bio *bi)
4563 {
4564 if (bio_data_dir(bi) != WRITE)
4565 return;
4566
4567 BUG_ON(mddev->ro == 1);
4568 if (mddev->ro == 2) {
4569 /* need to switch to read/write */
4570 mddev->ro = 0;
4571 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4572 md_wakeup_thread(mddev->thread);
4573 }
4574 atomic_inc(&mddev->writes_pending);
4575 if (mddev->in_sync) {
4576 spin_lock_irq(&mddev->write_lock);
4577 if (mddev->in_sync) {
4578 mddev->in_sync = 0;
4579 mddev->sb_dirty = 1;
4580 md_wakeup_thread(mddev->thread);
4581 }
4582 spin_unlock_irq(&mddev->write_lock);
4583 }
4584 wait_event(mddev->sb_wait, mddev->sb_dirty==0);
4585 }
4586
4587 void md_write_end(mddev_t *mddev)
4588 {
4589 if (atomic_dec_and_test(&mddev->writes_pending)) {
4590 if (mddev->safemode == 2)
4591 md_wakeup_thread(mddev->thread);
4592 else
4593 mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
4594 }
4595 }
4596
4597 static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
4598
4599 #define SYNC_MARKS 10
4600 #define SYNC_MARK_STEP (3*HZ)
4601 void md_do_sync(mddev_t *mddev)
4602 {
4603 mddev_t *mddev2;
4604 unsigned int currspeed = 0,
4605 window;
4606 sector_t max_sectors,j, io_sectors;
4607 unsigned long mark[SYNC_MARKS];
4608 sector_t mark_cnt[SYNC_MARKS];
4609 int last_mark,m;
4610 struct list_head *tmp;
4611 sector_t last_check;
4612 int skipped = 0;
4613
4614 /* just in case the thread restarts... */
4615 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
4616 return;
4617
4618 /* we overload curr_resync somewhat here.
4619 * 0 == not engaged in resync at all
4620 * 2 == checking that there is no conflict with another sync
4621 * 1 == like 2, but have yielded to allow conflicting resync to
4622 * commence
4623 * other == active in resync - this many blocks
4624 *
4625 * Before starting a resync we must have set curr_resync to
4626 * 2, and then checked that every "conflicting" array has curr_resync
4627 * less than ours. When we find one that is the same or higher
4628 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync
4629 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
4630 * This will mean we have to start checking from the beginning again.
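 * (Added walk-through, for illustration only: suppose arrays A and B share a
 *  physical device and both reach curr_resync == 2. The one with the lower
 *  mddev address, say A, yields by dropping to curr_resync == 1 and waking
 *  resync_wait; B then sees A's value below its own and proceeds, while A
 *  sleeps on resync_wait and, once woken, starts the check again from the top.)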
4631 * 4632 */ 4633 4634 do { 4635 mddev->curr_resync = 2; 4636 4637 try_again: 4638 if (kthread_should_stop()) { 4639 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4640 goto skip; 4641 } 4642 ITERATE_MDDEV(mddev2,tmp) { 4643 if (mddev2 == mddev) 4644 continue; 4645 if (mddev2->curr_resync && 4646 match_mddev_units(mddev,mddev2)) { 4647 DEFINE_WAIT(wq); 4648 if (mddev < mddev2 && mddev->curr_resync == 2) { 4649 /* arbitrarily yield */ 4650 mddev->curr_resync = 1; 4651 wake_up(&resync_wait); 4652 } 4653 if (mddev > mddev2 && mddev->curr_resync == 1) 4654 /* no need to wait here, we can wait the next 4655 * time 'round when curr_resync == 2 4656 */ 4657 continue; 4658 prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE); 4659 if (!kthread_should_stop() && 4660 mddev2->curr_resync >= mddev->curr_resync) { 4661 printk(KERN_INFO "md: delaying resync of %s" 4662 " until %s has finished resync (they" 4663 " share one or more physical units)\n", 4664 mdname(mddev), mdname(mddev2)); 4665 mddev_put(mddev2); 4666 schedule(); 4667 finish_wait(&resync_wait, &wq); 4668 goto try_again; 4669 } 4670 finish_wait(&resync_wait, &wq); 4671 } 4672 } 4673 } while (mddev->curr_resync < 2); 4674 4675 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 4676 /* resync follows the size requested by the personality, 4677 * which defaults to physical size, but can be virtual size 4678 */ 4679 max_sectors = mddev->resync_max_sectors; 4680 mddev->resync_mismatches = 0; 4681 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 4682 max_sectors = mddev->size << 1; 4683 else 4684 /* recovery follows the physical size of devices */ 4685 max_sectors = mddev->size << 1; 4686 4687 printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev)); 4688 printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:" 4689 " %d KB/sec/disc.\n", speed_min(mddev)); 4690 printk(KERN_INFO "md: using maximum available idle IO bandwidth " 4691 "(but not more than %d KB/sec) for reconstruction.\n", 4692 speed_max(mddev)); 4693 4694 is_mddev_idle(mddev); /* this also initializes IO event counters */ 4695 /* we don't use the checkpoint if there's a bitmap */ 4696 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && !mddev->bitmap 4697 && ! 
test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
4698 j = mddev->recovery_cp;
4699 else
4700 j = 0;
4701 io_sectors = 0;
4702 for (m = 0; m < SYNC_MARKS; m++) {
4703 mark[m] = jiffies;
4704 mark_cnt[m] = io_sectors;
4705 }
4706 last_mark = 0;
4707 mddev->resync_mark = mark[last_mark];
4708 mddev->resync_mark_cnt = mark_cnt[last_mark];
4709
4710 /*
4711 * Tune reconstruction:
4712 */
4713 window = 32*(PAGE_SIZE/512);
4714 printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
4715 window/2,(unsigned long long) max_sectors/2);
4716
4717 atomic_set(&mddev->recovery_active, 0);
4718 init_waitqueue_head(&mddev->recovery_wait);
4719 last_check = 0;
4720
4721 if (j>2) {
4722 printk(KERN_INFO
4723 "md: resuming recovery of %s from checkpoint.\n",
4724 mdname(mddev));
4725 mddev->curr_resync = j;
4726 }
4727
4728 while (j < max_sectors) {
4729 sector_t sectors;
4730
4731 skipped = 0;
4732 sectors = mddev->pers->sync_request(mddev, j, &skipped,
4733 currspeed < speed_min(mddev));
4734 if (sectors == 0) {
4735 set_bit(MD_RECOVERY_ERR, &mddev->recovery);
4736 goto out;
4737 }
4738
4739 if (!skipped) { /* actual IO requested */
4740 io_sectors += sectors;
4741 atomic_add(sectors, &mddev->recovery_active);
4742 }
4743
4744 j += sectors;
4745 if (j>1) mddev->curr_resync = j;
4746 if (last_check == 0)
4747 /* this is the earliest that the rebuild will be
4748 * visible in /proc/mdstat
4749 */
4750 md_new_event(mddev);
4751
4752 if (last_check + window > io_sectors || j == max_sectors)
4753 continue;
4754
4755 last_check = io_sectors;
4756
4757 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
4758 test_bit(MD_RECOVERY_ERR, &mddev->recovery))
4759 break;
4760
4761 repeat:
4762 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
4763 /* step marks */
4764 int next = (last_mark+1) % SYNC_MARKS;
4765
4766 mddev->resync_mark = mark[next];
4767 mddev->resync_mark_cnt = mark_cnt[next];
4768 mark[next] = jiffies;
4769 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
4770 last_mark = next;
4771 }
4772
4773
4774 if (kthread_should_stop()) {
4775 /*
4776 * got a signal, exit.
4777 */
4778 printk(KERN_INFO
4779 "md: md_do_sync() got signal ... exiting\n");
4780 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4781 goto out;
4782 }
4783
4784 /*
4785 * this loop exits only if either we are slower than
4786 * the 'hard' speed limit, or the system was IO-idle for
4787 * a jiffy.
4788 * the system might be non-idle CPU-wise, but we only care
4789 * about not overloading the IO subsystem.
(things like an
4790 * e2fsck being done on the RAID array should execute fast)
4791 */
4792 mddev->queue->unplug_fn(mddev->queue);
4793 cond_resched();
4794
4795 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
4796 /((jiffies-mddev->resync_mark)/HZ +1) +1;
4797
4798 if (currspeed > speed_min(mddev)) {
4799 if ((currspeed > speed_max(mddev)) ||
4800 !is_mddev_idle(mddev)) {
4801 msleep(500);
4802 goto repeat;
4803 }
4804 }
4805 }
4806 printk(KERN_INFO "md: %s: sync done.\n",mdname(mddev));
4807 /*
4808 * this also signals 'finished resyncing' to md_stop
4809 */
4810 out:
4811 mddev->queue->unplug_fn(mddev->queue);
4812
4813 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));
4814
4815 /* tell personality that we are finished */
4816 mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);
4817
4818 if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
4819 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
4820 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
4821 mddev->curr_resync > 2 &&
4822 mddev->curr_resync >= mddev->recovery_cp) {
4823 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
4824 printk(KERN_INFO
4825 "md: checkpointing recovery of %s.\n",
4826 mdname(mddev));
4827 mddev->recovery_cp = mddev->curr_resync;
4828 } else
4829 mddev->recovery_cp = MaxSector;
4830 }
4831
4832 skip:
4833 mddev->curr_resync = 0;
4834 wake_up(&resync_wait);
4835 set_bit(MD_RECOVERY_DONE, &mddev->recovery);
4836 md_wakeup_thread(mddev->thread);
4837 }
4838 EXPORT_SYMBOL_GPL(md_do_sync);
4839
4840
4841 /*
4842 * This routine is regularly called by all per-raid-array threads to
4843 * deal with generic issues like resync and super-block update.
4844 * Raid personalities that don't have a thread (linear/raid0) do not
4845 * need this as they never do any recovery or update the superblock.
4846 *
4847 * It does not do any resync itself, but rather "forks" off other threads
4848 * to do that as needed.
4849 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
4850 * "->recovery" and create a thread at ->sync_thread.
4851 * When the thread finishes it sets MD_RECOVERY_DONE (and might set MD_RECOVERY_ERR)
4852 * and wakes up this thread, which will reap the thread and finish up.
4853 * This thread also removes any faulty devices (with nr_pending == 0).
4854 *
4855 * The overall approach is:
4856 * 1/ If the superblock needs updating, update it.
4857 * 2/ If a recovery thread is running, don't do anything else.
4858 * 3/ If recovery has finished, clean up, possibly marking spares active.
4859 * 4/ If there are any faulty devices, remove them.
4860 * 5/ If array is degraded, try to add spare devices
4861 * 6/ If array has spares or is not in-sync, start a resync thread.
4862 */
4863 void md_check_recovery(mddev_t *mddev)
4864 {
4865 mdk_rdev_t *rdev;
4866 struct list_head *rtmp;
4867
4868
4869 if (mddev->bitmap)
4870 bitmap_daemon_work(mddev->bitmap);
4871
4872 if (mddev->ro)
4873 return;
4874
4875 if (signal_pending(current)) {
4876 if (mddev->pers->sync_request) {
4877 printk(KERN_INFO "md: %s in immediate safe mode\n",
4878 mdname(mddev));
4879 mddev->safemode = 2;
4880 }
4881 flush_signals(current);
4882 }
4883
4884 if ( ! (
4885 mddev->sb_dirty ||
4886 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
4887 test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
4888 (mddev->safemode == 1) ||
4889 (mddev->safemode == 2 && !
atomic_read(&mddev->writes_pending)
4890 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
4891 ))
4892 return;
4893
4894 if (mddev_trylock(mddev)) {
4895 int spares =0;
4896
4897 spin_lock_irq(&mddev->write_lock);
4898 if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
4899 !mddev->in_sync && mddev->recovery_cp == MaxSector) {
4900 mddev->in_sync = 1;
4901 mddev->sb_dirty = 1;
4902 }
4903 if (mddev->safemode == 1)
4904 mddev->safemode = 0;
4905 spin_unlock_irq(&mddev->write_lock);
4906
4907 if (mddev->sb_dirty)
4908 md_update_sb(mddev);
4909
4910
4911 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
4912 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
4913 /* resync/recovery still happening */
4914 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4915 goto unlock;
4916 }
4917 if (mddev->sync_thread) {
4918 /* resync has finished, collect result */
4919 md_unregister_thread(mddev->sync_thread);
4920 mddev->sync_thread = NULL;
4921 if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
4922 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
4923 /* success...*/
4924 /* activate any spares */
4925 mddev->pers->spare_active(mddev);
4926 }
4927 md_update_sb(mddev);
4928
4929 /* if array is no longer degraded, then any saved_raid_disk
4930 * information must be scrapped
4931 */
4932 if (!mddev->degraded)
4933 ITERATE_RDEV(mddev,rdev,rtmp)
4934 rdev->saved_raid_disk = -1;
4935
4936 mddev->recovery = 0;
4937 /* flag recovery needed just to double check */
4938 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4939 md_new_event(mddev);
4940 goto unlock;
4941 }
4942 /* Clear some bits that don't mean anything, but
4943 * might be left set
4944 */
4945 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
4946 clear_bit(MD_RECOVERY_ERR, &mddev->recovery);
4947 clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
4948 clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
4949
4950 /* no recovery is running.
4951 * remove any failed drives, then
4952 * add spares if possible.
4953 * Spares are also removed and re-added, to allow
4954 * the personality to fail the re-add.
4955 */
4956 ITERATE_RDEV(mddev,rdev,rtmp)
4957 if (rdev->raid_disk >= 0 &&
4958 (test_bit(Faulty, &rdev->flags) || ! test_bit(In_sync, &rdev->flags)) &&
4959 atomic_read(&rdev->nr_pending)==0) {
4960 if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0) {
4961 char nm[20];
4962 sprintf(nm,"rd%d", rdev->raid_disk);
4963 sysfs_remove_link(&mddev->kobj, nm);
4964 rdev->raid_disk = -1;
4965 }
4966 }
4967
4968 if (mddev->degraded) {
4969 ITERATE_RDEV(mddev,rdev,rtmp)
4970 if (rdev->raid_disk < 0
4971 && !test_bit(Faulty, &rdev->flags)) {
4972 if (mddev->pers->hot_add_disk(mddev,rdev)) {
4973 char nm[20];
4974 sprintf(nm, "rd%d", rdev->raid_disk);
4975 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
4976 spares++;
4977 md_new_event(mddev);
4978 } else
4979 break;
4980 }
4981 }
4982
4983 if (spares) {
4984 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4985 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
4986 } else if (mddev->recovery_cp < MaxSector) {
4987 set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
4988 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
4989 /* nothing to be done ... */
4990 goto unlock;
4991
4992 if (mddev->pers->sync_request) {
4993 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
4994 if (spares && mddev->bitmap && ! mddev->bitmap->file) {
4995 /* We are adding a device or devices to an array
4996 * which has the bitmap stored on all devices.
4997 * So make sure all bitmap pages get written 4998 */ 4999 bitmap_write_all(mddev->bitmap); 5000 } 5001 mddev->sync_thread = md_register_thread(md_do_sync, 5002 mddev, 5003 "%s_resync"); 5004 if (!mddev->sync_thread) { 5005 printk(KERN_ERR "%s: could not start resync" 5006 " thread...\n", 5007 mdname(mddev)); 5008 /* leave the spares where they are, it shouldn't hurt */ 5009 mddev->recovery = 0; 5010 } else 5011 md_wakeup_thread(mddev->sync_thread); 5012 md_new_event(mddev); 5013 } 5014 unlock: 5015 mddev_unlock(mddev); 5016 } 5017 } 5018 5019 static int md_notify_reboot(struct notifier_block *this, 5020 unsigned long code, void *x) 5021 { 5022 struct list_head *tmp; 5023 mddev_t *mddev; 5024 5025 if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) { 5026 5027 printk(KERN_INFO "md: stopping all md devices.\n"); 5028 5029 ITERATE_MDDEV(mddev,tmp) 5030 if (mddev_trylock(mddev)) 5031 do_md_stop (mddev, 1); 5032 /* 5033 * certain more exotic SCSI devices are known to be 5034 * volatile wrt too early system reboots. While the 5035 * right place to handle this issue is the given 5036 * driver, we do want to have a safe RAID driver ... 5037 */ 5038 mdelay(1000*1); 5039 } 5040 return NOTIFY_DONE; 5041 } 5042 5043 static struct notifier_block md_notifier = { 5044 .notifier_call = md_notify_reboot, 5045 .next = NULL, 5046 .priority = INT_MAX, /* before any real devices */ 5047 }; 5048 5049 static void md_geninit(void) 5050 { 5051 struct proc_dir_entry *p; 5052 5053 dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t)); 5054 5055 p = create_proc_entry("mdstat", S_IRUGO, NULL); 5056 if (p) 5057 p->proc_fops = &md_seq_fops; 5058 } 5059 5060 static int __init md_init(void) 5061 { 5062 int minor; 5063 5064 printk(KERN_INFO "md: md driver %d.%d.%d MAX_MD_DEVS=%d," 5065 " MD_SB_DISKS=%d\n", 5066 MD_MAJOR_VERSION, MD_MINOR_VERSION, 5067 MD_PATCHLEVEL_VERSION, MAX_MD_DEVS, MD_SB_DISKS); 5068 printk(KERN_INFO "md: bitmap version %d.%d\n", BITMAP_MAJOR_HI, 5069 BITMAP_MINOR); 5070 5071 if (register_blkdev(MAJOR_NR, "md")) 5072 return -1; 5073 if ((mdp_major=register_blkdev(0, "mdp"))<=0) { 5074 unregister_blkdev(MAJOR_NR, "md"); 5075 return -1; 5076 } 5077 devfs_mk_dir("md"); 5078 blk_register_region(MKDEV(MAJOR_NR, 0), MAX_MD_DEVS, THIS_MODULE, 5079 md_probe, NULL, NULL); 5080 blk_register_region(MKDEV(mdp_major, 0), MAX_MD_DEVS<<MdpMinorShift, THIS_MODULE, 5081 md_probe, NULL, NULL); 5082 5083 for (minor=0; minor < MAX_MD_DEVS; ++minor) 5084 devfs_mk_bdev(MKDEV(MAJOR_NR, minor), 5085 S_IFBLK|S_IRUSR|S_IWUSR, 5086 "md/%d", minor); 5087 5088 for (minor=0; minor < MAX_MD_DEVS; ++minor) 5089 devfs_mk_bdev(MKDEV(mdp_major, minor<<MdpMinorShift), 5090 S_IFBLK|S_IRUSR|S_IWUSR, 5091 "md/mdp%d", minor); 5092 5093 5094 register_reboot_notifier(&md_notifier); 5095 raid_table_header = register_sysctl_table(raid_root_table, 1); 5096 5097 md_geninit(); 5098 return (0); 5099 } 5100 5101 5102 #ifndef MODULE 5103 5104 /* 5105 * Searches all registered partitions for autorun RAID arrays 5106 * at boot time. 
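 * (Added note, for illustration only: md_autodetect_dev() below records up to
 *  127 partition dev_t values in detected_devices[]; autostart_arrays() then
 *  imports each one with md_import_device(), queues it on pending_raid_disks
 *  and finally hands the whole set to autorun_devices().)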
5107 */ 5108 static dev_t detected_devices[128]; 5109 static int dev_cnt; 5110 5111 void md_autodetect_dev(dev_t dev) 5112 { 5113 if (dev_cnt >= 0 && dev_cnt < 127) 5114 detected_devices[dev_cnt++] = dev; 5115 } 5116 5117 5118 static void autostart_arrays(int part) 5119 { 5120 mdk_rdev_t *rdev; 5121 int i; 5122 5123 printk(KERN_INFO "md: Autodetecting RAID arrays.\n"); 5124 5125 for (i = 0; i < dev_cnt; i++) { 5126 dev_t dev = detected_devices[i]; 5127 5128 rdev = md_import_device(dev,0, 0); 5129 if (IS_ERR(rdev)) 5130 continue; 5131 5132 if (test_bit(Faulty, &rdev->flags)) { 5133 MD_BUG(); 5134 continue; 5135 } 5136 list_add(&rdev->same_set, &pending_raid_disks); 5137 } 5138 dev_cnt = 0; 5139 5140 autorun_devices(part); 5141 } 5142 5143 #endif 5144 5145 static __exit void md_exit(void) 5146 { 5147 mddev_t *mddev; 5148 struct list_head *tmp; 5149 int i; 5150 blk_unregister_region(MKDEV(MAJOR_NR,0), MAX_MD_DEVS); 5151 blk_unregister_region(MKDEV(mdp_major,0), MAX_MD_DEVS << MdpMinorShift); 5152 for (i=0; i < MAX_MD_DEVS; i++) 5153 devfs_remove("md/%d", i); 5154 for (i=0; i < MAX_MD_DEVS; i++) 5155 devfs_remove("md/d%d", i); 5156 5157 devfs_remove("md"); 5158 5159 unregister_blkdev(MAJOR_NR,"md"); 5160 unregister_blkdev(mdp_major, "mdp"); 5161 unregister_reboot_notifier(&md_notifier); 5162 unregister_sysctl_table(raid_table_header); 5163 remove_proc_entry("mdstat", NULL); 5164 ITERATE_MDDEV(mddev,tmp) { 5165 struct gendisk *disk = mddev->gendisk; 5166 if (!disk) 5167 continue; 5168 export_array(mddev); 5169 del_gendisk(disk); 5170 put_disk(disk); 5171 mddev->gendisk = NULL; 5172 mddev_put(mddev); 5173 } 5174 } 5175 5176 module_init(md_init) 5177 module_exit(md_exit) 5178 5179 static int get_ro(char *buffer, struct kernel_param *kp) 5180 { 5181 return sprintf(buffer, "%d", start_readonly); 5182 } 5183 static int set_ro(const char *val, struct kernel_param *kp) 5184 { 5185 char *e; 5186 int num = simple_strtoul(val, &e, 10); 5187 if (*val && (*e == '\0' || *e == '\n')) { 5188 start_readonly = num; 5189 return 0; 5190 } 5191 return -EINVAL; 5192 } 5193 5194 module_param_call(start_ro, set_ro, get_ro, NULL, 0600); 5195 module_param(start_dirty_degraded, int, 0644); 5196 5197 5198 EXPORT_SYMBOL(register_md_personality); 5199 EXPORT_SYMBOL(unregister_md_personality); 5200 EXPORT_SYMBOL(md_error); 5201 EXPORT_SYMBOL(md_done_sync); 5202 EXPORT_SYMBOL(md_write_start); 5203 EXPORT_SYMBOL(md_write_end); 5204 EXPORT_SYMBOL(md_register_thread); 5205 EXPORT_SYMBOL(md_unregister_thread); 5206 EXPORT_SYMBOL(md_wakeup_thread); 5207 EXPORT_SYMBOL(md_print_devices); 5208 EXPORT_SYMBOL(md_check_recovery); 5209 MODULE_LICENSE("GPL"); 5210 MODULE_ALIAS("md"); 5211 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR); 5212
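/*
 * Illustrative user-space sketch (not part of md.c, kept out of the build by
 * the #if 0 guard): one way a monitoring tool could consume the poll support
 * implemented by mdstat_poll() above - read /proc/mdstat, block in poll()
 * until md_new_event() bumps the event count, then rewind and re-read.
 * Buffer size and error handling are assumptions made for the example only.
 */
#if 0
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	int fd = open("/proc/mdstat", O_RDONLY);

	if (fd < 0)
		return 1;
	for (;;) {
		ssize_t n = read(fd, buf, sizeof(buf) - 1);

		if (n > 0) {
			buf[n] = '\0';
			fputs(buf, stdout);	/* current array status */
		}
		struct pollfd pfd = { .fd = fd, .events = POLLPRI };

		poll(&pfd, 1, -1);	/* wakes on POLLERR|POLLPRI after an md event */
		lseek(fd, 0, SEEK_SET);	/* rewind so the next read sees fresh output */
	}
	return 0;
}
#endif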