1 /* 2 md.c : Multiple Devices driver for Linux 3 Copyright (C) 1998, 1999, 2000 Ingo Molnar 4 5 completely rewritten, based on the MD driver code from Marc Zyngier 6 7 Changes: 8 9 - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar 10 - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com> 11 - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net> 12 - kerneld support by Boris Tobotras <boris@xtalk.msk.su> 13 - kmod support by: Cyrus Durgin 14 - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com> 15 - Devfs support by Richard Gooch <rgooch@atnf.csiro.au> 16 17 - lots of fixes and improvements to the RAID1/RAID5 and generic 18 RAID code (such as request based resynchronization): 19 20 Neil Brown <neilb@cse.unsw.edu.au>. 21 22 - persistent bitmap code 23 Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc. 24 25 This program is free software; you can redistribute it and/or modify 26 it under the terms of the GNU General Public License as published by 27 the Free Software Foundation; either version 2, or (at your option) 28 any later version. 29 30 You should have received a copy of the GNU General Public License 31 (for example /usr/src/linux/COPYING); if not, write to the Free 32 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 33 */ 34 35 #include <linux/module.h> 36 #include <linux/config.h> 37 #include <linux/kthread.h> 38 #include <linux/linkage.h> 39 #include <linux/raid/md.h> 40 #include <linux/raid/bitmap.h> 41 #include <linux/sysctl.h> 42 #include <linux/devfs_fs_kernel.h> 43 #include <linux/buffer_head.h> /* for invalidate_bdev */ 44 #include <linux/suspend.h> 45 #include <linux/poll.h> 46 #include <linux/mutex.h> 47 48 #include <linux/init.h> 49 50 #include <linux/file.h> 51 52 #ifdef CONFIG_KMOD 53 #include <linux/kmod.h> 54 #endif 55 56 #include <asm/unaligned.h> 57 58 #define MAJOR_NR MD_MAJOR 59 #define MD_DRIVER 60 61 /* 63 partitions with the alternate major number (mdp) */ 62 #define MdpMinorShift 6 63 64 #define DEBUG 0 65 #define dprintk(x...) ((void)(DEBUG && printk(x))) 66 67 68 #ifndef MODULE 69 static void autostart_arrays (int part); 70 #endif 71 72 static LIST_HEAD(pers_list); 73 static DEFINE_SPINLOCK(pers_lock); 74 75 /* 76 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit' 77 * is 1000 KB/sec, so the extra system load does not show up that much. 78 * Increase it if you want to have more _guaranteed_ speed. Note that 79 * the RAID driver will use the maximum available bandwidth if the IO 80 * subsystem is idle. There is also an 'absolute maximum' reconstruction 81 * speed limit - in case reconstruction slows down your system despite 82 * idle IO detection. 83 * 84 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max. 85 * or /sys/block/mdX/md/sync_speed_{min,max} 86 */ 87 88 static int sysctl_speed_limit_min = 1000; 89 static int sysctl_speed_limit_max = 200000; 90 static inline int speed_min(mddev_t *mddev) 91 { 92 return mddev->sync_speed_min ? 93 mddev->sync_speed_min : sysctl_speed_limit_min; 94 } 95 96 static inline int speed_max(mddev_t *mddev) 97 { 98 return mddev->sync_speed_max ? 
		mddev->sync_speed_max : sysctl_speed_limit_max;
}

static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MIN,
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MAX,
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_dir_table[] = {
	{
		.ctl_name	= DEV_RAID,
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_table,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_root_table[] = {
	{
		.ctl_name	= CTL_DEV,
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ .ctl_name = 0 }
};

static struct block_device_operations md_fops;

static int start_readonly;

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);


/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
 */
#define ITERATE_MDDEV(mddev,tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		tmp = all_mddevs.next;					\
		mddev = NULL;});					\
	     ({ if (tmp != &all_mddevs)					\
			mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (mddev) mddev_put(mddev);				\
		mddev = list_entry(tmp, mddev_t, all_mddevs);		\
		tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		tmp = tmp->next;})					\
		)


static int md_fail_request (request_queue_t *q, struct bio *bio)
{
	bio_io_error(bio, bio->bi_size);
	return 0;
}

static inline mddev_t *mddev_get(mddev_t *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_put(mddev_t *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks)) {
		list_del(&mddev->all_mddevs);
		/* that blocks */
		blk_cleanup_queue(mddev->queue);
		/* that also blocks */
		kobject_unregister(&mddev->kobj);
		/* result blows...
*/ 223 } 224 spin_unlock(&all_mddevs_lock); 225 } 226 227 static mddev_t * mddev_find(dev_t unit) 228 { 229 mddev_t *mddev, *new = NULL; 230 231 retry: 232 spin_lock(&all_mddevs_lock); 233 list_for_each_entry(mddev, &all_mddevs, all_mddevs) 234 if (mddev->unit == unit) { 235 mddev_get(mddev); 236 spin_unlock(&all_mddevs_lock); 237 kfree(new); 238 return mddev; 239 } 240 241 if (new) { 242 list_add(&new->all_mddevs, &all_mddevs); 243 spin_unlock(&all_mddevs_lock); 244 return new; 245 } 246 spin_unlock(&all_mddevs_lock); 247 248 new = kzalloc(sizeof(*new), GFP_KERNEL); 249 if (!new) 250 return NULL; 251 252 new->unit = unit; 253 if (MAJOR(unit) == MD_MAJOR) 254 new->md_minor = MINOR(unit); 255 else 256 new->md_minor = MINOR(unit) >> MdpMinorShift; 257 258 mutex_init(&new->reconfig_mutex); 259 INIT_LIST_HEAD(&new->disks); 260 INIT_LIST_HEAD(&new->all_mddevs); 261 init_timer(&new->safemode_timer); 262 atomic_set(&new->active, 1); 263 spin_lock_init(&new->write_lock); 264 init_waitqueue_head(&new->sb_wait); 265 266 new->queue = blk_alloc_queue(GFP_KERNEL); 267 if (!new->queue) { 268 kfree(new); 269 return NULL; 270 } 271 set_bit(QUEUE_FLAG_CLUSTER, &new->queue->queue_flags); 272 273 blk_queue_make_request(new->queue, md_fail_request); 274 275 goto retry; 276 } 277 278 static inline int mddev_lock(mddev_t * mddev) 279 { 280 return mutex_lock_interruptible(&mddev->reconfig_mutex); 281 } 282 283 static inline void mddev_lock_uninterruptible(mddev_t * mddev) 284 { 285 mutex_lock(&mddev->reconfig_mutex); 286 } 287 288 static inline int mddev_trylock(mddev_t * mddev) 289 { 290 return mutex_trylock(&mddev->reconfig_mutex); 291 } 292 293 static inline void mddev_unlock(mddev_t * mddev) 294 { 295 mutex_unlock(&mddev->reconfig_mutex); 296 297 md_wakeup_thread(mddev->thread); 298 } 299 300 static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) 301 { 302 mdk_rdev_t * rdev; 303 struct list_head *tmp; 304 305 ITERATE_RDEV(mddev,rdev,tmp) { 306 if (rdev->desc_nr == nr) 307 return rdev; 308 } 309 return NULL; 310 } 311 312 static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev) 313 { 314 struct list_head *tmp; 315 mdk_rdev_t *rdev; 316 317 ITERATE_RDEV(mddev,rdev,tmp) { 318 if (rdev->bdev->bd_dev == dev) 319 return rdev; 320 } 321 return NULL; 322 } 323 324 static struct mdk_personality *find_pers(int level, char *clevel) 325 { 326 struct mdk_personality *pers; 327 list_for_each_entry(pers, &pers_list, list) { 328 if (level != LEVEL_NONE && pers->level == level) 329 return pers; 330 if (strcmp(pers->name, clevel)==0) 331 return pers; 332 } 333 return NULL; 334 } 335 336 static inline sector_t calc_dev_sboffset(struct block_device *bdev) 337 { 338 sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 339 return MD_NEW_SIZE_BLOCKS(size); 340 } 341 342 static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size) 343 { 344 sector_t size; 345 346 size = rdev->sb_offset; 347 348 if (chunk_size) 349 size &= ~((sector_t)chunk_size/1024 - 1); 350 return size; 351 } 352 353 static int alloc_disk_sb(mdk_rdev_t * rdev) 354 { 355 if (rdev->sb_page) 356 MD_BUG(); 357 358 rdev->sb_page = alloc_page(GFP_KERNEL); 359 if (!rdev->sb_page) { 360 printk(KERN_ALERT "md: out of memory.\n"); 361 return -EINVAL; 362 } 363 364 return 0; 365 } 366 367 static void free_disk_sb(mdk_rdev_t * rdev) 368 { 369 if (rdev->sb_page) { 370 put_page(rdev->sb_page); 371 rdev->sb_loaded = 0; 372 rdev->sb_page = NULL; 373 rdev->sb_offset = 0; 374 rdev->size = 0; 375 } 376 } 377 378 379 static int super_written(struct bio *bio, 
unsigned int bytes_done, int error) 380 { 381 mdk_rdev_t *rdev = bio->bi_private; 382 mddev_t *mddev = rdev->mddev; 383 if (bio->bi_size) 384 return 1; 385 386 if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) 387 md_error(mddev, rdev); 388 389 if (atomic_dec_and_test(&mddev->pending_writes)) 390 wake_up(&mddev->sb_wait); 391 bio_put(bio); 392 return 0; 393 } 394 395 static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error) 396 { 397 struct bio *bio2 = bio->bi_private; 398 mdk_rdev_t *rdev = bio2->bi_private; 399 mddev_t *mddev = rdev->mddev; 400 if (bio->bi_size) 401 return 1; 402 403 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && 404 error == -EOPNOTSUPP) { 405 unsigned long flags; 406 /* barriers don't appear to be supported :-( */ 407 set_bit(BarriersNotsupp, &rdev->flags); 408 mddev->barriers_work = 0; 409 spin_lock_irqsave(&mddev->write_lock, flags); 410 bio2->bi_next = mddev->biolist; 411 mddev->biolist = bio2; 412 spin_unlock_irqrestore(&mddev->write_lock, flags); 413 wake_up(&mddev->sb_wait); 414 bio_put(bio); 415 return 0; 416 } 417 bio_put(bio2); 418 bio->bi_private = rdev; 419 return super_written(bio, bytes_done, error); 420 } 421 422 void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, 423 sector_t sector, int size, struct page *page) 424 { 425 /* write first size bytes of page to sector of rdev 426 * Increment mddev->pending_writes before returning 427 * and decrement it on completion, waking up sb_wait 428 * if zero is reached. 429 * If an error occurred, call md_error 430 * 431 * As we might need to resubmit the request if BIO_RW_BARRIER 432 * causes ENOTSUPP, we allocate a spare bio... 433 */ 434 struct bio *bio = bio_alloc(GFP_NOIO, 1); 435 int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC); 436 437 bio->bi_bdev = rdev->bdev; 438 bio->bi_sector = sector; 439 bio_add_page(bio, page, size, 0); 440 bio->bi_private = rdev; 441 bio->bi_end_io = super_written; 442 bio->bi_rw = rw; 443 444 atomic_inc(&mddev->pending_writes); 445 if (!test_bit(BarriersNotsupp, &rdev->flags)) { 446 struct bio *rbio; 447 rw |= (1<<BIO_RW_BARRIER); 448 rbio = bio_clone(bio, GFP_NOIO); 449 rbio->bi_private = bio; 450 rbio->bi_end_io = super_written_barrier; 451 submit_bio(rw, rbio); 452 } else 453 submit_bio(rw, bio); 454 } 455 456 void md_super_wait(mddev_t *mddev) 457 { 458 /* wait for all superblock writes that were scheduled to complete. 
459 * if any had to be retried (due to BARRIER problems), retry them 460 */ 461 DEFINE_WAIT(wq); 462 for(;;) { 463 prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE); 464 if (atomic_read(&mddev->pending_writes)==0) 465 break; 466 while (mddev->biolist) { 467 struct bio *bio; 468 spin_lock_irq(&mddev->write_lock); 469 bio = mddev->biolist; 470 mddev->biolist = bio->bi_next ; 471 bio->bi_next = NULL; 472 spin_unlock_irq(&mddev->write_lock); 473 submit_bio(bio->bi_rw, bio); 474 } 475 schedule(); 476 } 477 finish_wait(&mddev->sb_wait, &wq); 478 } 479 480 static int bi_complete(struct bio *bio, unsigned int bytes_done, int error) 481 { 482 if (bio->bi_size) 483 return 1; 484 485 complete((struct completion*)bio->bi_private); 486 return 0; 487 } 488 489 int sync_page_io(struct block_device *bdev, sector_t sector, int size, 490 struct page *page, int rw) 491 { 492 struct bio *bio = bio_alloc(GFP_NOIO, 1); 493 struct completion event; 494 int ret; 495 496 rw |= (1 << BIO_RW_SYNC); 497 498 bio->bi_bdev = bdev; 499 bio->bi_sector = sector; 500 bio_add_page(bio, page, size, 0); 501 init_completion(&event); 502 bio->bi_private = &event; 503 bio->bi_end_io = bi_complete; 504 submit_bio(rw, bio); 505 wait_for_completion(&event); 506 507 ret = test_bit(BIO_UPTODATE, &bio->bi_flags); 508 bio_put(bio); 509 return ret; 510 } 511 EXPORT_SYMBOL_GPL(sync_page_io); 512 513 static int read_disk_sb(mdk_rdev_t * rdev, int size) 514 { 515 char b[BDEVNAME_SIZE]; 516 if (!rdev->sb_page) { 517 MD_BUG(); 518 return -EINVAL; 519 } 520 if (rdev->sb_loaded) 521 return 0; 522 523 524 if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ)) 525 goto fail; 526 rdev->sb_loaded = 1; 527 return 0; 528 529 fail: 530 printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n", 531 bdevname(rdev->bdev,b)); 532 return -EINVAL; 533 } 534 535 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2) 536 { 537 if ( (sb1->set_uuid0 == sb2->set_uuid0) && 538 (sb1->set_uuid1 == sb2->set_uuid1) && 539 (sb1->set_uuid2 == sb2->set_uuid2) && 540 (sb1->set_uuid3 == sb2->set_uuid3)) 541 542 return 1; 543 544 return 0; 545 } 546 547 548 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2) 549 { 550 int ret; 551 mdp_super_t *tmp1, *tmp2; 552 553 tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL); 554 tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL); 555 556 if (!tmp1 || !tmp2) { 557 ret = 0; 558 printk(KERN_INFO "md.c: sb1 is not equal to sb2!\n"); 559 goto abort; 560 } 561 562 *tmp1 = *sb1; 563 *tmp2 = *sb2; 564 565 /* 566 * nr_disks is not constant 567 */ 568 tmp1->nr_disks = 0; 569 tmp2->nr_disks = 0; 570 571 if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4)) 572 ret = 0; 573 else 574 ret = 1; 575 576 abort: 577 kfree(tmp1); 578 kfree(tmp2); 579 return ret; 580 } 581 582 static unsigned int calc_sb_csum(mdp_super_t * sb) 583 { 584 unsigned int disk_csum, csum; 585 586 disk_csum = sb->sb_csum; 587 sb->sb_csum = 0; 588 csum = csum_partial((void *)sb, MD_SB_BYTES, 0); 589 sb->sb_csum = disk_csum; 590 return csum; 591 } 592 593 594 /* 595 * Handle superblock details. 596 * We want to be able to handle multiple superblock formats 597 * so we have a common interface to them all, and an array of 598 * different handlers. 599 * We rely on user-space to write the initial superblock, and support 600 * reading and updating of superblocks. 601 * Interface methods are: 602 * int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version) 603 * loads and validates a superblock on dev. 
604 * if refdev != NULL, compare superblocks on both devices 605 * Return: 606 * 0 - dev has a superblock that is compatible with refdev 607 * 1 - dev has a superblock that is compatible and newer than refdev 608 * so dev should be used as the refdev in future 609 * -EINVAL superblock incompatible or invalid 610 * -othererror e.g. -EIO 611 * 612 * int validate_super(mddev_t *mddev, mdk_rdev_t *dev) 613 * Verify that dev is acceptable into mddev. 614 * The first time, mddev->raid_disks will be 0, and data from 615 * dev should be merged in. Subsequent calls check that dev 616 * is new enough. Return 0 or -EINVAL 617 * 618 * void sync_super(mddev_t *mddev, mdk_rdev_t *dev) 619 * Update the superblock for rdev with data in mddev 620 * This does not write to disc. 621 * 622 */ 623 624 struct super_type { 625 char *name; 626 struct module *owner; 627 int (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version); 628 int (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev); 629 void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev); 630 }; 631 632 /* 633 * load_super for 0.90.0 634 */ 635 static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) 636 { 637 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 638 mdp_super_t *sb; 639 int ret; 640 sector_t sb_offset; 641 642 /* 643 * Calculate the position of the superblock, 644 * it's at the end of the disk. 645 * 646 * It also happens to be a multiple of 4Kb. 647 */ 648 sb_offset = calc_dev_sboffset(rdev->bdev); 649 rdev->sb_offset = sb_offset; 650 651 ret = read_disk_sb(rdev, MD_SB_BYTES); 652 if (ret) return ret; 653 654 ret = -EINVAL; 655 656 bdevname(rdev->bdev, b); 657 sb = (mdp_super_t*)page_address(rdev->sb_page); 658 659 if (sb->md_magic != MD_SB_MAGIC) { 660 printk(KERN_ERR "md: invalid raid superblock magic on %s\n", 661 b); 662 goto abort; 663 } 664 665 if (sb->major_version != 0 || 666 sb->minor_version < 90 || 667 sb->minor_version > 91) { 668 printk(KERN_WARNING "Bad version number %d.%d on %s\n", 669 sb->major_version, sb->minor_version, 670 b); 671 goto abort; 672 } 673 674 if (sb->raid_disks <= 0) 675 goto abort; 676 677 if (csum_fold(calc_sb_csum(sb)) != csum_fold(sb->sb_csum)) { 678 printk(KERN_WARNING "md: invalid superblock checksum on %s\n", 679 b); 680 goto abort; 681 } 682 683 rdev->preferred_minor = sb->md_minor; 684 rdev->data_offset = 0; 685 rdev->sb_size = MD_SB_BYTES; 686 687 if (sb->level == LEVEL_MULTIPATH) 688 rdev->desc_nr = -1; 689 else 690 rdev->desc_nr = sb->this_disk.number; 691 692 if (refdev == 0) 693 ret = 1; 694 else { 695 __u64 ev1, ev2; 696 mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page); 697 if (!uuid_equal(refsb, sb)) { 698 printk(KERN_WARNING "md: %s has different UUID to %s\n", 699 b, bdevname(refdev->bdev,b2)); 700 goto abort; 701 } 702 if (!sb_equal(refsb, sb)) { 703 printk(KERN_WARNING "md: %s has same UUID" 704 " but different superblock to %s\n", 705 b, bdevname(refdev->bdev, b2)); 706 goto abort; 707 } 708 ev1 = md_event(sb); 709 ev2 = md_event(refsb); 710 if (ev1 > ev2) 711 ret = 1; 712 else 713 ret = 0; 714 } 715 rdev->size = calc_dev_size(rdev, sb->chunk_size); 716 717 if (rdev->size < sb->size && sb->level > 1) 718 /* "this cannot possibly happen" ... 
*/ 719 ret = -EINVAL; 720 721 abort: 722 return ret; 723 } 724 725 /* 726 * validate_super for 0.90.0 727 */ 728 static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) 729 { 730 mdp_disk_t *desc; 731 mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page); 732 733 rdev->raid_disk = -1; 734 rdev->flags = 0; 735 if (mddev->raid_disks == 0) { 736 mddev->major_version = 0; 737 mddev->minor_version = sb->minor_version; 738 mddev->patch_version = sb->patch_version; 739 mddev->persistent = ! sb->not_persistent; 740 mddev->chunk_size = sb->chunk_size; 741 mddev->ctime = sb->ctime; 742 mddev->utime = sb->utime; 743 mddev->level = sb->level; 744 mddev->clevel[0] = 0; 745 mddev->layout = sb->layout; 746 mddev->raid_disks = sb->raid_disks; 747 mddev->size = sb->size; 748 mddev->events = md_event(sb); 749 mddev->bitmap_offset = 0; 750 mddev->default_bitmap_offset = MD_SB_BYTES >> 9; 751 752 if (mddev->minor_version >= 91) { 753 mddev->reshape_position = sb->reshape_position; 754 mddev->delta_disks = sb->delta_disks; 755 mddev->new_level = sb->new_level; 756 mddev->new_layout = sb->new_layout; 757 mddev->new_chunk = sb->new_chunk; 758 } else { 759 mddev->reshape_position = MaxSector; 760 mddev->delta_disks = 0; 761 mddev->new_level = mddev->level; 762 mddev->new_layout = mddev->layout; 763 mddev->new_chunk = mddev->chunk_size; 764 } 765 766 if (sb->state & (1<<MD_SB_CLEAN)) 767 mddev->recovery_cp = MaxSector; 768 else { 769 if (sb->events_hi == sb->cp_events_hi && 770 sb->events_lo == sb->cp_events_lo) { 771 mddev->recovery_cp = sb->recovery_cp; 772 } else 773 mddev->recovery_cp = 0; 774 } 775 776 memcpy(mddev->uuid+0, &sb->set_uuid0, 4); 777 memcpy(mddev->uuid+4, &sb->set_uuid1, 4); 778 memcpy(mddev->uuid+8, &sb->set_uuid2, 4); 779 memcpy(mddev->uuid+12,&sb->set_uuid3, 4); 780 781 mddev->max_disks = MD_SB_DISKS; 782 783 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) && 784 mddev->bitmap_file == NULL) { 785 if (mddev->level != 1 && mddev->level != 4 786 && mddev->level != 5 && mddev->level != 6 787 && mddev->level != 10) { 788 /* FIXME use a better test */ 789 printk(KERN_WARNING "md: bitmaps not supported for this level.\n"); 790 return -EINVAL; 791 } 792 mddev->bitmap_offset = mddev->default_bitmap_offset; 793 } 794 795 } else if (mddev->pers == NULL) { 796 /* Insist on good event counter while assembling */ 797 __u64 ev1 = md_event(sb); 798 ++ev1; 799 if (ev1 < mddev->events) 800 return -EINVAL; 801 } else if (mddev->bitmap) { 802 /* if adding to array with a bitmap, then we can accept an 803 * older device ... but not too old. 804 */ 805 __u64 ev1 = md_event(sb); 806 if (ev1 < mddev->bitmap->events_cleared) 807 return 0; 808 } else /* just a hot-add of a new device, leave raid_disk at -1 */ 809 return 0; 810 811 if (mddev->level != LEVEL_MULTIPATH) { 812 desc = sb->disks + rdev->desc_nr; 813 814 if (desc->state & (1<<MD_DISK_FAULTY)) 815 set_bit(Faulty, &rdev->flags); 816 else if (desc->state & (1<<MD_DISK_SYNC) && 817 desc->raid_disk < mddev->raid_disks) { 818 set_bit(In_sync, &rdev->flags); 819 rdev->raid_disk = desc->raid_disk; 820 } 821 if (desc->state & (1<<MD_DISK_WRITEMOSTLY)) 822 set_bit(WriteMostly, &rdev->flags); 823 } else /* MULTIPATH are always insync */ 824 set_bit(In_sync, &rdev->flags); 825 return 0; 826 } 827 828 /* 829 * sync_super for 0.90.0 830 */ 831 static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) 832 { 833 mdp_super_t *sb; 834 struct list_head *tmp; 835 mdk_rdev_t *rdev2; 836 int next_spare = mddev->raid_disks; 837 838 839 /* make rdev->sb match mddev data.. 
840 * 841 * 1/ zero out disks 842 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare); 843 * 3/ any empty disks < next_spare become removed 844 * 845 * disks[0] gets initialised to REMOVED because 846 * we cannot be sure from other fields if it has 847 * been initialised or not. 848 */ 849 int i; 850 int active=0, working=0,failed=0,spare=0,nr_disks=0; 851 852 rdev->sb_size = MD_SB_BYTES; 853 854 sb = (mdp_super_t*)page_address(rdev->sb_page); 855 856 memset(sb, 0, sizeof(*sb)); 857 858 sb->md_magic = MD_SB_MAGIC; 859 sb->major_version = mddev->major_version; 860 sb->patch_version = mddev->patch_version; 861 sb->gvalid_words = 0; /* ignored */ 862 memcpy(&sb->set_uuid0, mddev->uuid+0, 4); 863 memcpy(&sb->set_uuid1, mddev->uuid+4, 4); 864 memcpy(&sb->set_uuid2, mddev->uuid+8, 4); 865 memcpy(&sb->set_uuid3, mddev->uuid+12,4); 866 867 sb->ctime = mddev->ctime; 868 sb->level = mddev->level; 869 sb->size = mddev->size; 870 sb->raid_disks = mddev->raid_disks; 871 sb->md_minor = mddev->md_minor; 872 sb->not_persistent = !mddev->persistent; 873 sb->utime = mddev->utime; 874 sb->state = 0; 875 sb->events_hi = (mddev->events>>32); 876 sb->events_lo = (u32)mddev->events; 877 878 if (mddev->reshape_position == MaxSector) 879 sb->minor_version = 90; 880 else { 881 sb->minor_version = 91; 882 sb->reshape_position = mddev->reshape_position; 883 sb->new_level = mddev->new_level; 884 sb->delta_disks = mddev->delta_disks; 885 sb->new_layout = mddev->new_layout; 886 sb->new_chunk = mddev->new_chunk; 887 } 888 mddev->minor_version = sb->minor_version; 889 if (mddev->in_sync) 890 { 891 sb->recovery_cp = mddev->recovery_cp; 892 sb->cp_events_hi = (mddev->events>>32); 893 sb->cp_events_lo = (u32)mddev->events; 894 if (mddev->recovery_cp == MaxSector) 895 sb->state = (1<< MD_SB_CLEAN); 896 } else 897 sb->recovery_cp = 0; 898 899 sb->layout = mddev->layout; 900 sb->chunk_size = mddev->chunk_size; 901 902 if (mddev->bitmap && mddev->bitmap_file == NULL) 903 sb->state |= (1<<MD_SB_BITMAP_PRESENT); 904 905 sb->disks[0].state = (1<<MD_DISK_REMOVED); 906 ITERATE_RDEV(mddev,rdev2,tmp) { 907 mdp_disk_t *d; 908 int desc_nr; 909 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags) 910 && !test_bit(Faulty, &rdev2->flags)) 911 desc_nr = rdev2->raid_disk; 912 else 913 desc_nr = next_spare++; 914 rdev2->desc_nr = desc_nr; 915 d = &sb->disks[rdev2->desc_nr]; 916 nr_disks++; 917 d->number = rdev2->desc_nr; 918 d->major = MAJOR(rdev2->bdev->bd_dev); 919 d->minor = MINOR(rdev2->bdev->bd_dev); 920 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags) 921 && !test_bit(Faulty, &rdev2->flags)) 922 d->raid_disk = rdev2->raid_disk; 923 else 924 d->raid_disk = rdev2->desc_nr; /* compatibility */ 925 if (test_bit(Faulty, &rdev2->flags)) 926 d->state = (1<<MD_DISK_FAULTY); 927 else if (test_bit(In_sync, &rdev2->flags)) { 928 d->state = (1<<MD_DISK_ACTIVE); 929 d->state |= (1<<MD_DISK_SYNC); 930 active++; 931 working++; 932 } else { 933 d->state = 0; 934 spare++; 935 working++; 936 } 937 if (test_bit(WriteMostly, &rdev2->flags)) 938 d->state |= (1<<MD_DISK_WRITEMOSTLY); 939 } 940 /* now set the "removed" and "faulty" bits on any missing devices */ 941 for (i=0 ; i < mddev->raid_disks ; i++) { 942 mdp_disk_t *d = &sb->disks[i]; 943 if (d->state == 0 && d->number == 0) { 944 d->number = i; 945 d->raid_disk = i; 946 d->state = (1<<MD_DISK_REMOVED); 947 d->state |= (1<<MD_DISK_FAULTY); 948 failed++; 949 } 950 } 951 sb->nr_disks = nr_disks; 952 sb->active_disks = active; 953 sb->working_disks = working; 
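	/* Tallies from above: each in-sync device was counted as active and
	 * working, each remaining live device as spare and working, and
	 * 'failed' was incremented for every empty slot that the loop above
	 * marked REMOVED|FAULTY. */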
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}

/*
 * version 1 superblock
 */

static unsigned int calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
	unsigned int disk_csum, csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	unsigned int *isuper = (unsigned int*)sb;
	int i;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (i=0; size>=4; size -= 4 )
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(unsigned short*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}
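/*
 * Worked example for the superblock placement done in super_1_load() below:
 * with minor_version 0 the superblock sits at least 8K but less than 12K
 * from the end of the device.  On an 8GiB device (16777216 sectors),
 * 16777216 - 16 = 16777200 sectors, already 8-sector aligned, giving
 * sb_offset = 8388600K, i.e. 8K below the end of the device.
 * minor_version 1 puts it at offset 0, minor_version 2 at 4K from the start.
 */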
static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_offset;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch(minor_version) {
	case 0:
		sb_offset = rdev->bdev->bd_inode->i_size >> 9;
		sb_offset -= 8*2;
		sb_offset &= ~(sector_t)(4*2-1);
		/* convert from sectors to K */
		sb_offset /= 2;
		break;
	case 1:
		sb_offset = 0;
		break;
	case 2:
		sb_offset = 4;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_offset = sb_offset;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;


	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask)+1;

	if (refdev == 0)
		ret = 1;
	else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb =
			(struct mdp_superblock_1*)page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
				" superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version)
		rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
	else
		rdev->size = rdev->sb_offset;
	if (rdev->size < le64_to_cpu(sb->data_size)/2)
		return -EINVAL;
	rdev->size = le64_to_cpu(sb->data_size)/2;
	if (le32_to_cpu(sb->chunksize))
		rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);

	if (le32_to_cpu(sb->size) > rdev->size*2)
		return -EINVAL;
	return ret;
}

static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	rdev->raid_disk = -1;
	rdev->flags = 0;
	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->persistent = 1;
		mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->size = le64_to_cpu(sb->size)/2;
		mddev->events = le64_to_cpu(sb->events);
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = 1024 >> 9;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks = (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_file == NULL ) {
			if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6
			    && mddev->level != 10) {
				printk(KERN_WARNING "md: bitmaps not supported for this level.\n");
				return -EINVAL;
			}
			mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);
		}
		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk = mddev->chunk_size;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling */
		__u64 ev1 = le64_to_cpu(sb->events);
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
1156 */ 1157 __u64 ev1 = le64_to_cpu(sb->events); 1158 if (ev1 < mddev->bitmap->events_cleared) 1159 return 0; 1160 } else /* just a hot-add of a new device, leave raid_disk at -1 */ 1161 return 0; 1162 1163 if (mddev->level != LEVEL_MULTIPATH) { 1164 int role; 1165 rdev->desc_nr = le32_to_cpu(sb->dev_number); 1166 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); 1167 switch(role) { 1168 case 0xffff: /* spare */ 1169 break; 1170 case 0xfffe: /* faulty */ 1171 set_bit(Faulty, &rdev->flags); 1172 break; 1173 default: 1174 set_bit(In_sync, &rdev->flags); 1175 rdev->raid_disk = role; 1176 break; 1177 } 1178 if (sb->devflags & WriteMostly1) 1179 set_bit(WriteMostly, &rdev->flags); 1180 } else /* MULTIPATH are always insync */ 1181 set_bit(In_sync, &rdev->flags); 1182 1183 return 0; 1184 } 1185 1186 static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) 1187 { 1188 struct mdp_superblock_1 *sb; 1189 struct list_head *tmp; 1190 mdk_rdev_t *rdev2; 1191 int max_dev, i; 1192 /* make rdev->sb match mddev and rdev data. */ 1193 1194 sb = (struct mdp_superblock_1*)page_address(rdev->sb_page); 1195 1196 sb->feature_map = 0; 1197 sb->pad0 = 0; 1198 memset(sb->pad1, 0, sizeof(sb->pad1)); 1199 memset(sb->pad2, 0, sizeof(sb->pad2)); 1200 memset(sb->pad3, 0, sizeof(sb->pad3)); 1201 1202 sb->utime = cpu_to_le64((__u64)mddev->utime); 1203 sb->events = cpu_to_le64(mddev->events); 1204 if (mddev->in_sync) 1205 sb->resync_offset = cpu_to_le64(mddev->recovery_cp); 1206 else 1207 sb->resync_offset = cpu_to_le64(0); 1208 1209 sb->cnt_corrected_read = atomic_read(&rdev->corrected_errors); 1210 1211 sb->raid_disks = cpu_to_le32(mddev->raid_disks); 1212 sb->size = cpu_to_le64(mddev->size<<1); 1213 1214 if (mddev->bitmap && mddev->bitmap_file == NULL) { 1215 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset); 1216 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); 1217 } 1218 if (mddev->reshape_position != MaxSector) { 1219 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE); 1220 sb->reshape_position = cpu_to_le64(mddev->reshape_position); 1221 sb->new_layout = cpu_to_le32(mddev->new_layout); 1222 sb->delta_disks = cpu_to_le32(mddev->delta_disks); 1223 sb->new_level = cpu_to_le32(mddev->new_level); 1224 sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9); 1225 } 1226 1227 max_dev = 0; 1228 ITERATE_RDEV(mddev,rdev2,tmp) 1229 if (rdev2->desc_nr+1 > max_dev) 1230 max_dev = rdev2->desc_nr+1; 1231 1232 sb->max_dev = cpu_to_le32(max_dev); 1233 for (i=0; i<max_dev;i++) 1234 sb->dev_roles[i] = cpu_to_le16(0xfffe); 1235 1236 ITERATE_RDEV(mddev,rdev2,tmp) { 1237 i = rdev2->desc_nr; 1238 if (test_bit(Faulty, &rdev2->flags)) 1239 sb->dev_roles[i] = cpu_to_le16(0xfffe); 1240 else if (test_bit(In_sync, &rdev2->flags)) 1241 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); 1242 else 1243 sb->dev_roles[i] = cpu_to_le16(0xffff); 1244 } 1245 1246 sb->recovery_offset = cpu_to_le64(0); /* not supported yet */ 1247 sb->sb_csum = calc_sb_1_csum(sb); 1248 } 1249 1250 1251 static struct super_type super_types[] = { 1252 [0] = { 1253 .name = "0.90.0", 1254 .owner = THIS_MODULE, 1255 .load_super = super_90_load, 1256 .validate_super = super_90_validate, 1257 .sync_super = super_90_sync, 1258 }, 1259 [1] = { 1260 .name = "md-1", 1261 .owner = THIS_MODULE, 1262 .load_super = super_1_load, 1263 .validate_super = super_1_validate, 1264 .sync_super = super_1_sync, 1265 }, 1266 }; 1267 1268 static mdk_rdev_t * match_dev_unit(mddev_t *mddev, mdk_rdev_t *dev) 1269 { 1270 struct list_head *tmp; 1271 mdk_rdev_t *rdev; 1272 1273 
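	/* Two rdevs are taken to share a physical device when their
	 * partitions sit on the same underlying whole disk (bd_contains). */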
ITERATE_RDEV(mddev,rdev,tmp) 1274 if (rdev->bdev->bd_contains == dev->bdev->bd_contains) 1275 return rdev; 1276 1277 return NULL; 1278 } 1279 1280 static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2) 1281 { 1282 struct list_head *tmp; 1283 mdk_rdev_t *rdev; 1284 1285 ITERATE_RDEV(mddev1,rdev,tmp) 1286 if (match_dev_unit(mddev2, rdev)) 1287 return 1; 1288 1289 return 0; 1290 } 1291 1292 static LIST_HEAD(pending_raid_disks); 1293 1294 static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) 1295 { 1296 mdk_rdev_t *same_pdev; 1297 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 1298 struct kobject *ko; 1299 char *s; 1300 1301 if (rdev->mddev) { 1302 MD_BUG(); 1303 return -EINVAL; 1304 } 1305 /* make sure rdev->size exceeds mddev->size */ 1306 if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) { 1307 if (mddev->pers) 1308 /* Cannot change size, so fail */ 1309 return -ENOSPC; 1310 else 1311 mddev->size = rdev->size; 1312 } 1313 same_pdev = match_dev_unit(mddev, rdev); 1314 if (same_pdev) 1315 printk(KERN_WARNING 1316 "%s: WARNING: %s appears to be on the same physical" 1317 " disk as %s. True\n protection against single-disk" 1318 " failure might be compromised.\n", 1319 mdname(mddev), bdevname(rdev->bdev,b), 1320 bdevname(same_pdev->bdev,b2)); 1321 1322 /* Verify rdev->desc_nr is unique. 1323 * If it is -1, assign a free number, else 1324 * check number is not in use 1325 */ 1326 if (rdev->desc_nr < 0) { 1327 int choice = 0; 1328 if (mddev->pers) choice = mddev->raid_disks; 1329 while (find_rdev_nr(mddev, choice)) 1330 choice++; 1331 rdev->desc_nr = choice; 1332 } else { 1333 if (find_rdev_nr(mddev, rdev->desc_nr)) 1334 return -EBUSY; 1335 } 1336 bdevname(rdev->bdev,b); 1337 if (kobject_set_name(&rdev->kobj, "dev-%s", b) < 0) 1338 return -ENOMEM; 1339 while ( (s=strchr(rdev->kobj.k_name, '/')) != NULL) 1340 *s = '!'; 1341 1342 list_add(&rdev->same_set, &mddev->disks); 1343 rdev->mddev = mddev; 1344 printk(KERN_INFO "md: bind<%s>\n", b); 1345 1346 rdev->kobj.parent = &mddev->kobj; 1347 kobject_add(&rdev->kobj); 1348 1349 if (rdev->bdev->bd_part) 1350 ko = &rdev->bdev->bd_part->kobj; 1351 else 1352 ko = &rdev->bdev->bd_disk->kobj; 1353 sysfs_create_link(&rdev->kobj, ko, "block"); 1354 bd_claim_by_disk(rdev->bdev, rdev, mddev->gendisk); 1355 return 0; 1356 } 1357 1358 static void unbind_rdev_from_array(mdk_rdev_t * rdev) 1359 { 1360 char b[BDEVNAME_SIZE]; 1361 if (!rdev->mddev) { 1362 MD_BUG(); 1363 return; 1364 } 1365 bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk); 1366 list_del_init(&rdev->same_set); 1367 printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b)); 1368 rdev->mddev = NULL; 1369 sysfs_remove_link(&rdev->kobj, "block"); 1370 kobject_del(&rdev->kobj); 1371 } 1372 1373 /* 1374 * prevent the device from being mounted, repartitioned or 1375 * otherwise reused by a RAID array (or any other kernel 1376 * subsystem), by bd_claiming the device. 
1377 */ 1378 static int lock_rdev(mdk_rdev_t *rdev, dev_t dev) 1379 { 1380 int err = 0; 1381 struct block_device *bdev; 1382 char b[BDEVNAME_SIZE]; 1383 1384 bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE); 1385 if (IS_ERR(bdev)) { 1386 printk(KERN_ERR "md: could not open %s.\n", 1387 __bdevname(dev, b)); 1388 return PTR_ERR(bdev); 1389 } 1390 err = bd_claim(bdev, rdev); 1391 if (err) { 1392 printk(KERN_ERR "md: could not bd_claim %s.\n", 1393 bdevname(bdev, b)); 1394 blkdev_put(bdev); 1395 return err; 1396 } 1397 rdev->bdev = bdev; 1398 return err; 1399 } 1400 1401 static void unlock_rdev(mdk_rdev_t *rdev) 1402 { 1403 struct block_device *bdev = rdev->bdev; 1404 rdev->bdev = NULL; 1405 if (!bdev) 1406 MD_BUG(); 1407 bd_release(bdev); 1408 blkdev_put(bdev); 1409 } 1410 1411 void md_autodetect_dev(dev_t dev); 1412 1413 static void export_rdev(mdk_rdev_t * rdev) 1414 { 1415 char b[BDEVNAME_SIZE]; 1416 printk(KERN_INFO "md: export_rdev(%s)\n", 1417 bdevname(rdev->bdev,b)); 1418 if (rdev->mddev) 1419 MD_BUG(); 1420 free_disk_sb(rdev); 1421 list_del_init(&rdev->same_set); 1422 #ifndef MODULE 1423 md_autodetect_dev(rdev->bdev->bd_dev); 1424 #endif 1425 unlock_rdev(rdev); 1426 kobject_put(&rdev->kobj); 1427 } 1428 1429 static void kick_rdev_from_array(mdk_rdev_t * rdev) 1430 { 1431 unbind_rdev_from_array(rdev); 1432 export_rdev(rdev); 1433 } 1434 1435 static void export_array(mddev_t *mddev) 1436 { 1437 struct list_head *tmp; 1438 mdk_rdev_t *rdev; 1439 1440 ITERATE_RDEV(mddev,rdev,tmp) { 1441 if (!rdev->mddev) { 1442 MD_BUG(); 1443 continue; 1444 } 1445 kick_rdev_from_array(rdev); 1446 } 1447 if (!list_empty(&mddev->disks)) 1448 MD_BUG(); 1449 mddev->raid_disks = 0; 1450 mddev->major_version = 0; 1451 } 1452 1453 static void print_desc(mdp_disk_t *desc) 1454 { 1455 printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number, 1456 desc->major,desc->minor,desc->raid_disk,desc->state); 1457 } 1458 1459 static void print_sb(mdp_super_t *sb) 1460 { 1461 int i; 1462 1463 printk(KERN_INFO 1464 "md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n", 1465 sb->major_version, sb->minor_version, sb->patch_version, 1466 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3, 1467 sb->ctime); 1468 printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n", 1469 sb->level, sb->size, sb->nr_disks, sb->raid_disks, 1470 sb->md_minor, sb->layout, sb->chunk_size); 1471 printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d" 1472 " FD:%d SD:%d CSUM:%08x E:%08lx\n", 1473 sb->utime, sb->state, sb->active_disks, sb->working_disks, 1474 sb->failed_disks, sb->spare_disks, 1475 sb->sb_csum, (unsigned long)sb->events_lo); 1476 1477 printk(KERN_INFO); 1478 for (i = 0; i < MD_SB_DISKS; i++) { 1479 mdp_disk_t *desc; 1480 1481 desc = sb->disks + i; 1482 if (desc->number || desc->major || desc->minor || 1483 desc->raid_disk || (desc->state && (desc->state != 4))) { 1484 printk(" D %2d: ", i); 1485 print_desc(desc); 1486 } 1487 } 1488 printk(KERN_INFO "md: THIS: "); 1489 print_desc(&sb->this_disk); 1490 1491 } 1492 1493 static void print_rdev(mdk_rdev_t *rdev) 1494 { 1495 char b[BDEVNAME_SIZE]; 1496 printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n", 1497 bdevname(rdev->bdev,b), (unsigned long long)rdev->size, 1498 test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags), 1499 rdev->desc_nr); 1500 if (rdev->sb_loaded) { 1501 printk(KERN_INFO "md: rdev superblock:\n"); 1502 print_sb((mdp_super_t*)page_address(rdev->sb_page)); 1503 } else 1504 printk(KERN_INFO "md: no rdev superblock!\n"); 1505 } 1506 1507 void 
md_print_devices(void)
{
	struct list_head *tmp, *tmp2;
	mdk_rdev_t *rdev;
	mddev_t *mddev;
	char b[BDEVNAME_SIZE];

	printk("\n");
	printk("md: **********************************\n");
	printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n");
	printk("md: **********************************\n");
	ITERATE_MDDEV(mddev,tmp) {

		if (mddev->bitmap)
			bitmap_print_sb(mddev->bitmap);
		else
			printk("%s: ", mdname(mddev));
		ITERATE_RDEV(mddev,rdev,tmp2)
			printk("<%s>", bdevname(rdev->bdev,b));
		printk("\n");

		ITERATE_RDEV(mddev,rdev,tmp2)
			print_rdev(rdev);
	}
	printk("md: **********************************\n");
	printk("\n");
}


static void sync_sbs(mddev_t * mddev)
{
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	ITERATE_RDEV(mddev,rdev,tmp) {
		super_types[mddev->major_version].
			sync_super(mddev, rdev);
		rdev->sb_loaded = 1;
	}
}

void md_update_sb(mddev_t * mddev)
{
	int err;
	struct list_head *tmp;
	mdk_rdev_t *rdev;
	int sync_req;

repeat:
	spin_lock_irq(&mddev->write_lock);
	sync_req = mddev->in_sync;
	mddev->utime = get_seconds();
	mddev->events ++;

	if (!mddev->events) {
		/*
		 * oops, this 64-bit counter should never wrap.
		 * Either we are in around ~1 trillion A.C., assuming
		 * 1 reboot per second, or we have a bug:
		 */
		MD_BUG();
		mddev->events --;
	}
	mddev->sb_dirty = 2;
	sync_sbs(mddev);

	/*
	 * do not write anything to disk if using
	 * nonpersistent superblocks
	 */
	if (!mddev->persistent) {
		mddev->sb_dirty = 0;
		spin_unlock_irq(&mddev->write_lock);
		wake_up(&mddev->sb_wait);
		return;
	}
	spin_unlock_irq(&mddev->write_lock);

	dprintk(KERN_INFO
		"md: updating %s RAID superblock on device (in sync %d)\n",
		mdname(mddev),mddev->in_sync);

	err = bitmap_update_sb(mddev->bitmap);
	ITERATE_RDEV(mddev,rdev,tmp) {
		char b[BDEVNAME_SIZE];
		dprintk(KERN_INFO "md: ");
		if (test_bit(Faulty, &rdev->flags))
			dprintk("(skipping faulty ");

		dprintk("%s ", bdevname(rdev->bdev,b));
		if (!test_bit(Faulty, &rdev->flags)) {
			md_super_write(mddev,rdev,
				       rdev->sb_offset<<1, rdev->sb_size,
				       rdev->sb_page);
			dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
				bdevname(rdev->bdev,b),
				(unsigned long long)rdev->sb_offset);

		} else
			dprintk(")\n");
		if (mddev->level == LEVEL_MULTIPATH)
			/* only need to write one superblock... */
			break;
	}
	md_super_wait(mddev);
	/* if there was a failure, sb_dirty was set to 1, and we re-write super */

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync != sync_req || mddev->sb_dirty == 1) {
		/* have to write it out again */
		spin_unlock_irq(&mddev->write_lock);
		goto repeat;
	}
	mddev->sb_dirty = 0;
	spin_unlock_irq(&mddev->write_lock);
	wake_up(&mddev->sb_wait);

}
EXPORT_SYMBOL_GPL(md_update_sb);

/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either case. For this we use cmd_match.
 */
static int cmd_match(const char *cmd, const char *str)
{
	/* See if cmd, written into a sysfs file, matches
	 * str.
They must either be the same, or cmd can 1634 * have a trailing newline 1635 */ 1636 while (*cmd && *str && *cmd == *str) { 1637 cmd++; 1638 str++; 1639 } 1640 if (*cmd == '\n') 1641 cmd++; 1642 if (*str || *cmd) 1643 return 0; 1644 return 1; 1645 } 1646 1647 struct rdev_sysfs_entry { 1648 struct attribute attr; 1649 ssize_t (*show)(mdk_rdev_t *, char *); 1650 ssize_t (*store)(mdk_rdev_t *, const char *, size_t); 1651 }; 1652 1653 static ssize_t 1654 state_show(mdk_rdev_t *rdev, char *page) 1655 { 1656 char *sep = ""; 1657 int len=0; 1658 1659 if (test_bit(Faulty, &rdev->flags)) { 1660 len+= sprintf(page+len, "%sfaulty",sep); 1661 sep = ","; 1662 } 1663 if (test_bit(In_sync, &rdev->flags)) { 1664 len += sprintf(page+len, "%sin_sync",sep); 1665 sep = ","; 1666 } 1667 if (!test_bit(Faulty, &rdev->flags) && 1668 !test_bit(In_sync, &rdev->flags)) { 1669 len += sprintf(page+len, "%sspare", sep); 1670 sep = ","; 1671 } 1672 return len+sprintf(page+len, "\n"); 1673 } 1674 1675 static struct rdev_sysfs_entry 1676 rdev_state = __ATTR_RO(state); 1677 1678 static ssize_t 1679 super_show(mdk_rdev_t *rdev, char *page) 1680 { 1681 if (rdev->sb_loaded && rdev->sb_size) { 1682 memcpy(page, page_address(rdev->sb_page), rdev->sb_size); 1683 return rdev->sb_size; 1684 } else 1685 return 0; 1686 } 1687 static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super); 1688 1689 static ssize_t 1690 errors_show(mdk_rdev_t *rdev, char *page) 1691 { 1692 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); 1693 } 1694 1695 static ssize_t 1696 errors_store(mdk_rdev_t *rdev, const char *buf, size_t len) 1697 { 1698 char *e; 1699 unsigned long n = simple_strtoul(buf, &e, 10); 1700 if (*buf && (*e == 0 || *e == '\n')) { 1701 atomic_set(&rdev->corrected_errors, n); 1702 return len; 1703 } 1704 return -EINVAL; 1705 } 1706 static struct rdev_sysfs_entry rdev_errors = 1707 __ATTR(errors, 0644, errors_show, errors_store); 1708 1709 static ssize_t 1710 slot_show(mdk_rdev_t *rdev, char *page) 1711 { 1712 if (rdev->raid_disk < 0) 1713 return sprintf(page, "none\n"); 1714 else 1715 return sprintf(page, "%d\n", rdev->raid_disk); 1716 } 1717 1718 static ssize_t 1719 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) 1720 { 1721 char *e; 1722 int slot = simple_strtoul(buf, &e, 10); 1723 if (strncmp(buf, "none", 4)==0) 1724 slot = -1; 1725 else if (e==buf || (*e && *e!= '\n')) 1726 return -EINVAL; 1727 if (rdev->mddev->pers) 1728 /* Cannot set slot in active array (yet) */ 1729 return -EBUSY; 1730 if (slot >= rdev->mddev->raid_disks) 1731 return -ENOSPC; 1732 rdev->raid_disk = slot; 1733 /* assume it is working */ 1734 rdev->flags = 0; 1735 set_bit(In_sync, &rdev->flags); 1736 return len; 1737 } 1738 1739 1740 static struct rdev_sysfs_entry rdev_slot = 1741 __ATTR(slot, 0644, slot_show, slot_store); 1742 1743 static ssize_t 1744 offset_show(mdk_rdev_t *rdev, char *page) 1745 { 1746 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); 1747 } 1748 1749 static ssize_t 1750 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len) 1751 { 1752 char *e; 1753 unsigned long long offset = simple_strtoull(buf, &e, 10); 1754 if (e==buf || (*e && *e != '\n')) 1755 return -EINVAL; 1756 if (rdev->mddev->pers) 1757 return -EBUSY; 1758 rdev->data_offset = offset; 1759 return len; 1760 } 1761 1762 static struct rdev_sysfs_entry rdev_offset = 1763 __ATTR(offset, 0644, offset_show, offset_store); 1764 1765 static ssize_t 1766 rdev_size_show(mdk_rdev_t *rdev, char *page) 1767 { 1768 return sprintf(page, "%llu\n", 
(unsigned long long)rdev->size); 1769 } 1770 1771 static ssize_t 1772 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) 1773 { 1774 char *e; 1775 unsigned long long size = simple_strtoull(buf, &e, 10); 1776 if (e==buf || (*e && *e != '\n')) 1777 return -EINVAL; 1778 if (rdev->mddev->pers) 1779 return -EBUSY; 1780 rdev->size = size; 1781 if (size < rdev->mddev->size || rdev->mddev->size == 0) 1782 rdev->mddev->size = size; 1783 return len; 1784 } 1785 1786 static struct rdev_sysfs_entry rdev_size = 1787 __ATTR(size, 0644, rdev_size_show, rdev_size_store); 1788 1789 static struct attribute *rdev_default_attrs[] = { 1790 &rdev_state.attr, 1791 &rdev_super.attr, 1792 &rdev_errors.attr, 1793 &rdev_slot.attr, 1794 &rdev_offset.attr, 1795 &rdev_size.attr, 1796 NULL, 1797 }; 1798 static ssize_t 1799 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 1800 { 1801 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 1802 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); 1803 1804 if (!entry->show) 1805 return -EIO; 1806 return entry->show(rdev, page); 1807 } 1808 1809 static ssize_t 1810 rdev_attr_store(struct kobject *kobj, struct attribute *attr, 1811 const char *page, size_t length) 1812 { 1813 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 1814 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); 1815 1816 if (!entry->store) 1817 return -EIO; 1818 return entry->store(rdev, page, length); 1819 } 1820 1821 static void rdev_free(struct kobject *ko) 1822 { 1823 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj); 1824 kfree(rdev); 1825 } 1826 static struct sysfs_ops rdev_sysfs_ops = { 1827 .show = rdev_attr_show, 1828 .store = rdev_attr_store, 1829 }; 1830 static struct kobj_type rdev_ktype = { 1831 .release = rdev_free, 1832 .sysfs_ops = &rdev_sysfs_ops, 1833 .default_attrs = rdev_default_attrs, 1834 }; 1835 1836 /* 1837 * Import a device. If 'super_format' >= 0, then sanity check the superblock 1838 * 1839 * mark the device faulty if: 1840 * 1841 * - the device is nonexistent (zero size) 1842 * - the device has no valid superblock 1843 * 1844 * a faulty rdev _never_ has rdev->sb set. 1845 */ 1846 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor) 1847 { 1848 char b[BDEVNAME_SIZE]; 1849 int err; 1850 mdk_rdev_t *rdev; 1851 sector_t size; 1852 1853 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); 1854 if (!rdev) { 1855 printk(KERN_ERR "md: could not alloc mem for new device!\n"); 1856 return ERR_PTR(-ENOMEM); 1857 } 1858 1859 if ((err = alloc_disk_sb(rdev))) 1860 goto abort_free; 1861 1862 err = lock_rdev(rdev, newdev); 1863 if (err) 1864 goto abort_free; 1865 1866 rdev->kobj.parent = NULL; 1867 rdev->kobj.ktype = &rdev_ktype; 1868 kobject_init(&rdev->kobj); 1869 1870 rdev->desc_nr = -1; 1871 rdev->flags = 0; 1872 rdev->data_offset = 0; 1873 atomic_set(&rdev->nr_pending, 0); 1874 atomic_set(&rdev->read_errors, 0); 1875 atomic_set(&rdev->corrected_errors, 0); 1876 1877 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 1878 if (!size) { 1879 printk(KERN_WARNING 1880 "md: %s has zero or unknown size, marking faulty!\n", 1881 bdevname(rdev->bdev,b)); 1882 err = -EINVAL; 1883 goto abort_free; 1884 } 1885 1886 if (super_format >= 0) { 1887 err = super_types[super_format]. 
1888 load_super(rdev, NULL, super_minor); 1889 if (err == -EINVAL) { 1890 printk(KERN_WARNING 1891 "md: %s has invalid sb, not importing!\n", 1892 bdevname(rdev->bdev,b)); 1893 goto abort_free; 1894 } 1895 if (err < 0) { 1896 printk(KERN_WARNING 1897 "md: could not read %s's sb, not importing!\n", 1898 bdevname(rdev->bdev,b)); 1899 goto abort_free; 1900 } 1901 } 1902 INIT_LIST_HEAD(&rdev->same_set); 1903 1904 return rdev; 1905 1906 abort_free: 1907 if (rdev->sb_page) { 1908 if (rdev->bdev) 1909 unlock_rdev(rdev); 1910 free_disk_sb(rdev); 1911 } 1912 kfree(rdev); 1913 return ERR_PTR(err); 1914 } 1915 1916 /* 1917 * Check a full RAID array for plausibility 1918 */ 1919 1920 1921 static void analyze_sbs(mddev_t * mddev) 1922 { 1923 int i; 1924 struct list_head *tmp; 1925 mdk_rdev_t *rdev, *freshest; 1926 char b[BDEVNAME_SIZE]; 1927 1928 freshest = NULL; 1929 ITERATE_RDEV(mddev,rdev,tmp) 1930 switch (super_types[mddev->major_version]. 1931 load_super(rdev, freshest, mddev->minor_version)) { 1932 case 1: 1933 freshest = rdev; 1934 break; 1935 case 0: 1936 break; 1937 default: 1938 printk( KERN_ERR \ 1939 "md: fatal superblock inconsistency in %s" 1940 " -- removing from array\n", 1941 bdevname(rdev->bdev,b)); 1942 kick_rdev_from_array(rdev); 1943 } 1944 1945 1946 super_types[mddev->major_version]. 1947 validate_super(mddev, freshest); 1948 1949 i = 0; 1950 ITERATE_RDEV(mddev,rdev,tmp) { 1951 if (rdev != freshest) 1952 if (super_types[mddev->major_version]. 1953 validate_super(mddev, rdev)) { 1954 printk(KERN_WARNING "md: kicking non-fresh %s" 1955 " from array!\n", 1956 bdevname(rdev->bdev,b)); 1957 kick_rdev_from_array(rdev); 1958 continue; 1959 } 1960 if (mddev->level == LEVEL_MULTIPATH) { 1961 rdev->desc_nr = i++; 1962 rdev->raid_disk = rdev->desc_nr; 1963 set_bit(In_sync, &rdev->flags); 1964 } 1965 } 1966 1967 1968 1969 if (mddev->recovery_cp != MaxSector && 1970 mddev->level >= 1) 1971 printk(KERN_ERR "md: %s: raid array is not clean" 1972 " -- starting background reconstruction\n", 1973 mdname(mddev)); 1974 1975 } 1976 1977 static ssize_t 1978 level_show(mddev_t *mddev, char *page) 1979 { 1980 struct mdk_personality *p = mddev->pers; 1981 if (p) 1982 return sprintf(page, "%s\n", p->name); 1983 else if (mddev->clevel[0]) 1984 return sprintf(page, "%s\n", mddev->clevel); 1985 else if (mddev->level != LEVEL_NONE) 1986 return sprintf(page, "%d\n", mddev->level); 1987 else 1988 return 0; 1989 } 1990 1991 static ssize_t 1992 level_store(mddev_t *mddev, const char *buf, size_t len) 1993 { 1994 int rv = len; 1995 if (mddev->pers) 1996 return -EBUSY; 1997 if (len == 0) 1998 return 0; 1999 if (len >= sizeof(mddev->clevel)) 2000 return -ENOSPC; 2001 strncpy(mddev->clevel, buf, len); 2002 if (mddev->clevel[len-1] == '\n') 2003 len--; 2004 mddev->clevel[len] = 0; 2005 mddev->level = LEVEL_NONE; 2006 return rv; 2007 } 2008 2009 static struct md_sysfs_entry md_level = 2010 __ATTR(level, 0644, level_show, level_store); 2011 2012 static ssize_t 2013 raid_disks_show(mddev_t *mddev, char *page) 2014 { 2015 if (mddev->raid_disks == 0) 2016 return 0; 2017 return sprintf(page, "%d\n", mddev->raid_disks); 2018 } 2019 2020 static int update_raid_disks(mddev_t *mddev, int raid_disks); 2021 2022 static ssize_t 2023 raid_disks_store(mddev_t *mddev, const char *buf, size_t len) 2024 { 2025 /* can only set raid_disks if array is not yet active */ 2026 char *e; 2027 int rv = 0; 2028 unsigned long n = simple_strtoul(buf, &e, 10); 2029 2030 if (!*buf || (*e && *e != '\n')) 2031 return -EINVAL; 2032 2033 if (mddev->pers) 
2034 rv = update_raid_disks(mddev, n); 2035 else 2036 mddev->raid_disks = n; 2037 return rv ? rv : len; 2038 } 2039 static struct md_sysfs_entry md_raid_disks = 2040 __ATTR(raid_disks, 0644, raid_disks_show, raid_disks_store); 2041 2042 static ssize_t 2043 chunk_size_show(mddev_t *mddev, char *page) 2044 { 2045 return sprintf(page, "%d\n", mddev->chunk_size); 2046 } 2047 2048 static ssize_t 2049 chunk_size_store(mddev_t *mddev, const char *buf, size_t len) 2050 { 2051 /* can only set chunk_size if array is not yet active */ 2052 char *e; 2053 unsigned long n = simple_strtoul(buf, &e, 10); 2054 2055 if (mddev->pers) 2056 return -EBUSY; 2057 if (!*buf || (*e && *e != '\n')) 2058 return -EINVAL; 2059 2060 mddev->chunk_size = n; 2061 return len; 2062 } 2063 static struct md_sysfs_entry md_chunk_size = 2064 __ATTR(chunk_size, 0644, chunk_size_show, chunk_size_store); 2065 2066 static ssize_t 2067 null_show(mddev_t *mddev, char *page) 2068 { 2069 return -EINVAL; 2070 } 2071 2072 static ssize_t 2073 new_dev_store(mddev_t *mddev, const char *buf, size_t len) 2074 { 2075 /* buf must be %d:%d\n? giving major and minor numbers */ 2076 /* The new device is added to the array. 2077 * If the array has a persistent superblock, we read the 2078 * superblock to initialise info and check validity. 2079 * Otherwise, only checking done is that in bind_rdev_to_array, 2080 * which mainly checks size. 2081 */ 2082 char *e; 2083 int major = simple_strtoul(buf, &e, 10); 2084 int minor; 2085 dev_t dev; 2086 mdk_rdev_t *rdev; 2087 int err; 2088 2089 if (!*buf || *e != ':' || !e[1] || e[1] == '\n') 2090 return -EINVAL; 2091 minor = simple_strtoul(e+1, &e, 10); 2092 if (*e && *e != '\n') 2093 return -EINVAL; 2094 dev = MKDEV(major, minor); 2095 if (major != MAJOR(dev) || 2096 minor != MINOR(dev)) 2097 return -EOVERFLOW; 2098 2099 2100 if (mddev->persistent) { 2101 rdev = md_import_device(dev, mddev->major_version, 2102 mddev->minor_version); 2103 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { 2104 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, 2105 mdk_rdev_t, same_set); 2106 err = super_types[mddev->major_version] 2107 .load_super(rdev, rdev0, mddev->minor_version); 2108 if (err < 0) 2109 goto out; 2110 } 2111 } else 2112 rdev = md_import_device(dev, -1, -1); 2113 2114 if (IS_ERR(rdev)) 2115 return PTR_ERR(rdev); 2116 err = bind_rdev_to_array(rdev, mddev); 2117 out: 2118 if (err) 2119 export_rdev(rdev); 2120 return err ? err : len; 2121 } 2122 2123 static struct md_sysfs_entry md_new_device = 2124 __ATTR(new_dev, 0200, null_show, new_dev_store); 2125 2126 static ssize_t 2127 size_show(mddev_t *mddev, char *page) 2128 { 2129 return sprintf(page, "%llu\n", (unsigned long long)mddev->size); 2130 } 2131 2132 static int update_size(mddev_t *mddev, unsigned long size); 2133 2134 static ssize_t 2135 size_store(mddev_t *mddev, const char *buf, size_t len) 2136 { 2137 /* If array is inactive, we can reduce the component size, but 2138 * not increase it (except from 0). 2139 * If array is active, we can try an on-line resize 2140 */ 2141 char *e; 2142 int err = 0; 2143 unsigned long long size = simple_strtoull(buf, &e, 10); 2144 if (!*buf || *buf == '\n' || 2145 (*e && *e != '\n')) 2146 return -EINVAL; 2147 2148 if (mddev->pers) { 2149 err = update_size(mddev, size); 2150 md_update_sb(mddev); 2151 } else { 2152 if (mddev->size == 0 || 2153 mddev->size > size) 2154 mddev->size = size; 2155 else 2156 err = -ENOSPC; 2157 } 2158 return err ? 
err : len; 2159 } 2160 2161 static struct md_sysfs_entry md_size = 2162 __ATTR(component_size, 0644, size_show, size_store); 2163 2164 2165 /* Metdata version. 2166 * This is either 'none' for arrays with externally managed metadata, 2167 * or N.M for internally known formats 2168 */ 2169 static ssize_t 2170 metadata_show(mddev_t *mddev, char *page) 2171 { 2172 if (mddev->persistent) 2173 return sprintf(page, "%d.%d\n", 2174 mddev->major_version, mddev->minor_version); 2175 else 2176 return sprintf(page, "none\n"); 2177 } 2178 2179 static ssize_t 2180 metadata_store(mddev_t *mddev, const char *buf, size_t len) 2181 { 2182 int major, minor; 2183 char *e; 2184 if (!list_empty(&mddev->disks)) 2185 return -EBUSY; 2186 2187 if (cmd_match(buf, "none")) { 2188 mddev->persistent = 0; 2189 mddev->major_version = 0; 2190 mddev->minor_version = 90; 2191 return len; 2192 } 2193 major = simple_strtoul(buf, &e, 10); 2194 if (e==buf || *e != '.') 2195 return -EINVAL; 2196 buf = e+1; 2197 minor = simple_strtoul(buf, &e, 10); 2198 if (e==buf || *e != '\n') 2199 return -EINVAL; 2200 if (major >= sizeof(super_types)/sizeof(super_types[0]) || 2201 super_types[major].name == NULL) 2202 return -ENOENT; 2203 mddev->major_version = major; 2204 mddev->minor_version = minor; 2205 mddev->persistent = 1; 2206 return len; 2207 } 2208 2209 static struct md_sysfs_entry md_metadata = 2210 __ATTR(metadata_version, 0644, metadata_show, metadata_store); 2211 2212 static ssize_t 2213 action_show(mddev_t *mddev, char *page) 2214 { 2215 char *type = "idle"; 2216 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 2217 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) { 2218 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 2219 type = "reshape"; 2220 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 2221 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 2222 type = "resync"; 2223 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) 2224 type = "check"; 2225 else 2226 type = "repair"; 2227 } else 2228 type = "recover"; 2229 } 2230 return sprintf(page, "%s\n", type); 2231 } 2232 2233 static ssize_t 2234 action_store(mddev_t *mddev, const char *page, size_t len) 2235 { 2236 if (!mddev->pers || !mddev->pers->sync_request) 2237 return -EINVAL; 2238 2239 if (cmd_match(page, "idle")) { 2240 if (mddev->sync_thread) { 2241 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 2242 md_unregister_thread(mddev->sync_thread); 2243 mddev->sync_thread = NULL; 2244 mddev->recovery = 0; 2245 } 2246 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 2247 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 2248 return -EBUSY; 2249 else if (cmd_match(page, "resync") || cmd_match(page, "recover")) 2250 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2251 else if (cmd_match(page, "reshape")) { 2252 int err; 2253 if (mddev->pers->start_reshape == NULL) 2254 return -EINVAL; 2255 err = mddev->pers->start_reshape(mddev); 2256 if (err) 2257 return err; 2258 } else { 2259 if (cmd_match(page, "check")) 2260 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); 2261 else if (cmd_match(page, "repair")) 2262 return -EINVAL; 2263 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 2264 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 2265 } 2266 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2267 md_wakeup_thread(mddev->thread); 2268 return len; 2269 } 2270 2271 static ssize_t 2272 mismatch_cnt_show(mddev_t *mddev, char *page) 2273 { 2274 return sprintf(page, "%llu\n", 2275 (unsigned long long) mddev->resync_mismatches); 2276 } 2277 2278 static struct 
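/*
 * Illustrative userspace sketch, not part of this driver: action_store()
 * above is exposed just below as the "sync_action" attribute and accepts
 * words such as "check", "resync" or "idle".  A simple scrub request plus
 * result read-back could look like this; the paths are examples.
 */
#if 0
#include <stdio.h>

int main(void)
{
        unsigned long long mismatches = 0;
        FILE *f = fopen("/sys/block/md0/md/sync_action", "w");

        if (!f)
                return 1;
        fputs("check\n", f);
        fclose(f);

        /* ... wait until sync_action reads back as "idle" again ... */

        f = fopen("/sys/block/md0/md/mismatch_cnt", "r");
        if (f && fscanf(f, "%llu", &mismatches) == 1)
                printf("mismatch_cnt: %llu\n", mismatches);
        if (f)
                fclose(f);
        return 0;
}
#endif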
md_sysfs_entry 2279 md_scan_mode = __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); 2280 2281 2282 static struct md_sysfs_entry 2283 md_mismatches = __ATTR_RO(mismatch_cnt); 2284 2285 static ssize_t 2286 sync_min_show(mddev_t *mddev, char *page) 2287 { 2288 return sprintf(page, "%d (%s)\n", speed_min(mddev), 2289 mddev->sync_speed_min ? "local": "system"); 2290 } 2291 2292 static ssize_t 2293 sync_min_store(mddev_t *mddev, const char *buf, size_t len) 2294 { 2295 int min; 2296 char *e; 2297 if (strncmp(buf, "system", 6)==0) { 2298 mddev->sync_speed_min = 0; 2299 return len; 2300 } 2301 min = simple_strtoul(buf, &e, 10); 2302 if (buf == e || (*e && *e != '\n') || min <= 0) 2303 return -EINVAL; 2304 mddev->sync_speed_min = min; 2305 return len; 2306 } 2307 2308 static struct md_sysfs_entry md_sync_min = 2309 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); 2310 2311 static ssize_t 2312 sync_max_show(mddev_t *mddev, char *page) 2313 { 2314 return sprintf(page, "%d (%s)\n", speed_max(mddev), 2315 mddev->sync_speed_max ? "local": "system"); 2316 } 2317 2318 static ssize_t 2319 sync_max_store(mddev_t *mddev, const char *buf, size_t len) 2320 { 2321 int max; 2322 char *e; 2323 if (strncmp(buf, "system", 6)==0) { 2324 mddev->sync_speed_max = 0; 2325 return len; 2326 } 2327 max = simple_strtoul(buf, &e, 10); 2328 if (buf == e || (*e && *e != '\n') || max <= 0) 2329 return -EINVAL; 2330 mddev->sync_speed_max = max; 2331 return len; 2332 } 2333 2334 static struct md_sysfs_entry md_sync_max = 2335 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); 2336 2337 2338 static ssize_t 2339 sync_speed_show(mddev_t *mddev, char *page) 2340 { 2341 unsigned long resync, dt, db; 2342 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active)); 2343 dt = ((jiffies - mddev->resync_mark) / HZ); 2344 if (!dt) dt++; 2345 db = resync - (mddev->resync_mark_cnt); 2346 return sprintf(page, "%ld\n", db/dt/2); /* K/sec */ 2347 } 2348 2349 static struct md_sysfs_entry 2350 md_sync_speed = __ATTR_RO(sync_speed); 2351 2352 static ssize_t 2353 sync_completed_show(mddev_t *mddev, char *page) 2354 { 2355 unsigned long max_blocks, resync; 2356 2357 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 2358 max_blocks = mddev->resync_max_sectors; 2359 else 2360 max_blocks = mddev->size << 1; 2361 2362 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active)); 2363 return sprintf(page, "%lu / %lu\n", resync, max_blocks); 2364 } 2365 2366 static struct md_sysfs_entry 2367 md_sync_completed = __ATTR_RO(sync_completed); 2368 2369 static ssize_t 2370 suspend_lo_show(mddev_t *mddev, char *page) 2371 { 2372 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); 2373 } 2374 2375 static ssize_t 2376 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len) 2377 { 2378 char *e; 2379 unsigned long long new = simple_strtoull(buf, &e, 10); 2380 2381 if (mddev->pers->quiesce == NULL) 2382 return -EINVAL; 2383 if (buf == e || (*e && *e != '\n')) 2384 return -EINVAL; 2385 if (new >= mddev->suspend_hi || 2386 (new > mddev->suspend_lo && new < mddev->suspend_hi)) { 2387 mddev->suspend_lo = new; 2388 mddev->pers->quiesce(mddev, 2); 2389 return len; 2390 } else 2391 return -EINVAL; 2392 } 2393 static struct md_sysfs_entry md_suspend_lo = 2394 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store); 2395 2396 2397 static ssize_t 2398 suspend_hi_show(mddev_t *mddev, char *page) 2399 { 2400 return sprintf(page, "%llu\n", (unsigned long 
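/*
 * Illustrative userspace sketch, not part of this driver: sync_completed
 * above prints "<done> / <total>" with both numbers in the same units, so
 * a progress figure is simply their ratio.  The path is an example.
 */
#if 0
#include <stdio.h>

int main(void)
{
        unsigned long long done, total;
        FILE *f = fopen("/sys/block/md0/md/sync_completed", "r");

        if (!f)
                return 1;
        if (fscanf(f, "%llu / %llu", &done, &total) != 2) {
                fclose(f);
                return 1;
        }
        fclose(f);
        printf("resync %.1f%% complete\n",
               total ? 100.0 * (double)done / (double)total : 100.0);
        return 0;
}
#endif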
long)mddev->suspend_hi); 2401 } 2402 2403 static ssize_t 2404 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len) 2405 { 2406 char *e; 2407 unsigned long long new = simple_strtoull(buf, &e, 10); 2408 2409 if (mddev->pers->quiesce == NULL) 2410 return -EINVAL; 2411 if (buf == e || (*e && *e != '\n')) 2412 return -EINVAL; 2413 if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) || 2414 (new > mddev->suspend_lo && new > mddev->suspend_hi)) { 2415 mddev->suspend_hi = new; 2416 mddev->pers->quiesce(mddev, 1); 2417 mddev->pers->quiesce(mddev, 0); 2418 return len; 2419 } else 2420 return -EINVAL; 2421 } 2422 static struct md_sysfs_entry md_suspend_hi = 2423 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); 2424 2425 2426 static struct attribute *md_default_attrs[] = { 2427 &md_level.attr, 2428 &md_raid_disks.attr, 2429 &md_chunk_size.attr, 2430 &md_size.attr, 2431 &md_metadata.attr, 2432 &md_new_device.attr, 2433 NULL, 2434 }; 2435 2436 static struct attribute *md_redundancy_attrs[] = { 2437 &md_scan_mode.attr, 2438 &md_mismatches.attr, 2439 &md_sync_min.attr, 2440 &md_sync_max.attr, 2441 &md_sync_speed.attr, 2442 &md_sync_completed.attr, 2443 &md_suspend_lo.attr, 2444 &md_suspend_hi.attr, 2445 NULL, 2446 }; 2447 static struct attribute_group md_redundancy_group = { 2448 .name = NULL, 2449 .attrs = md_redundancy_attrs, 2450 }; 2451 2452 2453 static ssize_t 2454 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 2455 { 2456 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 2457 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); 2458 ssize_t rv; 2459 2460 if (!entry->show) 2461 return -EIO; 2462 mddev_lock(mddev); 2463 rv = entry->show(mddev, page); 2464 mddev_unlock(mddev); 2465 return rv; 2466 } 2467 2468 static ssize_t 2469 md_attr_store(struct kobject *kobj, struct attribute *attr, 2470 const char *page, size_t length) 2471 { 2472 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 2473 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); 2474 ssize_t rv; 2475 2476 if (!entry->store) 2477 return -EIO; 2478 mddev_lock(mddev); 2479 rv = entry->store(mddev, page, length); 2480 mddev_unlock(mddev); 2481 return rv; 2482 } 2483 2484 static void md_free(struct kobject *ko) 2485 { 2486 mddev_t *mddev = container_of(ko, mddev_t, kobj); 2487 kfree(mddev); 2488 } 2489 2490 static struct sysfs_ops md_sysfs_ops = { 2491 .show = md_attr_show, 2492 .store = md_attr_store, 2493 }; 2494 static struct kobj_type md_ktype = { 2495 .release = md_free, 2496 .sysfs_ops = &md_sysfs_ops, 2497 .default_attrs = md_default_attrs, 2498 }; 2499 2500 int mdp_major = 0; 2501 2502 static struct kobject *md_probe(dev_t dev, int *part, void *data) 2503 { 2504 static DEFINE_MUTEX(disks_mutex); 2505 mddev_t *mddev = mddev_find(dev); 2506 struct gendisk *disk; 2507 int partitioned = (MAJOR(dev) != MD_MAJOR); 2508 int shift = partitioned ? 
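/*
 * Illustrative sketch, not part of this driver: md_attr_show() and
 * md_attr_store() above recover both the attribute entry and the owning
 * mddev with container_of() before dispatching to per-attribute methods.
 * The toy userspace program below mirrors that pattern with made-up types.
 */
#if 0
#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_attr { const char *name; };
struct toy_dev  { int level; struct toy_attr level_attr; };

static void toy_show(struct toy_attr *attr)
{
        /* walk back from the embedded attribute to its container */
        struct toy_dev *dev = container_of(attr, struct toy_dev, level_attr);

        printf("%s = %d\n", attr->name, dev->level);
}

int main(void)
{
        struct toy_dev dev = { .level = 5, .level_attr = { "level" } };

        toy_show(&dev.level_attr);
        return 0;
}
#endif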
MdpMinorShift : 0; 2509 int unit = MINOR(dev) >> shift; 2510 2511 if (!mddev) 2512 return NULL; 2513 2514 mutex_lock(&disks_mutex); 2515 if (mddev->gendisk) { 2516 mutex_unlock(&disks_mutex); 2517 mddev_put(mddev); 2518 return NULL; 2519 } 2520 disk = alloc_disk(1 << shift); 2521 if (!disk) { 2522 mutex_unlock(&disks_mutex); 2523 mddev_put(mddev); 2524 return NULL; 2525 } 2526 disk->major = MAJOR(dev); 2527 disk->first_minor = unit << shift; 2528 if (partitioned) { 2529 sprintf(disk->disk_name, "md_d%d", unit); 2530 sprintf(disk->devfs_name, "md/d%d", unit); 2531 } else { 2532 sprintf(disk->disk_name, "md%d", unit); 2533 sprintf(disk->devfs_name, "md/%d", unit); 2534 } 2535 disk->fops = &md_fops; 2536 disk->private_data = mddev; 2537 disk->queue = mddev->queue; 2538 add_disk(disk); 2539 mddev->gendisk = disk; 2540 mutex_unlock(&disks_mutex); 2541 mddev->kobj.parent = &disk->kobj; 2542 mddev->kobj.k_name = NULL; 2543 snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md"); 2544 mddev->kobj.ktype = &md_ktype; 2545 kobject_register(&mddev->kobj); 2546 return NULL; 2547 } 2548 2549 void md_wakeup_thread(mdk_thread_t *thread); 2550 2551 static void md_safemode_timeout(unsigned long data) 2552 { 2553 mddev_t *mddev = (mddev_t *) data; 2554 2555 mddev->safemode = 1; 2556 md_wakeup_thread(mddev->thread); 2557 } 2558 2559 static int start_dirty_degraded; 2560 2561 static int do_md_run(mddev_t * mddev) 2562 { 2563 int err; 2564 int chunk_size; 2565 struct list_head *tmp; 2566 mdk_rdev_t *rdev; 2567 struct gendisk *disk; 2568 struct mdk_personality *pers; 2569 char b[BDEVNAME_SIZE]; 2570 2571 if (list_empty(&mddev->disks)) 2572 /* cannot run an array with no devices.. */ 2573 return -EINVAL; 2574 2575 if (mddev->pers) 2576 return -EBUSY; 2577 2578 /* 2579 * Analyze all RAID superblock(s) 2580 */ 2581 if (!mddev->raid_disks) 2582 analyze_sbs(mddev); 2583 2584 chunk_size = mddev->chunk_size; 2585 2586 if (chunk_size) { 2587 if (chunk_size > MAX_CHUNK_SIZE) { 2588 printk(KERN_ERR "too big chunk_size: %d > %d\n", 2589 chunk_size, MAX_CHUNK_SIZE); 2590 return -EINVAL; 2591 } 2592 /* 2593 * chunk-size has to be a power of 2 and multiples of PAGE_SIZE 2594 */ 2595 if ( (1 << ffz(~chunk_size)) != chunk_size) { 2596 printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size); 2597 return -EINVAL; 2598 } 2599 if (chunk_size < PAGE_SIZE) { 2600 printk(KERN_ERR "too small chunk_size: %d < %ld\n", 2601 chunk_size, PAGE_SIZE); 2602 return -EINVAL; 2603 } 2604 2605 /* devices must have minimum size of one chunk */ 2606 ITERATE_RDEV(mddev,rdev,tmp) { 2607 if (test_bit(Faulty, &rdev->flags)) 2608 continue; 2609 if (rdev->size < chunk_size / 1024) { 2610 printk(KERN_WARNING 2611 "md: Dev %s smaller than chunk_size:" 2612 " %lluk < %dk\n", 2613 bdevname(rdev->bdev,b), 2614 (unsigned long long)rdev->size, 2615 chunk_size / 1024); 2616 return -EINVAL; 2617 } 2618 } 2619 } 2620 2621 #ifdef CONFIG_KMOD 2622 if (mddev->level != LEVEL_NONE) 2623 request_module("md-level-%d", mddev->level); 2624 else if (mddev->clevel[0]) 2625 request_module("md-%s", mddev->clevel); 2626 #endif 2627 2628 /* 2629 * Drop all container device buffers, from now on 2630 * the only valid external interface is through the md 2631 * device. 
2632 * Also find largest hardsector size 2633 */ 2634 ITERATE_RDEV(mddev,rdev,tmp) { 2635 if (test_bit(Faulty, &rdev->flags)) 2636 continue; 2637 sync_blockdev(rdev->bdev); 2638 invalidate_bdev(rdev->bdev, 0); 2639 } 2640 2641 md_probe(mddev->unit, NULL, NULL); 2642 disk = mddev->gendisk; 2643 if (!disk) 2644 return -ENOMEM; 2645 2646 spin_lock(&pers_lock); 2647 pers = find_pers(mddev->level, mddev->clevel); 2648 if (!pers || !try_module_get(pers->owner)) { 2649 spin_unlock(&pers_lock); 2650 if (mddev->level != LEVEL_NONE) 2651 printk(KERN_WARNING "md: personality for level %d is not loaded!\n", 2652 mddev->level); 2653 else 2654 printk(KERN_WARNING "md: personality for level %s is not loaded!\n", 2655 mddev->clevel); 2656 return -EINVAL; 2657 } 2658 mddev->pers = pers; 2659 spin_unlock(&pers_lock); 2660 mddev->level = pers->level; 2661 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 2662 2663 if (mddev->reshape_position != MaxSector && 2664 pers->start_reshape == NULL) { 2665 /* This personality cannot handle reshaping... */ 2666 mddev->pers = NULL; 2667 module_put(pers->owner); 2668 return -EINVAL; 2669 } 2670 2671 mddev->recovery = 0; 2672 mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */ 2673 mddev->barriers_work = 1; 2674 mddev->ok_start_degraded = start_dirty_degraded; 2675 2676 if (start_readonly) 2677 mddev->ro = 2; /* read-only, but switch on first write */ 2678 2679 err = mddev->pers->run(mddev); 2680 if (!err && mddev->pers->sync_request) { 2681 err = bitmap_create(mddev); 2682 if (err) { 2683 printk(KERN_ERR "%s: failed to create bitmap (%d)\n", 2684 mdname(mddev), err); 2685 mddev->pers->stop(mddev); 2686 } 2687 } 2688 if (err) { 2689 printk(KERN_ERR "md: pers->run() failed ...\n"); 2690 module_put(mddev->pers->owner); 2691 mddev->pers = NULL; 2692 bitmap_destroy(mddev); 2693 return err; 2694 } 2695 if (mddev->pers->sync_request) 2696 sysfs_create_group(&mddev->kobj, &md_redundancy_group); 2697 else if (mddev->ro == 2) /* auto-readonly not meaningful */ 2698 mddev->ro = 0; 2699 2700 atomic_set(&mddev->writes_pending,0); 2701 mddev->safemode = 0; 2702 mddev->safemode_timer.function = md_safemode_timeout; 2703 mddev->safemode_timer.data = (unsigned long) mddev; 2704 mddev->safemode_delay = (20 * HZ)/1000 +1; /* 20 msec delay */ 2705 mddev->in_sync = 1; 2706 2707 ITERATE_RDEV(mddev,rdev,tmp) 2708 if (rdev->raid_disk >= 0) { 2709 char nm[20]; 2710 sprintf(nm, "rd%d", rdev->raid_disk); 2711 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); 2712 } 2713 2714 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2715 md_wakeup_thread(mddev->thread); 2716 2717 if (mddev->sb_dirty) 2718 md_update_sb(mddev); 2719 2720 set_capacity(disk, mddev->array_size<<1); 2721 2722 /* If we call blk_queue_make_request here, it will 2723 * re-initialise max_sectors etc which may have been 2724 * refined inside -> run. So just set the bits we need to set. 2725 * Most initialisation happended when we called 2726 * blk_queue_make_request(..., md_fail_request) 2727 * earlier. 
2728 */ 2729 mddev->queue->queuedata = mddev; 2730 mddev->queue->make_request_fn = mddev->pers->make_request; 2731 2732 mddev->changed = 1; 2733 md_new_event(mddev); 2734 return 0; 2735 } 2736 2737 static int restart_array(mddev_t *mddev) 2738 { 2739 struct gendisk *disk = mddev->gendisk; 2740 int err; 2741 2742 /* 2743 * Complain if it has no devices 2744 */ 2745 err = -ENXIO; 2746 if (list_empty(&mddev->disks)) 2747 goto out; 2748 2749 if (mddev->pers) { 2750 err = -EBUSY; 2751 if (!mddev->ro) 2752 goto out; 2753 2754 mddev->safemode = 0; 2755 mddev->ro = 0; 2756 set_disk_ro(disk, 0); 2757 2758 printk(KERN_INFO "md: %s switched to read-write mode.\n", 2759 mdname(mddev)); 2760 /* 2761 * Kick recovery or resync if necessary 2762 */ 2763 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2764 md_wakeup_thread(mddev->thread); 2765 err = 0; 2766 } else { 2767 printk(KERN_ERR "md: %s has no personality assigned.\n", 2768 mdname(mddev)); 2769 err = -EINVAL; 2770 } 2771 2772 out: 2773 return err; 2774 } 2775 2776 static int do_md_stop(mddev_t * mddev, int ro) 2777 { 2778 int err = 0; 2779 struct gendisk *disk = mddev->gendisk; 2780 2781 if (mddev->pers) { 2782 if (atomic_read(&mddev->active)>2) { 2783 printk("md: %s still in use.\n",mdname(mddev)); 2784 return -EBUSY; 2785 } 2786 2787 if (mddev->sync_thread) { 2788 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 2789 md_unregister_thread(mddev->sync_thread); 2790 mddev->sync_thread = NULL; 2791 } 2792 2793 del_timer_sync(&mddev->safemode_timer); 2794 2795 invalidate_partition(disk, 0); 2796 2797 if (ro) { 2798 err = -ENXIO; 2799 if (mddev->ro==1) 2800 goto out; 2801 mddev->ro = 1; 2802 } else { 2803 bitmap_flush(mddev); 2804 md_super_wait(mddev); 2805 if (mddev->ro) 2806 set_disk_ro(disk, 0); 2807 blk_queue_make_request(mddev->queue, md_fail_request); 2808 mddev->pers->stop(mddev); 2809 if (mddev->pers->sync_request) 2810 sysfs_remove_group(&mddev->kobj, &md_redundancy_group); 2811 2812 module_put(mddev->pers->owner); 2813 mddev->pers = NULL; 2814 if (mddev->ro) 2815 mddev->ro = 0; 2816 } 2817 if (!mddev->in_sync) { 2818 /* mark array as shutdown cleanly */ 2819 mddev->in_sync = 1; 2820 md_update_sb(mddev); 2821 } 2822 if (ro) 2823 set_disk_ro(disk, 1); 2824 } 2825 2826 /* 2827 * Free resources if final stop 2828 */ 2829 if (!ro) { 2830 mdk_rdev_t *rdev; 2831 struct list_head *tmp; 2832 struct gendisk *disk; 2833 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); 2834 2835 bitmap_destroy(mddev); 2836 if (mddev->bitmap_file) { 2837 atomic_set(&mddev->bitmap_file->f_dentry->d_inode->i_writecount, 1); 2838 fput(mddev->bitmap_file); 2839 mddev->bitmap_file = NULL; 2840 } 2841 mddev->bitmap_offset = 0; 2842 2843 ITERATE_RDEV(mddev,rdev,tmp) 2844 if (rdev->raid_disk >= 0) { 2845 char nm[20]; 2846 sprintf(nm, "rd%d", rdev->raid_disk); 2847 sysfs_remove_link(&mddev->kobj, nm); 2848 } 2849 2850 export_array(mddev); 2851 2852 mddev->array_size = 0; 2853 disk = mddev->gendisk; 2854 if (disk) 2855 set_capacity(disk, 0); 2856 mddev->changed = 1; 2857 } else 2858 printk(KERN_INFO "md: %s switched to read-only mode.\n", 2859 mdname(mddev)); 2860 err = 0; 2861 md_new_event(mddev); 2862 out: 2863 return err; 2864 } 2865 2866 static void autorun_array(mddev_t *mddev) 2867 { 2868 mdk_rdev_t *rdev; 2869 struct list_head *tmp; 2870 int err; 2871 2872 if (list_empty(&mddev->disks)) 2873 return; 2874 2875 printk(KERN_INFO "md: running: "); 2876 2877 ITERATE_RDEV(mddev,rdev,tmp) { 2878 char b[BDEVNAME_SIZE]; 2879 printk("<%s>", bdevname(rdev->bdev,b)); 2880 } 2881 
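/*
 * Illustrative userspace sketch, not part of this driver: do_md_stop()
 * above is what the STOP_ARRAY (full stop) and STOP_ARRAY_RO (switch to
 * read-only) ioctls end up calling.  The device path and the
 * <linux/raid/md_u.h> include reflect typical usage, not this file.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/raid/md_u.h>

int main(void)
{
        int fd = open("/dev/md0", O_RDONLY);

        if (fd < 0)
                return 1;
        /* fails with EBUSY while something else still holds the array open */
        if (ioctl(fd, STOP_ARRAY, NULL) < 0)
                perror("STOP_ARRAY");
        close(fd);
        return 0;
}
#endif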
printk("\n"); 2882 2883 err = do_md_run (mddev); 2884 if (err) { 2885 printk(KERN_WARNING "md: do_md_run() returned %d\n", err); 2886 do_md_stop (mddev, 0); 2887 } 2888 } 2889 2890 /* 2891 * lets try to run arrays based on all disks that have arrived 2892 * until now. (those are in pending_raid_disks) 2893 * 2894 * the method: pick the first pending disk, collect all disks with 2895 * the same UUID, remove all from the pending list and put them into 2896 * the 'same_array' list. Then order this list based on superblock 2897 * update time (freshest comes first), kick out 'old' disks and 2898 * compare superblocks. If everything's fine then run it. 2899 * 2900 * If "unit" is allocated, then bump its reference count 2901 */ 2902 static void autorun_devices(int part) 2903 { 2904 struct list_head *tmp; 2905 mdk_rdev_t *rdev0, *rdev; 2906 mddev_t *mddev; 2907 char b[BDEVNAME_SIZE]; 2908 2909 printk(KERN_INFO "md: autorun ...\n"); 2910 while (!list_empty(&pending_raid_disks)) { 2911 dev_t dev; 2912 LIST_HEAD(candidates); 2913 rdev0 = list_entry(pending_raid_disks.next, 2914 mdk_rdev_t, same_set); 2915 2916 printk(KERN_INFO "md: considering %s ...\n", 2917 bdevname(rdev0->bdev,b)); 2918 INIT_LIST_HEAD(&candidates); 2919 ITERATE_RDEV_PENDING(rdev,tmp) 2920 if (super_90_load(rdev, rdev0, 0) >= 0) { 2921 printk(KERN_INFO "md: adding %s ...\n", 2922 bdevname(rdev->bdev,b)); 2923 list_move(&rdev->same_set, &candidates); 2924 } 2925 /* 2926 * now we have a set of devices, with all of them having 2927 * mostly sane superblocks. It's time to allocate the 2928 * mddev. 2929 */ 2930 if (rdev0->preferred_minor < 0 || rdev0->preferred_minor >= MAX_MD_DEVS) { 2931 printk(KERN_INFO "md: unit number in %s is bad: %d\n", 2932 bdevname(rdev0->bdev, b), rdev0->preferred_minor); 2933 break; 2934 } 2935 if (part) 2936 dev = MKDEV(mdp_major, 2937 rdev0->preferred_minor << MdpMinorShift); 2938 else 2939 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor); 2940 2941 md_probe(dev, NULL, NULL); 2942 mddev = mddev_find(dev); 2943 if (!mddev) { 2944 printk(KERN_ERR 2945 "md: cannot allocate memory for md drive.\n"); 2946 break; 2947 } 2948 if (mddev_lock(mddev)) 2949 printk(KERN_WARNING "md: %s locked, cannot run\n", 2950 mdname(mddev)); 2951 else if (mddev->raid_disks || mddev->major_version 2952 || !list_empty(&mddev->disks)) { 2953 printk(KERN_WARNING 2954 "md: %s already running, cannot run %s\n", 2955 mdname(mddev), bdevname(rdev0->bdev,b)); 2956 mddev_unlock(mddev); 2957 } else { 2958 printk(KERN_INFO "md: created %s\n", mdname(mddev)); 2959 ITERATE_RDEV_GENERIC(candidates,rdev,tmp) { 2960 list_del_init(&rdev->same_set); 2961 if (bind_rdev_to_array(rdev, mddev)) 2962 export_rdev(rdev); 2963 } 2964 autorun_array(mddev); 2965 mddev_unlock(mddev); 2966 } 2967 /* on success, candidates will be empty, on error 2968 * it won't... 2969 */ 2970 ITERATE_RDEV_GENERIC(candidates,rdev,tmp) 2971 export_rdev(rdev); 2972 mddev_put(mddev); 2973 } 2974 printk(KERN_INFO "md: ... autorun DONE.\n"); 2975 } 2976 2977 /* 2978 * import RAID devices based on one partition 2979 * if possible, the array gets run as well. 
2980 */ 2981 2982 static int autostart_array(dev_t startdev) 2983 { 2984 char b[BDEVNAME_SIZE]; 2985 int err = -EINVAL, i; 2986 mdp_super_t *sb = NULL; 2987 mdk_rdev_t *start_rdev = NULL, *rdev; 2988 2989 start_rdev = md_import_device(startdev, 0, 0); 2990 if (IS_ERR(start_rdev)) 2991 return err; 2992 2993 2994 /* NOTE: this can only work for 0.90.0 superblocks */ 2995 sb = (mdp_super_t*)page_address(start_rdev->sb_page); 2996 if (sb->major_version != 0 || 2997 sb->minor_version != 90 ) { 2998 printk(KERN_WARNING "md: can only autostart 0.90.0 arrays\n"); 2999 export_rdev(start_rdev); 3000 return err; 3001 } 3002 3003 if (test_bit(Faulty, &start_rdev->flags)) { 3004 printk(KERN_WARNING 3005 "md: can not autostart based on faulty %s!\n", 3006 bdevname(start_rdev->bdev,b)); 3007 export_rdev(start_rdev); 3008 return err; 3009 } 3010 list_add(&start_rdev->same_set, &pending_raid_disks); 3011 3012 for (i = 0; i < MD_SB_DISKS; i++) { 3013 mdp_disk_t *desc = sb->disks + i; 3014 dev_t dev = MKDEV(desc->major, desc->minor); 3015 3016 if (!dev) 3017 continue; 3018 if (dev == startdev) 3019 continue; 3020 if (MAJOR(dev) != desc->major || MINOR(dev) != desc->minor) 3021 continue; 3022 rdev = md_import_device(dev, 0, 0); 3023 if (IS_ERR(rdev)) 3024 continue; 3025 3026 list_add(&rdev->same_set, &pending_raid_disks); 3027 } 3028 3029 /* 3030 * possibly return codes 3031 */ 3032 autorun_devices(0); 3033 return 0; 3034 3035 } 3036 3037 3038 static int get_version(void __user * arg) 3039 { 3040 mdu_version_t ver; 3041 3042 ver.major = MD_MAJOR_VERSION; 3043 ver.minor = MD_MINOR_VERSION; 3044 ver.patchlevel = MD_PATCHLEVEL_VERSION; 3045 3046 if (copy_to_user(arg, &ver, sizeof(ver))) 3047 return -EFAULT; 3048 3049 return 0; 3050 } 3051 3052 static int get_array_info(mddev_t * mddev, void __user * arg) 3053 { 3054 mdu_array_info_t info; 3055 int nr,working,active,failed,spare; 3056 mdk_rdev_t *rdev; 3057 struct list_head *tmp; 3058 3059 nr=working=active=failed=spare=0; 3060 ITERATE_RDEV(mddev,rdev,tmp) { 3061 nr++; 3062 if (test_bit(Faulty, &rdev->flags)) 3063 failed++; 3064 else { 3065 working++; 3066 if (test_bit(In_sync, &rdev->flags)) 3067 active++; 3068 else 3069 spare++; 3070 } 3071 } 3072 3073 info.major_version = mddev->major_version; 3074 info.minor_version = mddev->minor_version; 3075 info.patch_version = MD_PATCHLEVEL_VERSION; 3076 info.ctime = mddev->ctime; 3077 info.level = mddev->level; 3078 info.size = mddev->size; 3079 if (info.size != mddev->size) /* overflow */ 3080 info.size = -1; 3081 info.nr_disks = nr; 3082 info.raid_disks = mddev->raid_disks; 3083 info.md_minor = mddev->md_minor; 3084 info.not_persistent= !mddev->persistent; 3085 3086 info.utime = mddev->utime; 3087 info.state = 0; 3088 if (mddev->in_sync) 3089 info.state = (1<<MD_SB_CLEAN); 3090 if (mddev->bitmap && mddev->bitmap_offset) 3091 info.state = (1<<MD_SB_BITMAP_PRESENT); 3092 info.active_disks = active; 3093 info.working_disks = working; 3094 info.failed_disks = failed; 3095 info.spare_disks = spare; 3096 3097 info.layout = mddev->layout; 3098 info.chunk_size = mddev->chunk_size; 3099 3100 if (copy_to_user(arg, &info, sizeof(info))) 3101 return -EFAULT; 3102 3103 return 0; 3104 } 3105 3106 static int get_bitmap_file(mddev_t * mddev, void __user * arg) 3107 { 3108 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ 3109 char *ptr, *buf = NULL; 3110 int err = -ENOMEM; 3111 3112 file = kmalloc(sizeof(*file), GFP_KERNEL); 3113 if (!file) 3114 goto out; 3115 3116 /* bitmap disabled, zero the first byte and copy out 
*/ 3117 if (!mddev->bitmap || !mddev->bitmap->file) { 3118 file->pathname[0] = '\0'; 3119 goto copy_out; 3120 } 3121 3122 buf = kmalloc(sizeof(file->pathname), GFP_KERNEL); 3123 if (!buf) 3124 goto out; 3125 3126 ptr = file_path(mddev->bitmap->file, buf, sizeof(file->pathname)); 3127 if (!ptr) 3128 goto out; 3129 3130 strcpy(file->pathname, ptr); 3131 3132 copy_out: 3133 err = 0; 3134 if (copy_to_user(arg, file, sizeof(*file))) 3135 err = -EFAULT; 3136 out: 3137 kfree(buf); 3138 kfree(file); 3139 return err; 3140 } 3141 3142 static int get_disk_info(mddev_t * mddev, void __user * arg) 3143 { 3144 mdu_disk_info_t info; 3145 unsigned int nr; 3146 mdk_rdev_t *rdev; 3147 3148 if (copy_from_user(&info, arg, sizeof(info))) 3149 return -EFAULT; 3150 3151 nr = info.number; 3152 3153 rdev = find_rdev_nr(mddev, nr); 3154 if (rdev) { 3155 info.major = MAJOR(rdev->bdev->bd_dev); 3156 info.minor = MINOR(rdev->bdev->bd_dev); 3157 info.raid_disk = rdev->raid_disk; 3158 info.state = 0; 3159 if (test_bit(Faulty, &rdev->flags)) 3160 info.state |= (1<<MD_DISK_FAULTY); 3161 else if (test_bit(In_sync, &rdev->flags)) { 3162 info.state |= (1<<MD_DISK_ACTIVE); 3163 info.state |= (1<<MD_DISK_SYNC); 3164 } 3165 if (test_bit(WriteMostly, &rdev->flags)) 3166 info.state |= (1<<MD_DISK_WRITEMOSTLY); 3167 } else { 3168 info.major = info.minor = 0; 3169 info.raid_disk = -1; 3170 info.state = (1<<MD_DISK_REMOVED); 3171 } 3172 3173 if (copy_to_user(arg, &info, sizeof(info))) 3174 return -EFAULT; 3175 3176 return 0; 3177 } 3178 3179 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) 3180 { 3181 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 3182 mdk_rdev_t *rdev; 3183 dev_t dev = MKDEV(info->major,info->minor); 3184 3185 if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) 3186 return -EOVERFLOW; 3187 3188 if (!mddev->raid_disks) { 3189 int err; 3190 /* expecting a device which has a superblock */ 3191 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); 3192 if (IS_ERR(rdev)) { 3193 printk(KERN_WARNING 3194 "md: md_import_device returned %ld\n", 3195 PTR_ERR(rdev)); 3196 return PTR_ERR(rdev); 3197 } 3198 if (!list_empty(&mddev->disks)) { 3199 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, 3200 mdk_rdev_t, same_set); 3201 int err = super_types[mddev->major_version] 3202 .load_super(rdev, rdev0, mddev->minor_version); 3203 if (err < 0) { 3204 printk(KERN_WARNING 3205 "md: %s has different UUID to %s\n", 3206 bdevname(rdev->bdev,b), 3207 bdevname(rdev0->bdev,b2)); 3208 export_rdev(rdev); 3209 return -EINVAL; 3210 } 3211 } 3212 err = bind_rdev_to_array(rdev, mddev); 3213 if (err) 3214 export_rdev(rdev); 3215 return err; 3216 } 3217 3218 /* 3219 * add_new_disk can be used once the array is assembled 3220 * to add "hot spares". 
They must already have a superblock 3221 * written 3222 */ 3223 if (mddev->pers) { 3224 int err; 3225 if (!mddev->pers->hot_add_disk) { 3226 printk(KERN_WARNING 3227 "%s: personality does not support diskops!\n", 3228 mdname(mddev)); 3229 return -EINVAL; 3230 } 3231 if (mddev->persistent) 3232 rdev = md_import_device(dev, mddev->major_version, 3233 mddev->minor_version); 3234 else 3235 rdev = md_import_device(dev, -1, -1); 3236 if (IS_ERR(rdev)) { 3237 printk(KERN_WARNING 3238 "md: md_import_device returned %ld\n", 3239 PTR_ERR(rdev)); 3240 return PTR_ERR(rdev); 3241 } 3242 /* set save_raid_disk if appropriate */ 3243 if (!mddev->persistent) { 3244 if (info->state & (1<<MD_DISK_SYNC) && 3245 info->raid_disk < mddev->raid_disks) 3246 rdev->raid_disk = info->raid_disk; 3247 else 3248 rdev->raid_disk = -1; 3249 } else 3250 super_types[mddev->major_version]. 3251 validate_super(mddev, rdev); 3252 rdev->saved_raid_disk = rdev->raid_disk; 3253 3254 clear_bit(In_sync, &rdev->flags); /* just to be sure */ 3255 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 3256 set_bit(WriteMostly, &rdev->flags); 3257 3258 rdev->raid_disk = -1; 3259 err = bind_rdev_to_array(rdev, mddev); 3260 if (err) 3261 export_rdev(rdev); 3262 3263 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3264 md_wakeup_thread(mddev->thread); 3265 return err; 3266 } 3267 3268 /* otherwise, add_new_disk is only allowed 3269 * for major_version==0 superblocks 3270 */ 3271 if (mddev->major_version != 0) { 3272 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n", 3273 mdname(mddev)); 3274 return -EINVAL; 3275 } 3276 3277 if (!(info->state & (1<<MD_DISK_FAULTY))) { 3278 int err; 3279 rdev = md_import_device (dev, -1, 0); 3280 if (IS_ERR(rdev)) { 3281 printk(KERN_WARNING 3282 "md: error, md_import_device() returned %ld\n", 3283 PTR_ERR(rdev)); 3284 return PTR_ERR(rdev); 3285 } 3286 rdev->desc_nr = info->number; 3287 if (info->raid_disk < mddev->raid_disks) 3288 rdev->raid_disk = info->raid_disk; 3289 else 3290 rdev->raid_disk = -1; 3291 3292 rdev->flags = 0; 3293 3294 if (rdev->raid_disk < mddev->raid_disks) 3295 if (info->state & (1<<MD_DISK_SYNC)) 3296 set_bit(In_sync, &rdev->flags); 3297 3298 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 3299 set_bit(WriteMostly, &rdev->flags); 3300 3301 if (!mddev->persistent) { 3302 printk(KERN_INFO "md: nonpersistent superblock ...\n"); 3303 rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 3304 } else 3305 rdev->sb_offset = calc_dev_sboffset(rdev->bdev); 3306 rdev->size = calc_dev_size(rdev, mddev->chunk_size); 3307 3308 err = bind_rdev_to_array(rdev, mddev); 3309 if (err) { 3310 export_rdev(rdev); 3311 return err; 3312 } 3313 } 3314 3315 return 0; 3316 } 3317 3318 static int hot_remove_disk(mddev_t * mddev, dev_t dev) 3319 { 3320 char b[BDEVNAME_SIZE]; 3321 mdk_rdev_t *rdev; 3322 3323 if (!mddev->pers) 3324 return -ENODEV; 3325 3326 rdev = find_rdev(mddev, dev); 3327 if (!rdev) 3328 return -ENXIO; 3329 3330 if (rdev->raid_disk >= 0) 3331 goto busy; 3332 3333 kick_rdev_from_array(rdev); 3334 md_update_sb(mddev); 3335 md_new_event(mddev); 3336 3337 return 0; 3338 busy: 3339 printk(KERN_WARNING "md: cannot remove active disk %s from %s ... 
\n", 3340 bdevname(rdev->bdev,b), mdname(mddev)); 3341 return -EBUSY; 3342 } 3343 3344 static int hot_add_disk(mddev_t * mddev, dev_t dev) 3345 { 3346 char b[BDEVNAME_SIZE]; 3347 int err; 3348 unsigned int size; 3349 mdk_rdev_t *rdev; 3350 3351 if (!mddev->pers) 3352 return -ENODEV; 3353 3354 if (mddev->major_version != 0) { 3355 printk(KERN_WARNING "%s: HOT_ADD may only be used with" 3356 " version-0 superblocks.\n", 3357 mdname(mddev)); 3358 return -EINVAL; 3359 } 3360 if (!mddev->pers->hot_add_disk) { 3361 printk(KERN_WARNING 3362 "%s: personality does not support diskops!\n", 3363 mdname(mddev)); 3364 return -EINVAL; 3365 } 3366 3367 rdev = md_import_device (dev, -1, 0); 3368 if (IS_ERR(rdev)) { 3369 printk(KERN_WARNING 3370 "md: error, md_import_device() returned %ld\n", 3371 PTR_ERR(rdev)); 3372 return -EINVAL; 3373 } 3374 3375 if (mddev->persistent) 3376 rdev->sb_offset = calc_dev_sboffset(rdev->bdev); 3377 else 3378 rdev->sb_offset = 3379 rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 3380 3381 size = calc_dev_size(rdev, mddev->chunk_size); 3382 rdev->size = size; 3383 3384 if (test_bit(Faulty, &rdev->flags)) { 3385 printk(KERN_WARNING 3386 "md: can not hot-add faulty %s disk to %s!\n", 3387 bdevname(rdev->bdev,b), mdname(mddev)); 3388 err = -EINVAL; 3389 goto abort_export; 3390 } 3391 clear_bit(In_sync, &rdev->flags); 3392 rdev->desc_nr = -1; 3393 err = bind_rdev_to_array(rdev, mddev); 3394 if (err) 3395 goto abort_export; 3396 3397 /* 3398 * The rest should better be atomic, we can have disk failures 3399 * noticed in interrupt contexts ... 3400 */ 3401 3402 if (rdev->desc_nr == mddev->max_disks) { 3403 printk(KERN_WARNING "%s: can not hot-add to full array!\n", 3404 mdname(mddev)); 3405 err = -EBUSY; 3406 goto abort_unbind_export; 3407 } 3408 3409 rdev->raid_disk = -1; 3410 3411 md_update_sb(mddev); 3412 3413 /* 3414 * Kick recovery, maybe this spare has to be added to the 3415 * array immediately. 3416 */ 3417 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3418 md_wakeup_thread(mddev->thread); 3419 md_new_event(mddev); 3420 return 0; 3421 3422 abort_unbind_export: 3423 unbind_rdev_from_array(rdev); 3424 3425 abort_export: 3426 export_rdev(rdev); 3427 return err; 3428 } 3429 3430 /* similar to deny_write_access, but accounts for our holding a reference 3431 * to the file ourselves */ 3432 static int deny_bitmap_write_access(struct file * file) 3433 { 3434 struct inode *inode = file->f_mapping->host; 3435 3436 spin_lock(&inode->i_lock); 3437 if (atomic_read(&inode->i_writecount) > 1) { 3438 spin_unlock(&inode->i_lock); 3439 return -ETXTBSY; 3440 } 3441 atomic_set(&inode->i_writecount, -1); 3442 spin_unlock(&inode->i_lock); 3443 3444 return 0; 3445 } 3446 3447 static int set_bitmap_file(mddev_t *mddev, int fd) 3448 { 3449 int err; 3450 3451 if (mddev->pers) { 3452 if (!mddev->pers->quiesce) 3453 return -EBUSY; 3454 if (mddev->recovery || mddev->sync_thread) 3455 return -EBUSY; 3456 /* we should be able to change the bitmap.. 
*/ 3457 } 3458 3459 3460 if (fd >= 0) { 3461 if (mddev->bitmap) 3462 return -EEXIST; /* cannot add when bitmap is present */ 3463 mddev->bitmap_file = fget(fd); 3464 3465 if (mddev->bitmap_file == NULL) { 3466 printk(KERN_ERR "%s: error: failed to get bitmap file\n", 3467 mdname(mddev)); 3468 return -EBADF; 3469 } 3470 3471 err = deny_bitmap_write_access(mddev->bitmap_file); 3472 if (err) { 3473 printk(KERN_ERR "%s: error: bitmap file is already in use\n", 3474 mdname(mddev)); 3475 fput(mddev->bitmap_file); 3476 mddev->bitmap_file = NULL; 3477 return err; 3478 } 3479 mddev->bitmap_offset = 0; /* file overrides offset */ 3480 } else if (mddev->bitmap == NULL) 3481 return -ENOENT; /* cannot remove what isn't there */ 3482 err = 0; 3483 if (mddev->pers) { 3484 mddev->pers->quiesce(mddev, 1); 3485 if (fd >= 0) 3486 err = bitmap_create(mddev); 3487 if (fd < 0 || err) 3488 bitmap_destroy(mddev); 3489 mddev->pers->quiesce(mddev, 0); 3490 } else if (fd < 0) { 3491 if (mddev->bitmap_file) 3492 fput(mddev->bitmap_file); 3493 mddev->bitmap_file = NULL; 3494 } 3495 3496 return err; 3497 } 3498 3499 /* 3500 * set_array_info is used two different ways 3501 * The original usage is when creating a new array. 3502 * In this usage, raid_disks is > 0 and it together with 3503 * level, size, not_persistent,layout,chunksize determine the 3504 * shape of the array. 3505 * This will always create an array with a type-0.90.0 superblock. 3506 * The newer usage is when assembling an array. 3507 * In this case raid_disks will be 0, and the major_version field is 3508 * use to determine which style super-blocks are to be found on the devices. 3509 * The minor and patch _version numbers are also kept incase the 3510 * super_block handler wishes to interpret them. 3511 */ 3512 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) 3513 { 3514 3515 if (info->raid_disks == 0) { 3516 /* just setting version number for superblock loading */ 3517 if (info->major_version < 0 || 3518 info->major_version >= sizeof(super_types)/sizeof(super_types[0]) || 3519 super_types[info->major_version].name == NULL) { 3520 /* maybe try to auto-load a module? */ 3521 printk(KERN_INFO 3522 "md: superblock version %d not known\n", 3523 info->major_version); 3524 return -EINVAL; 3525 } 3526 mddev->major_version = info->major_version; 3527 mddev->minor_version = info->minor_version; 3528 mddev->patch_version = info->patch_version; 3529 return 0; 3530 } 3531 mddev->major_version = MD_MAJOR_VERSION; 3532 mddev->minor_version = MD_MINOR_VERSION; 3533 mddev->patch_version = MD_PATCHLEVEL_VERSION; 3534 mddev->ctime = get_seconds(); 3535 3536 mddev->level = info->level; 3537 mddev->clevel[0] = 0; 3538 mddev->size = info->size; 3539 mddev->raid_disks = info->raid_disks; 3540 /* don't set md_minor, it is determined by which /dev/md* was 3541 * openned 3542 */ 3543 if (info->state & (1<<MD_SB_CLEAN)) 3544 mddev->recovery_cp = MaxSector; 3545 else 3546 mddev->recovery_cp = 0; 3547 mddev->persistent = ! 
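/*
 * Illustrative userspace sketch, not part of this driver: the array
 * creation path of set_array_info() here is normally driven over the
 * ioctl interface, SET_ARRAY_INFO with raid_disks > 0, one ADD_NEW_DISK
 * per member, then RUN_ARRAY.  The device numbers, disk state flags and
 * the <linux/raid/md_p.h>/<linux/raid/md_u.h> includes reflect typical
 * usage and are assumptions of this sketch, not taken from this file.
 */
#if 0
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>

int main(void)
{
        mdu_array_info_t array;
        mdu_disk_info_t disk;
        int fd = open("/dev/md0", O_RDONLY);

        if (fd < 0)
                return 1;

        memset(&array, 0, sizeof(array));
        array.level = 1;        /* RAID1 */
        array.raid_disks = 2;
        if (ioctl(fd, SET_ARRAY_INFO, &array) < 0)
                return 1;

        memset(&disk, 0, sizeof(disk));
        disk.number = 0;
        disk.major = 8;         /* e.g. /dev/sda1 */
        disk.minor = 1;
        disk.raid_disk = 0;
        /* creation tools typically mark members active and in sync */
        disk.state = (1 << MD_DISK_ACTIVE) | (1 << MD_DISK_SYNC);
        if (ioctl(fd, ADD_NEW_DISK, &disk) < 0)
                return 1;

        /* ... repeat ADD_NEW_DISK for the second member ... */

        return ioctl(fd, RUN_ARRAY, NULL) ? 1 : 0;
}
#endif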
info->not_persistent; 3548 3549 mddev->layout = info->layout; 3550 mddev->chunk_size = info->chunk_size; 3551 3552 mddev->max_disks = MD_SB_DISKS; 3553 3554 mddev->sb_dirty = 1; 3555 3556 mddev->default_bitmap_offset = MD_SB_BYTES >> 9; 3557 mddev->bitmap_offset = 0; 3558 3559 mddev->reshape_position = MaxSector; 3560 3561 /* 3562 * Generate a 128 bit UUID 3563 */ 3564 get_random_bytes(mddev->uuid, 16); 3565 3566 mddev->new_level = mddev->level; 3567 mddev->new_chunk = mddev->chunk_size; 3568 mddev->new_layout = mddev->layout; 3569 mddev->delta_disks = 0; 3570 3571 return 0; 3572 } 3573 3574 static int update_size(mddev_t *mddev, unsigned long size) 3575 { 3576 mdk_rdev_t * rdev; 3577 int rv; 3578 struct list_head *tmp; 3579 int fit = (size == 0); 3580 3581 if (mddev->pers->resize == NULL) 3582 return -EINVAL; 3583 /* The "size" is the amount of each device that is used. 3584 * This can only make sense for arrays with redundancy. 3585 * linear and raid0 always use whatever space is available 3586 * We can only consider changing the size if no resync 3587 * or reconstruction is happening, and if the new size 3588 * is acceptable. It must fit before the sb_offset or, 3589 * if that is <data_offset, it must fit before the 3590 * size of each device. 3591 * If size is zero, we find the largest size that fits. 3592 */ 3593 if (mddev->sync_thread) 3594 return -EBUSY; 3595 ITERATE_RDEV(mddev,rdev,tmp) { 3596 sector_t avail; 3597 if (rdev->sb_offset > rdev->data_offset) 3598 avail = (rdev->sb_offset*2) - rdev->data_offset; 3599 else 3600 avail = get_capacity(rdev->bdev->bd_disk) 3601 - rdev->data_offset; 3602 if (fit && (size == 0 || size > avail/2)) 3603 size = avail/2; 3604 if (avail < ((sector_t)size << 1)) 3605 return -ENOSPC; 3606 } 3607 rv = mddev->pers->resize(mddev, (sector_t)size *2); 3608 if (!rv) { 3609 struct block_device *bdev; 3610 3611 bdev = bdget_disk(mddev->gendisk, 0); 3612 if (bdev) { 3613 mutex_lock(&bdev->bd_inode->i_mutex); 3614 i_size_write(bdev->bd_inode, (loff_t)mddev->array_size << 10); 3615 mutex_unlock(&bdev->bd_inode->i_mutex); 3616 bdput(bdev); 3617 } 3618 } 3619 return rv; 3620 } 3621 3622 static int update_raid_disks(mddev_t *mddev, int raid_disks) 3623 { 3624 int rv; 3625 /* change the number of raid disks */ 3626 if (mddev->pers->check_reshape == NULL) 3627 return -EINVAL; 3628 if (raid_disks <= 0 || 3629 raid_disks >= mddev->max_disks) 3630 return -EINVAL; 3631 if (mddev->sync_thread || mddev->reshape_position != MaxSector) 3632 return -EBUSY; 3633 mddev->delta_disks = raid_disks - mddev->raid_disks; 3634 3635 rv = mddev->pers->check_reshape(mddev); 3636 return rv; 3637 } 3638 3639 3640 /* 3641 * update_array_info is used to change the configuration of an 3642 * on-line array. 3643 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size 3644 * fields in the info are checked against the array. 3645 * Any differences that cannot be handled will cause an error. 3646 * Normally, only one change can be managed at a time. 
3647 */ 3648 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info) 3649 { 3650 int rv = 0; 3651 int cnt = 0; 3652 int state = 0; 3653 3654 /* calculate expected state,ignoring low bits */ 3655 if (mddev->bitmap && mddev->bitmap_offset) 3656 state |= (1 << MD_SB_BITMAP_PRESENT); 3657 3658 if (mddev->major_version != info->major_version || 3659 mddev->minor_version != info->minor_version || 3660 /* mddev->patch_version != info->patch_version || */ 3661 mddev->ctime != info->ctime || 3662 mddev->level != info->level || 3663 /* mddev->layout != info->layout || */ 3664 !mddev->persistent != info->not_persistent|| 3665 mddev->chunk_size != info->chunk_size || 3666 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ 3667 ((state^info->state) & 0xfffffe00) 3668 ) 3669 return -EINVAL; 3670 /* Check there is only one change */ 3671 if (info->size >= 0 && mddev->size != info->size) cnt++; 3672 if (mddev->raid_disks != info->raid_disks) cnt++; 3673 if (mddev->layout != info->layout) cnt++; 3674 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++; 3675 if (cnt == 0) return 0; 3676 if (cnt > 1) return -EINVAL; 3677 3678 if (mddev->layout != info->layout) { 3679 /* Change layout 3680 * we don't need to do anything at the md level, the 3681 * personality will take care of it all. 3682 */ 3683 if (mddev->pers->reconfig == NULL) 3684 return -EINVAL; 3685 else 3686 return mddev->pers->reconfig(mddev, info->layout, -1); 3687 } 3688 if (info->size >= 0 && mddev->size != info->size) 3689 rv = update_size(mddev, info->size); 3690 3691 if (mddev->raid_disks != info->raid_disks) 3692 rv = update_raid_disks(mddev, info->raid_disks); 3693 3694 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { 3695 if (mddev->pers->quiesce == NULL) 3696 return -EINVAL; 3697 if (mddev->recovery || mddev->sync_thread) 3698 return -EBUSY; 3699 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) { 3700 /* add the bitmap */ 3701 if (mddev->bitmap) 3702 return -EEXIST; 3703 if (mddev->default_bitmap_offset == 0) 3704 return -EINVAL; 3705 mddev->bitmap_offset = mddev->default_bitmap_offset; 3706 mddev->pers->quiesce(mddev, 1); 3707 rv = bitmap_create(mddev); 3708 if (rv) 3709 bitmap_destroy(mddev); 3710 mddev->pers->quiesce(mddev, 0); 3711 } else { 3712 /* remove the bitmap */ 3713 if (!mddev->bitmap) 3714 return -ENOENT; 3715 if (mddev->bitmap->file) 3716 return -EINVAL; 3717 mddev->pers->quiesce(mddev, 1); 3718 bitmap_destroy(mddev); 3719 mddev->pers->quiesce(mddev, 0); 3720 mddev->bitmap_offset = 0; 3721 } 3722 } 3723 md_update_sb(mddev); 3724 return rv; 3725 } 3726 3727 static int set_disk_faulty(mddev_t *mddev, dev_t dev) 3728 { 3729 mdk_rdev_t *rdev; 3730 3731 if (mddev->pers == NULL) 3732 return -ENODEV; 3733 3734 rdev = find_rdev(mddev, dev); 3735 if (!rdev) 3736 return -ENODEV; 3737 3738 md_error(mddev, rdev); 3739 return 0; 3740 } 3741 3742 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) 3743 { 3744 mddev_t *mddev = bdev->bd_disk->private_data; 3745 3746 geo->heads = 2; 3747 geo->sectors = 4; 3748 geo->cylinders = get_capacity(mddev->gendisk) / 8; 3749 return 0; 3750 } 3751 3752 static int md_ioctl(struct inode *inode, struct file *file, 3753 unsigned int cmd, unsigned long arg) 3754 { 3755 int err = 0; 3756 void __user *argp = (void __user *)arg; 3757 mddev_t *mddev = NULL; 3758 3759 if (!capable(CAP_SYS_ADMIN)) 3760 return -EACCES; 3761 3762 /* 3763 * Commands dealing with the RAID driver but not any 3764 * particular array: 3765 */ 3766 switch (cmd) 3767 { 
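/*
 * Illustrative userspace sketch, not part of this driver: the read-only
 * query ioctls handled below (RAID_VERSION, GET_ARRAY_INFO, GET_DISK_INFO)
 * need CAP_SYS_ADMIN, which md_ioctl() checks first, plus an open
 * descriptor on the array.  The device path and the <linux/raid/md_u.h>
 * include are examples of typical usage.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/raid/md_u.h>

int main(void)
{
        mdu_version_t ver;
        mdu_array_info_t info;
        int fd = open("/dev/md0", O_RDONLY);

        if (fd < 0)
                return 1;
        if (ioctl(fd, RAID_VERSION, &ver) == 0)
                printf("md driver %d.%d.%d\n",
                       ver.major, ver.minor, ver.patchlevel);
        if (ioctl(fd, GET_ARRAY_INFO, &info) == 0)
                printf("level %d, %d raid disks, %d active\n",
                       info.level, info.raid_disks, info.active_disks);
        close(fd);
        return 0;
}
#endif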
3768 case RAID_VERSION: 3769 err = get_version(argp); 3770 goto done; 3771 3772 case PRINT_RAID_DEBUG: 3773 err = 0; 3774 md_print_devices(); 3775 goto done; 3776 3777 #ifndef MODULE 3778 case RAID_AUTORUN: 3779 err = 0; 3780 autostart_arrays(arg); 3781 goto done; 3782 #endif 3783 default:; 3784 } 3785 3786 /* 3787 * Commands creating/starting a new array: 3788 */ 3789 3790 mddev = inode->i_bdev->bd_disk->private_data; 3791 3792 if (!mddev) { 3793 BUG(); 3794 goto abort; 3795 } 3796 3797 3798 if (cmd == START_ARRAY) { 3799 /* START_ARRAY doesn't need to lock the array as autostart_array 3800 * does the locking, and it could even be a different array 3801 */ 3802 static int cnt = 3; 3803 if (cnt > 0 ) { 3804 printk(KERN_WARNING 3805 "md: %s(pid %d) used deprecated START_ARRAY ioctl. " 3806 "This will not be supported beyond July 2006\n", 3807 current->comm, current->pid); 3808 cnt--; 3809 } 3810 err = autostart_array(new_decode_dev(arg)); 3811 if (err) { 3812 printk(KERN_WARNING "md: autostart failed!\n"); 3813 goto abort; 3814 } 3815 goto done; 3816 } 3817 3818 err = mddev_lock(mddev); 3819 if (err) { 3820 printk(KERN_INFO 3821 "md: ioctl lock interrupted, reason %d, cmd %d\n", 3822 err, cmd); 3823 goto abort; 3824 } 3825 3826 switch (cmd) 3827 { 3828 case SET_ARRAY_INFO: 3829 { 3830 mdu_array_info_t info; 3831 if (!arg) 3832 memset(&info, 0, sizeof(info)); 3833 else if (copy_from_user(&info, argp, sizeof(info))) { 3834 err = -EFAULT; 3835 goto abort_unlock; 3836 } 3837 if (mddev->pers) { 3838 err = update_array_info(mddev, &info); 3839 if (err) { 3840 printk(KERN_WARNING "md: couldn't update" 3841 " array info. %d\n", err); 3842 goto abort_unlock; 3843 } 3844 goto done_unlock; 3845 } 3846 if (!list_empty(&mddev->disks)) { 3847 printk(KERN_WARNING 3848 "md: array %s already has disks!\n", 3849 mdname(mddev)); 3850 err = -EBUSY; 3851 goto abort_unlock; 3852 } 3853 if (mddev->raid_disks) { 3854 printk(KERN_WARNING 3855 "md: array %s already initialised!\n", 3856 mdname(mddev)); 3857 err = -EBUSY; 3858 goto abort_unlock; 3859 } 3860 err = set_array_info(mddev, &info); 3861 if (err) { 3862 printk(KERN_WARNING "md: couldn't set" 3863 " array info. %d\n", err); 3864 goto abort_unlock; 3865 } 3866 } 3867 goto done_unlock; 3868 3869 default:; 3870 } 3871 3872 /* 3873 * Commands querying/configuring an existing array: 3874 */ 3875 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY, 3876 * RUN_ARRAY, and SET_BITMAP_FILE are allowed */ 3877 if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY 3878 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE) { 3879 err = -ENODEV; 3880 goto abort_unlock; 3881 } 3882 3883 /* 3884 * Commands even a read-only array can execute: 3885 */ 3886 switch (cmd) 3887 { 3888 case GET_ARRAY_INFO: 3889 err = get_array_info(mddev, argp); 3890 goto done_unlock; 3891 3892 case GET_BITMAP_FILE: 3893 err = get_bitmap_file(mddev, argp); 3894 goto done_unlock; 3895 3896 case GET_DISK_INFO: 3897 err = get_disk_info(mddev, argp); 3898 goto done_unlock; 3899 3900 case RESTART_ARRAY_RW: 3901 err = restart_array(mddev); 3902 goto done_unlock; 3903 3904 case STOP_ARRAY: 3905 err = do_md_stop (mddev, 0); 3906 goto done_unlock; 3907 3908 case STOP_ARRAY_RO: 3909 err = do_md_stop (mddev, 1); 3910 goto done_unlock; 3911 3912 /* 3913 * We have a problem here : there is no easy way to give a CHS 3914 * virtual geometry. We currently pretend that we have a 2 heads 3915 * 4 sectors (with a BIG number of cylinders...). This drives 3916 * dosfs just mad... 
;-) 3917 */ 3918 } 3919 3920 /* 3921 * The remaining ioctls are changing the state of the 3922 * superblock, so we do not allow them on read-only arrays. 3923 * However non-MD ioctls (e.g. get-size) will still come through 3924 * here and hit the 'default' below, so only disallow 3925 * 'md' ioctls, and switch to rw mode if started auto-readonly. 3926 */ 3927 if (_IOC_TYPE(cmd) == MD_MAJOR && 3928 mddev->ro && mddev->pers) { 3929 if (mddev->ro == 2) { 3930 mddev->ro = 0; 3931 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3932 md_wakeup_thread(mddev->thread); 3933 3934 } else { 3935 err = -EROFS; 3936 goto abort_unlock; 3937 } 3938 } 3939 3940 switch (cmd) 3941 { 3942 case ADD_NEW_DISK: 3943 { 3944 mdu_disk_info_t info; 3945 if (copy_from_user(&info, argp, sizeof(info))) 3946 err = -EFAULT; 3947 else 3948 err = add_new_disk(mddev, &info); 3949 goto done_unlock; 3950 } 3951 3952 case HOT_REMOVE_DISK: 3953 err = hot_remove_disk(mddev, new_decode_dev(arg)); 3954 goto done_unlock; 3955 3956 case HOT_ADD_DISK: 3957 err = hot_add_disk(mddev, new_decode_dev(arg)); 3958 goto done_unlock; 3959 3960 case SET_DISK_FAULTY: 3961 err = set_disk_faulty(mddev, new_decode_dev(arg)); 3962 goto done_unlock; 3963 3964 case RUN_ARRAY: 3965 err = do_md_run (mddev); 3966 goto done_unlock; 3967 3968 case SET_BITMAP_FILE: 3969 err = set_bitmap_file(mddev, (int)arg); 3970 goto done_unlock; 3971 3972 default: 3973 if (_IOC_TYPE(cmd) == MD_MAJOR) 3974 printk(KERN_WARNING "md: %s(pid %d) used" 3975 " obsolete MD ioctl, upgrade your" 3976 " software to use new ictls.\n", 3977 current->comm, current->pid); 3978 err = -EINVAL; 3979 goto abort_unlock; 3980 } 3981 3982 done_unlock: 3983 abort_unlock: 3984 mddev_unlock(mddev); 3985 3986 return err; 3987 done: 3988 if (err) 3989 MD_BUG(); 3990 abort: 3991 return err; 3992 } 3993 3994 static int md_open(struct inode *inode, struct file *file) 3995 { 3996 /* 3997 * Succeed if we can lock the mddev, which confirms that 3998 * it isn't being stopped right now. 3999 */ 4000 mddev_t *mddev = inode->i_bdev->bd_disk->private_data; 4001 int err; 4002 4003 if ((err = mddev_lock(mddev))) 4004 goto out; 4005 4006 err = 0; 4007 mddev_get(mddev); 4008 mddev_unlock(mddev); 4009 4010 check_disk_change(inode->i_bdev); 4011 out: 4012 return err; 4013 } 4014 4015 static int md_release(struct inode *inode, struct file * file) 4016 { 4017 mddev_t *mddev = inode->i_bdev->bd_disk->private_data; 4018 4019 if (!mddev) 4020 BUG(); 4021 mddev_put(mddev); 4022 4023 return 0; 4024 } 4025 4026 static int md_media_changed(struct gendisk *disk) 4027 { 4028 mddev_t *mddev = disk->private_data; 4029 4030 return mddev->changed; 4031 } 4032 4033 static int md_revalidate(struct gendisk *disk) 4034 { 4035 mddev_t *mddev = disk->private_data; 4036 4037 mddev->changed = 0; 4038 return 0; 4039 } 4040 static struct block_device_operations md_fops = 4041 { 4042 .owner = THIS_MODULE, 4043 .open = md_open, 4044 .release = md_release, 4045 .ioctl = md_ioctl, 4046 .getgeo = md_getgeo, 4047 .media_changed = md_media_changed, 4048 .revalidate_disk= md_revalidate, 4049 }; 4050 4051 static int md_thread(void * arg) 4052 { 4053 mdk_thread_t *thread = arg; 4054 4055 /* 4056 * md_thread is a 'system-thread', it's priority should be very 4057 * high. We avoid resource deadlocks individually in each 4058 * raid personality. (RAID5 does preallocation) We also use RR and 4059 * the very same RT priority as kswapd, thus we will never get 4060 * into a priority inversion deadlock. 
4061 * 4062 * we definitely have to have equal or higher priority than 4063 * bdflush, otherwise bdflush will deadlock if there are too 4064 * many dirty RAID5 blocks. 4065 */ 4066 4067 allow_signal(SIGKILL); 4068 while (!kthread_should_stop()) { 4069 4070 /* We need to wait INTERRUPTIBLE so that 4071 * we don't add to the load-average. 4072 * That means we need to be sure no signals are 4073 * pending 4074 */ 4075 if (signal_pending(current)) 4076 flush_signals(current); 4077 4078 wait_event_interruptible_timeout 4079 (thread->wqueue, 4080 test_bit(THREAD_WAKEUP, &thread->flags) 4081 || kthread_should_stop(), 4082 thread->timeout); 4083 try_to_freeze(); 4084 4085 clear_bit(THREAD_WAKEUP, &thread->flags); 4086 4087 thread->run(thread->mddev); 4088 } 4089 4090 return 0; 4091 } 4092 4093 void md_wakeup_thread(mdk_thread_t *thread) 4094 { 4095 if (thread) { 4096 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm); 4097 set_bit(THREAD_WAKEUP, &thread->flags); 4098 wake_up(&thread->wqueue); 4099 } 4100 } 4101 4102 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev, 4103 const char *name) 4104 { 4105 mdk_thread_t *thread; 4106 4107 thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL); 4108 if (!thread) 4109 return NULL; 4110 4111 init_waitqueue_head(&thread->wqueue); 4112 4113 thread->run = run; 4114 thread->mddev = mddev; 4115 thread->timeout = MAX_SCHEDULE_TIMEOUT; 4116 thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev)); 4117 if (IS_ERR(thread->tsk)) { 4118 kfree(thread); 4119 return NULL; 4120 } 4121 return thread; 4122 } 4123 4124 void md_unregister_thread(mdk_thread_t *thread) 4125 { 4126 dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid); 4127 4128 kthread_stop(thread->tsk); 4129 kfree(thread); 4130 } 4131 4132 void md_error(mddev_t *mddev, mdk_rdev_t *rdev) 4133 { 4134 if (!mddev) { 4135 MD_BUG(); 4136 return; 4137 } 4138 4139 if (!rdev || test_bit(Faulty, &rdev->flags)) 4140 return; 4141 /* 4142 dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n", 4143 mdname(mddev), 4144 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev), 4145 __builtin_return_address(0),__builtin_return_address(1), 4146 __builtin_return_address(2),__builtin_return_address(3)); 4147 */ 4148 if (!mddev->pers->error_handler) 4149 return; 4150 mddev->pers->error_handler(mddev,rdev); 4151 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4152 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4153 md_wakeup_thread(mddev->thread); 4154 md_new_event(mddev); 4155 } 4156 4157 /* seq_file implementation /proc/mdstat */ 4158 4159 static void status_unused(struct seq_file *seq) 4160 { 4161 int i = 0; 4162 mdk_rdev_t *rdev; 4163 struct list_head *tmp; 4164 4165 seq_printf(seq, "unused devices: "); 4166 4167 ITERATE_RDEV_PENDING(rdev,tmp) { 4168 char b[BDEVNAME_SIZE]; 4169 i++; 4170 seq_printf(seq, "%s ", 4171 bdevname(rdev->bdev,b)); 4172 } 4173 if (!i) 4174 seq_printf(seq, "<none>"); 4175 4176 seq_printf(seq, "\n"); 4177 } 4178 4179 4180 static void status_resync(struct seq_file *seq, mddev_t * mddev) 4181 { 4182 sector_t max_blocks, resync, res; 4183 unsigned long dt, db, rt; 4184 int scale; 4185 unsigned int per_milli; 4186 4187 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2; 4188 4189 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 4190 max_blocks = mddev->resync_max_sectors >> 1; 4191 else 4192 max_blocks = mddev->size; 4193 4194 /* 4195 * Should not happen. 
4196 */ 4197 if (!max_blocks) { 4198 MD_BUG(); 4199 return; 4200 } 4201 /* Pick 'scale' such that (resync>>scale)*1000 will fit 4202 * in a sector_t, and (max_blocks>>scale) will fit in a 4203 * u32, as those are the requirements for sector_div. 4204 * Thus 'scale' must be at least 10 4205 */ 4206 scale = 10; 4207 if (sizeof(sector_t) > sizeof(unsigned long)) { 4208 while ( max_blocks/2 > (1ULL<<(scale+32))) 4209 scale++; 4210 } 4211 res = (resync>>scale)*1000; 4212 sector_div(res, (u32)((max_blocks>>scale)+1)); 4213 4214 per_milli = res; 4215 { 4216 int i, x = per_milli/50, y = 20-x; 4217 seq_printf(seq, "["); 4218 for (i = 0; i < x; i++) 4219 seq_printf(seq, "="); 4220 seq_printf(seq, ">"); 4221 for (i = 0; i < y; i++) 4222 seq_printf(seq, "."); 4223 seq_printf(seq, "] "); 4224 } 4225 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)", 4226 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? 4227 "reshape" : 4228 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? 4229 "resync" : "recovery")), 4230 per_milli/10, per_milli % 10, 4231 (unsigned long long) resync, 4232 (unsigned long long) max_blocks); 4233 4234 /* 4235 * We do not want to overflow, so the order of operands and 4236 * the * 100 / 100 trick are important. We do a +1 to be 4237 * safe against division by zero. We only estimate anyway. 4238 * 4239 * dt: time from mark until now 4240 * db: blocks written from mark until now 4241 * rt: remaining time 4242 */ 4243 dt = ((jiffies - mddev->resync_mark) / HZ); 4244 if (!dt) dt++; 4245 db = resync - (mddev->resync_mark_cnt/2); 4246 rt = (dt * ((unsigned long)(max_blocks-resync) / (db/100+1)))/100; 4247 4248 seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6); 4249 4250 seq_printf(seq, " speed=%ldK/sec", db/dt); 4251 } 4252 4253 static void *md_seq_start(struct seq_file *seq, loff_t *pos) 4254 { 4255 struct list_head *tmp; 4256 loff_t l = *pos; 4257 mddev_t *mddev; 4258 4259 if (l >= 0x10000) 4260 return NULL; 4261 if (!l--) 4262 /* header */ 4263 return (void*)1; 4264 4265 spin_lock(&all_mddevs_lock); 4266 list_for_each(tmp,&all_mddevs) 4267 if (!l--) { 4268 mddev = list_entry(tmp, mddev_t, all_mddevs); 4269 mddev_get(mddev); 4270 spin_unlock(&all_mddevs_lock); 4271 return mddev; 4272 } 4273 spin_unlock(&all_mddevs_lock); 4274 if (!l--) 4275 return (void*)2;/* tail */ 4276 return NULL; 4277 } 4278 4279 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) 4280 { 4281 struct list_head *tmp; 4282 mddev_t *next_mddev, *mddev = v; 4283 4284 ++*pos; 4285 if (v == (void*)2) 4286 return NULL; 4287 4288 spin_lock(&all_mddevs_lock); 4289 if (v == (void*)1) 4290 tmp = all_mddevs.next; 4291 else 4292 tmp = mddev->all_mddevs.next; 4293 if (tmp != &all_mddevs) 4294 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs)); 4295 else { 4296 next_mddev = (void*)2; 4297 *pos = 0x10000; 4298 } 4299 spin_unlock(&all_mddevs_lock); 4300 4301 if (v != (void*)1) 4302 mddev_put(mddev); 4303 return next_mddev; 4304 4305 } 4306 4307 static void md_seq_stop(struct seq_file *seq, void *v) 4308 { 4309 mddev_t *mddev = v; 4310 4311 if (mddev && v != (void*)1 && v != (void*)2) 4312 mddev_put(mddev); 4313 } 4314 4315 struct mdstat_info { 4316 int event; 4317 }; 4318 4319 static int md_seq_show(struct seq_file *seq, void *v) 4320 { 4321 mddev_t *mddev = v; 4322 sector_t size; 4323 struct list_head *tmp2; 4324 mdk_rdev_t *rdev; 4325 struct mdstat_info *mi = seq->private; 4326 struct bitmap *bitmap; 4327 4328 if (v == (void*)1) { 4329 struct mdk_personality *pers; 4330 seq_printf(seq, "Personalities : "); 
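/*
 * Illustrative sketch, not part of this driver: status_resync() above
 * derives a per-mille progress value by pre-shifting both counters by
 * 'scale' so that (resync >> scale) * 1000 still fits in a sector_t and
 * the divisor fits in a u32 for sector_div().  The standalone function
 * below mirrors the same idea with ordinary 64-bit arithmetic.
 */
#if 0
#include <stdio.h>
#include <stdint.h>

static unsigned int per_mille(uint64_t done, uint64_t total)
{
        int scale = 10;         /* at least 10, as in status_resync() */

        /* grow the shift until the scaled total fits in 32 bits */
        while ((total >> scale) > 0xffffffffULL)
                scale++;
        return (unsigned int)(((done >> scale) * 1000) /
                              ((total >> scale) + 1));
}

int main(void)
{
        /* e.g. 1.5TiB resynced out of a 4TiB component, in sectors */
        printf("%u/1000 done\n",
               per_mille(3221225472ULL, 8589934592ULL));
        return 0;
}
#endif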
		spin_lock(&pers_lock);
		list_for_each_entry(pers, &pers_list, list)
			seq_printf(seq, "[%s] ", pers->name);

		spin_unlock(&pers_lock);
		seq_printf(seq, "\n");
		mi->event = atomic_read(&md_event_count);
		return 0;
	}
	if (v == (void*)2) {
		status_unused(seq);
		return 0;
	}

	if (mddev_lock(mddev) != 0)
		return -EINTR;
	if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) {
		seq_printf(seq, "%s : %sactive", mdname(mddev),
			   mddev->pers ? "" : "in");
		if (mddev->pers) {
			if (mddev->ro==1)
				seq_printf(seq, " (read-only)");
			if (mddev->ro==2)
				seq_printf(seq, "(auto-read-only)");
			seq_printf(seq, " %s", mddev->pers->name);
		}

		size = 0;
		ITERATE_RDEV(mddev,rdev,tmp2) {
			char b[BDEVNAME_SIZE];
			seq_printf(seq, " %s[%d]",
				   bdevname(rdev->bdev,b), rdev->desc_nr);
			if (test_bit(WriteMostly, &rdev->flags))
				seq_printf(seq, "(W)");
			if (test_bit(Faulty, &rdev->flags)) {
				seq_printf(seq, "(F)");
				continue;
			} else if (rdev->raid_disk < 0)
				seq_printf(seq, "(S)"); /* spare */
			size += rdev->size;
		}

		if (!list_empty(&mddev->disks)) {
			if (mddev->pers)
				seq_printf(seq, "\n %llu blocks",
					   (unsigned long long)mddev->array_size);
			else
				seq_printf(seq, "\n %llu blocks",
					   (unsigned long long)size);
		}
		if (mddev->persistent) {
			if (mddev->major_version != 0 ||
			    mddev->minor_version != 90) {
				seq_printf(seq, " super %d.%d",
					   mddev->major_version,
					   mddev->minor_version);
			}
		} else
			seq_printf(seq, " super non-persistent");

		if (mddev->pers) {
			mddev->pers->status (seq, mddev);
			seq_printf(seq, "\n ");
			if (mddev->pers->sync_request) {
				if (mddev->curr_resync > 2) {
					status_resync (seq, mddev);
					seq_printf(seq, "\n ");
				} else if (mddev->curr_resync == 1 || mddev->curr_resync == 2)
					seq_printf(seq, "\tresync=DELAYED\n ");
				else if (mddev->recovery_cp < MaxSector)
					seq_printf(seq, "\tresync=PENDING\n ");
			}
		} else
			seq_printf(seq, "\n ");

		if ((bitmap = mddev->bitmap)) {
			unsigned long chunk_kb;
			unsigned long flags;
			spin_lock_irqsave(&bitmap->lock, flags);
			chunk_kb = bitmap->chunksize >> 10;
			seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], "
				   "%lu%s chunk",
				   bitmap->pages - bitmap->missing_pages,
				   bitmap->pages,
				   (bitmap->pages - bitmap->missing_pages)
					<< (PAGE_SHIFT - 10),
				   chunk_kb ? chunk_kb : bitmap->chunksize,
				   chunk_kb ? "KB" : "B");
			if (bitmap->file) {
				seq_printf(seq, ", file: ");
				seq_path(seq, bitmap->file->f_vfsmnt,
					 bitmap->file->f_dentry, " \t\n");
			}

			seq_printf(seq, "\n");
			spin_unlock_irqrestore(&bitmap->lock, flags);
		}

		seq_printf(seq, "\n");
	}
	mddev_unlock(mddev);

	return 0;
}

static struct seq_operations md_seq_ops = {
	.start = md_seq_start,
	.next  = md_seq_next,
	.stop  = md_seq_stop,
	.show  = md_seq_show,
};

static int md_seq_open(struct inode *inode, struct file *file)
{
	int error;
	struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL);
	if (mi == NULL)
		return -ENOMEM;

	error = seq_open(file, &md_seq_ops);
	if (error)
		kfree(mi);
	else {
		struct seq_file *p = file->private_data;
		p->private = mi;
		mi->event = atomic_read(&md_event_count);
	}
	return error;
}

static int md_seq_release(struct inode *inode, struct file *file)
{
	struct seq_file *m = file->private_data;
	struct mdstat_info *mi = m->private;
	m->private = NULL;
	kfree(mi);
	return seq_release(inode, file);
}

static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
{
	struct seq_file *m = filp->private_data;
	struct mdstat_info *mi = m->private;
	int mask;

	poll_wait(filp, &md_event_waiters, wait);

	/* always allow read */
	mask = POLLIN | POLLRDNORM;

	if (mi->event != atomic_read(&md_event_count))
		mask |= POLLERR | POLLPRI;
	return mask;
}

static struct file_operations md_seq_fops = {
	.open    = md_seq_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = md_seq_release,
	.poll    = mdstat_poll,
};

int register_md_personality(struct mdk_personality *p)
{
	spin_lock(&pers_lock);
	list_add_tail(&p->list, &pers_list);
	printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level);
	spin_unlock(&pers_lock);
	return 0;
}

int unregister_md_personality(struct mdk_personality *p)
{
	printk(KERN_INFO "md: %s personality unregistered\n", p->name);
	spin_lock(&pers_lock);
	list_del_init(&p->list);
	spin_unlock(&pers_lock);
	return 0;
}

static int is_mddev_idle(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	struct list_head *tmp;
	int idle;
	unsigned long curr_events;

	idle = 1;
	ITERATE_RDEV(mddev,rdev,tmp) {
		struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
		curr_events = disk_stat_read(disk, sectors[0]) +
				disk_stat_read(disk, sectors[1]) -
				atomic_read(&disk->sync_io);
		/* The difference between curr_events and last_events
		 * will be affected by any new non-sync IO (making
		 * curr_events bigger) and any difference in the amount of
		 * in-flight sync IO (making curr_events bigger or smaller).
		 * The amount in-flight is currently limited to
		 * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6
		 * which is at most 4096 sectors.
		 * These numbers are fairly fragile and should be made
		 * more robust, probably by enforcing the
		 * 'window size' that md_do_sync sort-of uses.
		 *
		 * Note: the following is an unsigned comparison, so a
		 * difference of more than about 4096 sectors in either
		 * direction marks the device as busy; the +4096 bias
		 * keeps a small negative difference (sync IO completing)
		 * from wrapping around to a huge value.
		 */
		if ((curr_events - rdev->last_events + 4096) > 8192) {
			rdev->last_events = curr_events;
			idle = 0;
		}
	}
	return idle;
}

void md_done_sync(mddev_t *mddev, int blocks, int ok)
{
	/* another "blocks" (512byte) blocks have been synced */
	atomic_sub(blocks, &mddev->recovery_active);
	wake_up(&mddev->recovery_wait);
	if (!ok) {
		set_bit(MD_RECOVERY_ERR, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
		// stop recovery, signal do_sync ....
	}
}


/* md_write_start(mddev, bi)
 * If we need to update some array metadata (e.g. 'active' flag
 * in superblock) before writing, schedule a superblock update
 * and wait for it to complete.
 */
void md_write_start(mddev_t *mddev, struct bio *bi)
{
	if (bio_data_dir(bi) != WRITE)
		return;

	BUG_ON(mddev->ro == 1);
	if (mddev->ro == 2) {
		/* need to switch to read/write */
		mddev->ro = 0;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		md_wakeup_thread(mddev->thread);
	}
	atomic_inc(&mddev->writes_pending);
	if (mddev->in_sync) {
		spin_lock_irq(&mddev->write_lock);
		if (mddev->in_sync) {
			mddev->in_sync = 0;
			mddev->sb_dirty = 1;
			md_wakeup_thread(mddev->thread);
		}
		spin_unlock_irq(&mddev->write_lock);
	}
	wait_event(mddev->sb_wait, mddev->sb_dirty==0);
}

void md_write_end(mddev_t *mddev)
{
	if (atomic_dec_and_test(&mddev->writes_pending)) {
		if (mddev->safemode == 2)
			md_wakeup_thread(mddev->thread);
		else
			mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay);
	}
}

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);

#define SYNC_MARKS	10
#define SYNC_MARK_STEP	(3*HZ)
void md_do_sync(mddev_t *mddev)
{
	mddev_t *mddev2;
	unsigned int currspeed = 0,
		window;
	sector_t max_sectors, j, io_sectors;
	unsigned long mark[SYNC_MARKS];
	sector_t mark_cnt[SYNC_MARKS];
	int last_mark, m;
	struct list_head *tmp;
	sector_t last_check;
	int skipped = 0;

	/* just in case the thread restarts... */
	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
		return;

	/* we overload curr_resync somewhat here.
	 * 0 == not engaged in resync at all
	 * 2 == checking that there is no conflict with another sync
	 * 1 == like 2, but have yielded to allow conflicting resync to
	 *		commence
	 * other == active in resync - this many blocks
	 *
	 * Before starting a resync we must have set curr_resync to
	 * 2, and then checked that every "conflicting" array has curr_resync
	 * less than ours.  When we find one that is the same or higher
	 * we wait on resync_wait.  To avoid deadlock, we reduce curr_resync
	 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
	 * This will mean we have to start checking from the beginning again.
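	 * For example, if two arrays sharing a physical device both reach
	 * curr_resync == 2 at the same time, the one with the lower mddev
	 * address drops back to 1 and waits, so only one of them resyncs
	 * at a time.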
	 *
	 */

	do {
		mddev->curr_resync = 2;

	try_again:
		if (kthread_should_stop()) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			goto skip;
		}
		ITERATE_MDDEV(mddev2,tmp) {
			if (mddev2 == mddev)
				continue;
			if (mddev2->curr_resync &&
			    match_mddev_units(mddev,mddev2)) {
				DEFINE_WAIT(wq);
				if (mddev < mddev2 && mddev->curr_resync == 2) {
					/* arbitrarily yield */
					mddev->curr_resync = 1;
					wake_up(&resync_wait);
				}
				if (mddev > mddev2 && mddev->curr_resync == 1)
					/* no need to wait here, we can wait the next
					 * time 'round when curr_resync == 2
					 */
					continue;
				prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE);
				if (!kthread_should_stop() &&
				    mddev2->curr_resync >= mddev->curr_resync) {
					printk(KERN_INFO "md: delaying resync of %s"
					       " until %s has finished resync (they"
					       " share one or more physical units)\n",
					       mdname(mddev), mdname(mddev2));
					mddev_put(mddev2);
					schedule();
					finish_wait(&resync_wait, &wq);
					goto try_again;
				}
				finish_wait(&resync_wait, &wq);
			}
		}
	} while (mddev->curr_resync < 2);

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
		/* resync follows the size requested by the personality,
		 * which defaults to physical size, but can be virtual size
		 */
		max_sectors = mddev->resync_max_sectors;
		mddev->resync_mismatches = 0;
	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
		max_sectors = mddev->size << 1;
	else
		/* recovery follows the physical size of devices */
		max_sectors = mddev->size << 1;

	printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev));
	printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:"
	       " %d KB/sec/disc.\n", speed_min(mddev));
	printk(KERN_INFO "md: using maximum available idle IO bandwidth "
	       "(but not more than %d KB/sec) for reconstruction.\n",
	       speed_max(mddev));

	is_mddev_idle(mddev); /* this also initializes IO event counters */
	/* we don't use the checkpoint if there's a bitmap */
	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && !mddev->bitmap
	    && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
		j = mddev->recovery_cp;
	else
		j = 0;
	io_sectors = 0;
	for (m = 0; m < SYNC_MARKS; m++) {
		mark[m] = jiffies;
		mark_cnt[m] = io_sectors;
	}
	last_mark = 0;
	mddev->resync_mark = mark[last_mark];
	mddev->resync_mark_cnt = mark_cnt[last_mark];

	/*
	 * Tune reconstruction:
	 */
	window = 32*(PAGE_SIZE/512);
	printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n",
	       window/2, (unsigned long long) max_sectors/2);

	atomic_set(&mddev->recovery_active, 0);
	init_waitqueue_head(&mddev->recovery_wait);
	last_check = 0;

	if (j>2) {
		printk(KERN_INFO
		       "md: resuming recovery of %s from checkpoint.\n",
		       mdname(mddev));
		mddev->curr_resync = j;
	}

	while (j < max_sectors) {
		sector_t sectors;

		skipped = 0;
		sectors = mddev->pers->sync_request(mddev, j, &skipped,
						    currspeed < speed_min(mddev));
		if (sectors == 0) {
			set_bit(MD_RECOVERY_ERR, &mddev->recovery);
			goto out;
		}

		if (!skipped) { /* actual IO requested */
			io_sectors += sectors;
			atomic_add(sectors, &mddev->recovery_active);
		}

		j += sectors;
		if (j>1) mddev->curr_resync = j;
		if (last_check == 0)
			/* this is the earliest that rebuild will be
			 * visible in /proc/mdstat
			 */
			md_new_event(mddev);

		if (last_check + window > io_sectors || j == max_sectors)
			continue;

		last_check = io_sectors;

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) ||
		    test_bit(MD_RECOVERY_ERR, &mddev->recovery))
			break;

	repeat:
		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
			/* step marks */
			int next = (last_mark+1) % SYNC_MARKS;

			mddev->resync_mark = mark[next];
			mddev->resync_mark_cnt = mark_cnt[next];
			mark[next] = jiffies;
			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
			last_mark = next;
		}


		if (kthread_should_stop()) {
			/*
			 * got a signal, exit.
			 */
			printk(KERN_INFO
			       "md: md_do_sync() got signal ... exiting\n");
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			goto out;
		}

		/*
		 * this loop exits only when we are slower than the
		 * 'hard' speed limit, or the system was IO-idle for
		 * a jiffy.
		 * the system might be non-idle CPU-wise, but we only care
		 * about not overloading the IO subsystem. (things like an
		 * e2fsck being done on the RAID array should execute fast)
		 */
		mddev->queue->unplug_fn(mddev->queue);
		cond_resched();

		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
			/((jiffies-mddev->resync_mark)/HZ +1) +1;

		if (currspeed > speed_min(mddev)) {
			if ((currspeed > speed_max(mddev)) ||
					!is_mddev_idle(mddev)) {
				msleep(500);
				goto repeat;
			}
		}
	}
	printk(KERN_INFO "md: %s: sync done.\n", mdname(mddev));
	/*
	 * this also signals 'finished resyncing' to md_stop
	 */
 out:
	mddev->queue->unplug_fn(mddev->queue);

	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));

	/* tell personality that we are finished */
	mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);

	if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
	    test_bit(MD_RECOVERY_SYNC, &mddev->recovery) &&
	    !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
	    mddev->curr_resync > 2 &&
	    mddev->curr_resync >= mddev->recovery_cp) {
		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
			printk(KERN_INFO
			       "md: checkpointing recovery of %s.\n",
			       mdname(mddev));
			mddev->recovery_cp = mddev->curr_resync;
		} else
			mddev->recovery_cp = MaxSector;
	}

 skip:
	mddev->curr_resync = 0;
	wake_up(&resync_wait);
	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
}
EXPORT_SYMBOL_GPL(md_do_sync);


/*
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE (and might set MD_RECOVERY_ERR)
 * and wakes up this thread which will reap the thread and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ if the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If array is degraded, try to add spare devices.
 *  6/ If array has spares or is not in-sync, start a resync thread.
 */
void md_check_recovery(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	struct list_head *rtmp;


	if (mddev->bitmap)
		bitmap_daemon_work(mddev->bitmap);

	if (mddev->ro)
		return;

	if (signal_pending(current)) {
		if (mddev->pers->sync_request) {
			printk(KERN_INFO "md: %s in immediate safe mode\n",
			       mdname(mddev));
			mddev->safemode = 2;
		}
		flush_signals(current);
	}

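	/* Nothing to do unless the superblock is dirty, recovery has been
	 * requested or has just finished, or safemode needs the 'clean'
	 * state to be recorded in the superblock.
	 */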
	if ( ! (
		mddev->sb_dirty ||
		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
		(mddev->safemode == 1) ||
		(mddev->safemode == 2 && !atomic_read(&mddev->writes_pending)
		 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
		))
		return;

	if (mddev_trylock(mddev)) {
		int spares = 0;

		spin_lock_irq(&mddev->write_lock);
		if (mddev->safemode && !atomic_read(&mddev->writes_pending) &&
		    !mddev->in_sync && mddev->recovery_cp == MaxSector) {
			mddev->in_sync = 1;
			mddev->sb_dirty = 1;
		}
		if (mddev->safemode == 1)
			mddev->safemode = 0;
		spin_unlock_irq(&mddev->write_lock);

		if (mddev->sb_dirty)
			md_update_sb(mddev);


		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
			/* resync/recovery still happening */
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}
		if (mddev->sync_thread) {
			/* resync has finished, collect result */
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
			if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) &&
			    !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
				/* success...*/
				/* activate any spares */
				mddev->pers->spare_active(mddev);
			}
			md_update_sb(mddev);

			/* if array is no-longer degraded, then any saved_raid_disk
			 * information must be scrapped
			 */
			if (!mddev->degraded)
				ITERATE_RDEV(mddev,rdev,rtmp)
					rdev->saved_raid_disk = -1;

			mddev->recovery = 0;
			/* flag recovery needed just to double check */
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			md_new_event(mddev);
			goto unlock;
		}
		/* Clear some bits that don't mean anything, but
		 * might be left set
		 */
		clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		clear_bit(MD_RECOVERY_ERR, &mddev->recovery);
		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);

		/* no recovery is running.
		 * remove any failed drives, then
		 * add spares if possible.
		 * Spares are also removed and re-added, to allow
		 * the personality to fail the re-add.
		 */
		ITERATE_RDEV(mddev,rdev,rtmp)
			if (rdev->raid_disk >= 0 &&
			    (test_bit(Faulty, &rdev->flags) || !test_bit(In_sync, &rdev->flags)) &&
			    atomic_read(&rdev->nr_pending)==0) {
				if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0) {
					char nm[20];
					sprintf(nm, "rd%d", rdev->raid_disk);
					sysfs_remove_link(&mddev->kobj, nm);
					rdev->raid_disk = -1;
				}
			}

		if (mddev->degraded) {
			ITERATE_RDEV(mddev,rdev,rtmp)
				if (rdev->raid_disk < 0
				    && !test_bit(Faulty, &rdev->flags)) {
					if (mddev->pers->hot_add_disk(mddev,rdev)) {
						char nm[20];
						sprintf(nm, "rd%d", rdev->raid_disk);
						sysfs_create_link(&mddev->kobj, &rdev->kobj, nm);
						spares++;
						md_new_event(mddev);
					} else
						break;
				}
		}

		if (spares) {
			clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
			clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
		} else if (mddev->recovery_cp < MaxSector) {
			set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
		} else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery))
			/* nothing to be done ... */
			goto unlock;

		if (mddev->pers->sync_request) {
			set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
			if (spares && mddev->bitmap && !mddev->bitmap->file) {
				/* We are adding a device or devices to an array
				 * which has the bitmap stored on all devices.
				 * So make sure all bitmap pages get written
				 */
				bitmap_write_all(mddev->bitmap);
			}
			mddev->sync_thread = md_register_thread(md_do_sync,
								mddev,
								"%s_resync");
			if (!mddev->sync_thread) {
				printk(KERN_ERR "%s: could not start resync"
				       " thread...\n",
				       mdname(mddev));
				/* leave the spares where they are, it shouldn't hurt */
				mddev->recovery = 0;
			} else
				md_wakeup_thread(mddev->sync_thread);
			md_new_event(mddev);
		}
	unlock:
		mddev_unlock(mddev);
	}
}

static int md_notify_reboot(struct notifier_block *this,
			    unsigned long code, void *x)
{
	struct list_head *tmp;
	mddev_t *mddev;

	if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {

		printk(KERN_INFO "md: stopping all md devices.\n");

		ITERATE_MDDEV(mddev,tmp)
			if (mddev_trylock(mddev))
				do_md_stop (mddev, 1);
		/*
		 * certain more exotic SCSI devices are known to be
		 * volatile wrt too early system reboots. While the
		 * right place to handle this issue is the given
		 * driver, we do want to have a safe RAID driver ...
		 */
		mdelay(1000*1);
	}
	return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};

static void md_geninit(void)
{
	struct proc_dir_entry *p;

	dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	p = create_proc_entry("mdstat", S_IRUGO, NULL);
	if (p)
		p->proc_fops = &md_seq_fops;
}

static int __init md_init(void)
{
	int minor;

	printk(KERN_INFO "md: md driver %d.%d.%d MAX_MD_DEVS=%d,"
	       " MD_SB_DISKS=%d\n",
	       MD_MAJOR_VERSION, MD_MINOR_VERSION,
	       MD_PATCHLEVEL_VERSION, MAX_MD_DEVS, MD_SB_DISKS);
	printk(KERN_INFO "md: bitmap version %d.%d\n", BITMAP_MAJOR_HI,
	       BITMAP_MINOR);

	if (register_blkdev(MAJOR_NR, "md"))
		return -1;
	if ((mdp_major=register_blkdev(0, "mdp"))<=0) {
		unregister_blkdev(MAJOR_NR, "md");
		return -1;
	}
	devfs_mk_dir("md");
	blk_register_region(MKDEV(MAJOR_NR, 0), MAX_MD_DEVS, THIS_MODULE,
			    md_probe, NULL, NULL);
	blk_register_region(MKDEV(mdp_major, 0), MAX_MD_DEVS<<MdpMinorShift, THIS_MODULE,
			    md_probe, NULL, NULL);

	for (minor=0; minor < MAX_MD_DEVS; ++minor)
		devfs_mk_bdev(MKDEV(MAJOR_NR, minor),
			      S_IFBLK|S_IRUSR|S_IWUSR,
			      "md/%d", minor);

	for (minor=0; minor < MAX_MD_DEVS; ++minor)
		devfs_mk_bdev(MKDEV(mdp_major, minor<<MdpMinorShift),
			      S_IFBLK|S_IRUSR|S_IWUSR,
			      "md/mdp%d", minor);


	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table, 1);

	md_geninit();
	return (0);
}


#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
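 *
 * md_autodetect_dev() is called by the partition-scanning code for each
 * partition it finds marked as Linux raid autodetect (type 0xfd on DOS
 * partition tables); the devices collected here are then assembled by
 * autostart_arrays() once scanning is complete.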
 */
static dev_t detected_devices[128];
static int dev_cnt;

void md_autodetect_dev(dev_t dev)
{
	if (dev_cnt >= 0 && dev_cnt < 127)
		detected_devices[dev_cnt++] = dev;
}


static void autostart_arrays(int part)
{
	mdk_rdev_t *rdev;
	int i;

	printk(KERN_INFO "md: Autodetecting RAID arrays.\n");

	for (i = 0; i < dev_cnt; i++) {
		dev_t dev = detected_devices[i];

		rdev = md_import_device(dev, 0, 0);
		if (IS_ERR(rdev))
			continue;

		if (test_bit(Faulty, &rdev->flags)) {
			MD_BUG();
			continue;
		}
		list_add(&rdev->same_set, &pending_raid_disks);
	}
	dev_cnt = 0;

	autorun_devices(part);
}

#endif

static __exit void md_exit(void)
{
	mddev_t *mddev;
	struct list_head *tmp;
	int i;
	blk_unregister_region(MKDEV(MAJOR_NR,0), MAX_MD_DEVS);
	blk_unregister_region(MKDEV(mdp_major,0), MAX_MD_DEVS << MdpMinorShift);
	for (i=0; i < MAX_MD_DEVS; i++)
		devfs_remove("md/%d", i);
	for (i=0; i < MAX_MD_DEVS; i++)
		devfs_remove("md/d%d", i);

	devfs_remove("md");

	unregister_blkdev(MAJOR_NR,"md");
	unregister_blkdev(mdp_major, "mdp");
	unregister_reboot_notifier(&md_notifier);
	unregister_sysctl_table(raid_table_header);
	remove_proc_entry("mdstat", NULL);
	ITERATE_MDDEV(mddev,tmp) {
		struct gendisk *disk = mddev->gendisk;
		if (!disk)
			continue;
		export_array(mddev);
		del_gendisk(disk);
		put_disk(disk);
		mddev->gendisk = NULL;
		mddev_put(mddev);
	}
}

module_init(md_init)
module_exit(md_exit)

static int get_ro(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
	char *e;
	int num = simple_strtoul(val, &e, 10);
	if (*val && (*e == '\0' || *e == '\n')) {
		start_readonly = num;
		return 0;
	}
	return -EINVAL;
}

module_param_call(start_ro, set_ro, get_ro, NULL, 0600);
module_param(start_dirty_degraded, int, 0644);


EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_print_devices);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);
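
/*
 * Illustrative sketch (not part of the driver): how a RAID personality
 * module would typically hook into md through register_md_personality()
 * and unregister_md_personality() exported above.  The "example" name,
 * the level number and the example_* handlers are hypothetical
 * placeholders; a real personality fills in whichever mdk_personality
 * callbacks it supports (run, stop, status, make_request, sync_request,
 * hot_add_disk, ...).
 *
 *	static struct mdk_personality example_personality = {
 *		.name	= "example",
 *		.level	= -1,			// hypothetical level
 *		.owner	= THIS_MODULE,
 *		.run	= example_run,		// set up the array
 *		.stop	= example_stop,		// tear it down
 *		.status	= example_status,	// report in /proc/mdstat
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return register_md_personality(&example_personality);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		unregister_md_personality(&example_personality);
 *	}
 *
 *	module_init(example_init);
 *	module_exit(example_exit);
 */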