/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/module.h>
#include <linux/config.h>
#include <linux/kthread.h>
#include <linux/linkage.h>
#include <linux/raid/md.h>
#include <linux/raid/bitmap.h>
#include <linux/sysctl.h>
#include <linux/devfs_fs_kernel.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/suspend.h>
#include <linux/poll.h>
#include <linux/mutex.h>

#include <linux/init.h>

#include <linux/file.h>

#ifdef CONFIG_KMOD
#include <linux/kmod.h>
#endif

#include <asm/unaligned.h>

#define MAJOR_NR MD_MAJOR
#define MD_DRIVER

/* 63 partitions with the alternate major number (mdp) */
#define MdpMinorShift 6

#define DEBUG 0
#define dprintk(x...) ((void)(DEBUG && printk(x)))


#ifndef MODULE
static void autostart_arrays (int part);
#endif

static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
 * or /sys/block/mdX/md/sync_speed_{min,max}
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(mddev_t *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(mddev_t *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}
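/*
 * Illustrative sketch only (the actual throttling lives in md_do_sync(),
 * which is outside this excerpt): a resync loop is expected to consult
 * these helpers roughly like
 *
 *	if (currspeed > speed_min(mddev)) {
 *		if (currspeed > speed_max(mddev) || !is_mddev_idle(mddev)) {
 *			msleep(500);
 *			goto repeat;
 *		}
 *	}
 *
 * i.e. always allow at least speed_min, and back off above speed_max or
 * whenever other IO is competing for the member devices.
 */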

static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MIN,
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MAX,
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_dir_table[] = {
	{
		.ctl_name	= DEV_RAID,
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_table,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_root_table[] = {
	{
		.ctl_name	= CTL_DEV,
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ .ctl_name = 0 }
};

static struct block_device_operations md_fops;

static int start_readonly;

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
	sysfs_notify(&mddev->kobj, NULL, "sync_action");
}
EXPORT_SYMBOL_GPL(md_new_event);

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);


/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
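 *
 * A typical (purely illustrative) use, mirroring md_print_devices()
 * later in this file:
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *
 *	ITERATE_MDDEV(mddev,tmp) {
 *		... inspect this mddev while holding a reference ...
 *	}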
184 */ 185 #define ITERATE_MDDEV(mddev,tmp) \ 186 \ 187 for (({ spin_lock(&all_mddevs_lock); \ 188 tmp = all_mddevs.next; \ 189 mddev = NULL;}); \ 190 ({ if (tmp != &all_mddevs) \ 191 mddev_get(list_entry(tmp, mddev_t, all_mddevs));\ 192 spin_unlock(&all_mddevs_lock); \ 193 if (mddev) mddev_put(mddev); \ 194 mddev = list_entry(tmp, mddev_t, all_mddevs); \ 195 tmp != &all_mddevs;}); \ 196 ({ spin_lock(&all_mddevs_lock); \ 197 tmp = tmp->next;}) \ 198 ) 199 200 201 static int md_fail_request (request_queue_t *q, struct bio *bio) 202 { 203 bio_io_error(bio, bio->bi_size); 204 return 0; 205 } 206 207 static inline mddev_t *mddev_get(mddev_t *mddev) 208 { 209 atomic_inc(&mddev->active); 210 return mddev; 211 } 212 213 static void mddev_put(mddev_t *mddev) 214 { 215 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) 216 return; 217 if (!mddev->raid_disks && list_empty(&mddev->disks)) { 218 list_del(&mddev->all_mddevs); 219 spin_unlock(&all_mddevs_lock); 220 blk_cleanup_queue(mddev->queue); 221 kobject_unregister(&mddev->kobj); 222 } else 223 spin_unlock(&all_mddevs_lock); 224 } 225 226 static mddev_t * mddev_find(dev_t unit) 227 { 228 mddev_t *mddev, *new = NULL; 229 230 retry: 231 spin_lock(&all_mddevs_lock); 232 list_for_each_entry(mddev, &all_mddevs, all_mddevs) 233 if (mddev->unit == unit) { 234 mddev_get(mddev); 235 spin_unlock(&all_mddevs_lock); 236 kfree(new); 237 return mddev; 238 } 239 240 if (new) { 241 list_add(&new->all_mddevs, &all_mddevs); 242 spin_unlock(&all_mddevs_lock); 243 return new; 244 } 245 spin_unlock(&all_mddevs_lock); 246 247 new = kzalloc(sizeof(*new), GFP_KERNEL); 248 if (!new) 249 return NULL; 250 251 new->unit = unit; 252 if (MAJOR(unit) == MD_MAJOR) 253 new->md_minor = MINOR(unit); 254 else 255 new->md_minor = MINOR(unit) >> MdpMinorShift; 256 257 mutex_init(&new->reconfig_mutex); 258 INIT_LIST_HEAD(&new->disks); 259 INIT_LIST_HEAD(&new->all_mddevs); 260 init_timer(&new->safemode_timer); 261 atomic_set(&new->active, 1); 262 spin_lock_init(&new->write_lock); 263 init_waitqueue_head(&new->sb_wait); 264 265 new->queue = blk_alloc_queue(GFP_KERNEL); 266 if (!new->queue) { 267 kfree(new); 268 return NULL; 269 } 270 set_bit(QUEUE_FLAG_CLUSTER, &new->queue->queue_flags); 271 272 blk_queue_make_request(new->queue, md_fail_request); 273 274 goto retry; 275 } 276 277 static inline int mddev_lock(mddev_t * mddev) 278 { 279 return mutex_lock_interruptible(&mddev->reconfig_mutex); 280 } 281 282 static inline int mddev_trylock(mddev_t * mddev) 283 { 284 return mutex_trylock(&mddev->reconfig_mutex); 285 } 286 287 static inline void mddev_unlock(mddev_t * mddev) 288 { 289 mutex_unlock(&mddev->reconfig_mutex); 290 291 md_wakeup_thread(mddev->thread); 292 } 293 294 static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) 295 { 296 mdk_rdev_t * rdev; 297 struct list_head *tmp; 298 299 ITERATE_RDEV(mddev,rdev,tmp) { 300 if (rdev->desc_nr == nr) 301 return rdev; 302 } 303 return NULL; 304 } 305 306 static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev) 307 { 308 struct list_head *tmp; 309 mdk_rdev_t *rdev; 310 311 ITERATE_RDEV(mddev,rdev,tmp) { 312 if (rdev->bdev->bd_dev == dev) 313 return rdev; 314 } 315 return NULL; 316 } 317 318 static struct mdk_personality *find_pers(int level, char *clevel) 319 { 320 struct mdk_personality *pers; 321 list_for_each_entry(pers, &pers_list, list) { 322 if (level != LEVEL_NONE && pers->level == level) 323 return pers; 324 if (strcmp(pers->name, clevel)==0) 325 return pers; 326 } 327 return NULL; 328 } 329 330 static inline sector_t 
calc_dev_sboffset(struct block_device *bdev) 331 { 332 sector_t size = bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 333 return MD_NEW_SIZE_BLOCKS(size); 334 } 335 336 static sector_t calc_dev_size(mdk_rdev_t *rdev, unsigned chunk_size) 337 { 338 sector_t size; 339 340 size = rdev->sb_offset; 341 342 if (chunk_size) 343 size &= ~((sector_t)chunk_size/1024 - 1); 344 return size; 345 } 346 347 static int alloc_disk_sb(mdk_rdev_t * rdev) 348 { 349 if (rdev->sb_page) 350 MD_BUG(); 351 352 rdev->sb_page = alloc_page(GFP_KERNEL); 353 if (!rdev->sb_page) { 354 printk(KERN_ALERT "md: out of memory.\n"); 355 return -EINVAL; 356 } 357 358 return 0; 359 } 360 361 static void free_disk_sb(mdk_rdev_t * rdev) 362 { 363 if (rdev->sb_page) { 364 put_page(rdev->sb_page); 365 rdev->sb_loaded = 0; 366 rdev->sb_page = NULL; 367 rdev->sb_offset = 0; 368 rdev->size = 0; 369 } 370 } 371 372 373 static int super_written(struct bio *bio, unsigned int bytes_done, int error) 374 { 375 mdk_rdev_t *rdev = bio->bi_private; 376 mddev_t *mddev = rdev->mddev; 377 if (bio->bi_size) 378 return 1; 379 380 if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) 381 md_error(mddev, rdev); 382 383 if (atomic_dec_and_test(&mddev->pending_writes)) 384 wake_up(&mddev->sb_wait); 385 bio_put(bio); 386 return 0; 387 } 388 389 static int super_written_barrier(struct bio *bio, unsigned int bytes_done, int error) 390 { 391 struct bio *bio2 = bio->bi_private; 392 mdk_rdev_t *rdev = bio2->bi_private; 393 mddev_t *mddev = rdev->mddev; 394 if (bio->bi_size) 395 return 1; 396 397 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && 398 error == -EOPNOTSUPP) { 399 unsigned long flags; 400 /* barriers don't appear to be supported :-( */ 401 set_bit(BarriersNotsupp, &rdev->flags); 402 mddev->barriers_work = 0; 403 spin_lock_irqsave(&mddev->write_lock, flags); 404 bio2->bi_next = mddev->biolist; 405 mddev->biolist = bio2; 406 spin_unlock_irqrestore(&mddev->write_lock, flags); 407 wake_up(&mddev->sb_wait); 408 bio_put(bio); 409 return 0; 410 } 411 bio_put(bio2); 412 bio->bi_private = rdev; 413 return super_written(bio, bytes_done, error); 414 } 415 416 void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, 417 sector_t sector, int size, struct page *page) 418 { 419 /* write first size bytes of page to sector of rdev 420 * Increment mddev->pending_writes before returning 421 * and decrement it on completion, waking up sb_wait 422 * if zero is reached. 423 * If an error occurred, call md_error 424 * 425 * As we might need to resubmit the request if BIO_RW_BARRIER 426 * causes ENOTSUPP, we allocate a spare bio... 427 */ 428 struct bio *bio = bio_alloc(GFP_NOIO, 1); 429 int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC); 430 431 bio->bi_bdev = rdev->bdev; 432 bio->bi_sector = sector; 433 bio_add_page(bio, page, size, 0); 434 bio->bi_private = rdev; 435 bio->bi_end_io = super_written; 436 bio->bi_rw = rw; 437 438 atomic_inc(&mddev->pending_writes); 439 if (!test_bit(BarriersNotsupp, &rdev->flags)) { 440 struct bio *rbio; 441 rw |= (1<<BIO_RW_BARRIER); 442 rbio = bio_clone(bio, GFP_NOIO); 443 rbio->bi_private = bio; 444 rbio->bi_end_io = super_written_barrier; 445 submit_bio(rw, rbio); 446 } else 447 submit_bio(rw, bio); 448 } 449 450 void md_super_wait(mddev_t *mddev) 451 { 452 /* wait for all superblock writes that were scheduled to complete. 
453 * if any had to be retried (due to BARRIER problems), retry them 454 */ 455 DEFINE_WAIT(wq); 456 for(;;) { 457 prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE); 458 if (atomic_read(&mddev->pending_writes)==0) 459 break; 460 while (mddev->biolist) { 461 struct bio *bio; 462 spin_lock_irq(&mddev->write_lock); 463 bio = mddev->biolist; 464 mddev->biolist = bio->bi_next ; 465 bio->bi_next = NULL; 466 spin_unlock_irq(&mddev->write_lock); 467 submit_bio(bio->bi_rw, bio); 468 } 469 schedule(); 470 } 471 finish_wait(&mddev->sb_wait, &wq); 472 } 473 474 static int bi_complete(struct bio *bio, unsigned int bytes_done, int error) 475 { 476 if (bio->bi_size) 477 return 1; 478 479 complete((struct completion*)bio->bi_private); 480 return 0; 481 } 482 483 int sync_page_io(struct block_device *bdev, sector_t sector, int size, 484 struct page *page, int rw) 485 { 486 struct bio *bio = bio_alloc(GFP_NOIO, 1); 487 struct completion event; 488 int ret; 489 490 rw |= (1 << BIO_RW_SYNC); 491 492 bio->bi_bdev = bdev; 493 bio->bi_sector = sector; 494 bio_add_page(bio, page, size, 0); 495 init_completion(&event); 496 bio->bi_private = &event; 497 bio->bi_end_io = bi_complete; 498 submit_bio(rw, bio); 499 wait_for_completion(&event); 500 501 ret = test_bit(BIO_UPTODATE, &bio->bi_flags); 502 bio_put(bio); 503 return ret; 504 } 505 EXPORT_SYMBOL_GPL(sync_page_io); 506 507 static int read_disk_sb(mdk_rdev_t * rdev, int size) 508 { 509 char b[BDEVNAME_SIZE]; 510 if (!rdev->sb_page) { 511 MD_BUG(); 512 return -EINVAL; 513 } 514 if (rdev->sb_loaded) 515 return 0; 516 517 518 if (!sync_page_io(rdev->bdev, rdev->sb_offset<<1, size, rdev->sb_page, READ)) 519 goto fail; 520 rdev->sb_loaded = 1; 521 return 0; 522 523 fail: 524 printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n", 525 bdevname(rdev->bdev,b)); 526 return -EINVAL; 527 } 528 529 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2) 530 { 531 if ( (sb1->set_uuid0 == sb2->set_uuid0) && 532 (sb1->set_uuid1 == sb2->set_uuid1) && 533 (sb1->set_uuid2 == sb2->set_uuid2) && 534 (sb1->set_uuid3 == sb2->set_uuid3)) 535 536 return 1; 537 538 return 0; 539 } 540 541 542 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2) 543 { 544 int ret; 545 mdp_super_t *tmp1, *tmp2; 546 547 tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL); 548 tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL); 549 550 if (!tmp1 || !tmp2) { 551 ret = 0; 552 printk(KERN_INFO "md.c: sb1 is not equal to sb2!\n"); 553 goto abort; 554 } 555 556 *tmp1 = *sb1; 557 *tmp2 = *sb2; 558 559 /* 560 * nr_disks is not constant 561 */ 562 tmp1->nr_disks = 0; 563 tmp2->nr_disks = 0; 564 565 if (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4)) 566 ret = 0; 567 else 568 ret = 1; 569 570 abort: 571 kfree(tmp1); 572 kfree(tmp2); 573 return ret; 574 } 575 576 static unsigned int calc_sb_csum(mdp_super_t * sb) 577 { 578 unsigned int disk_csum, csum; 579 580 disk_csum = sb->sb_csum; 581 sb->sb_csum = 0; 582 csum = csum_partial((void *)sb, MD_SB_BYTES, 0); 583 sb->sb_csum = disk_csum; 584 return csum; 585 } 586 587 588 /* 589 * Handle superblock details. 590 * We want to be able to handle multiple superblock formats 591 * so we have a common interface to them all, and an array of 592 * different handlers. 593 * We rely on user-space to write the initial superblock, and support 594 * reading and updating of superblocks. 595 * Interface methods are: 596 * int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version) 597 * loads and validates a superblock on dev. 
598 * if refdev != NULL, compare superblocks on both devices 599 * Return: 600 * 0 - dev has a superblock that is compatible with refdev 601 * 1 - dev has a superblock that is compatible and newer than refdev 602 * so dev should be used as the refdev in future 603 * -EINVAL superblock incompatible or invalid 604 * -othererror e.g. -EIO 605 * 606 * int validate_super(mddev_t *mddev, mdk_rdev_t *dev) 607 * Verify that dev is acceptable into mddev. 608 * The first time, mddev->raid_disks will be 0, and data from 609 * dev should be merged in. Subsequent calls check that dev 610 * is new enough. Return 0 or -EINVAL 611 * 612 * void sync_super(mddev_t *mddev, mdk_rdev_t *dev) 613 * Update the superblock for rdev with data in mddev 614 * This does not write to disc. 615 * 616 */ 617 618 struct super_type { 619 char *name; 620 struct module *owner; 621 int (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version); 622 int (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev); 623 void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev); 624 }; 625 626 /* 627 * load_super for 0.90.0 628 */ 629 static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) 630 { 631 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 632 mdp_super_t *sb; 633 int ret; 634 sector_t sb_offset; 635 636 /* 637 * Calculate the position of the superblock, 638 * it's at the end of the disk. 639 * 640 * It also happens to be a multiple of 4Kb. 641 */ 642 sb_offset = calc_dev_sboffset(rdev->bdev); 643 rdev->sb_offset = sb_offset; 644 645 ret = read_disk_sb(rdev, MD_SB_BYTES); 646 if (ret) return ret; 647 648 ret = -EINVAL; 649 650 bdevname(rdev->bdev, b); 651 sb = (mdp_super_t*)page_address(rdev->sb_page); 652 653 if (sb->md_magic != MD_SB_MAGIC) { 654 printk(KERN_ERR "md: invalid raid superblock magic on %s\n", 655 b); 656 goto abort; 657 } 658 659 if (sb->major_version != 0 || 660 sb->minor_version < 90 || 661 sb->minor_version > 91) { 662 printk(KERN_WARNING "Bad version number %d.%d on %s\n", 663 sb->major_version, sb->minor_version, 664 b); 665 goto abort; 666 } 667 668 if (sb->raid_disks <= 0) 669 goto abort; 670 671 if (csum_fold(calc_sb_csum(sb)) != csum_fold(sb->sb_csum)) { 672 printk(KERN_WARNING "md: invalid superblock checksum on %s\n", 673 b); 674 goto abort; 675 } 676 677 rdev->preferred_minor = sb->md_minor; 678 rdev->data_offset = 0; 679 rdev->sb_size = MD_SB_BYTES; 680 681 if (sb->level == LEVEL_MULTIPATH) 682 rdev->desc_nr = -1; 683 else 684 rdev->desc_nr = sb->this_disk.number; 685 686 if (refdev == 0) 687 ret = 1; 688 else { 689 __u64 ev1, ev2; 690 mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page); 691 if (!uuid_equal(refsb, sb)) { 692 printk(KERN_WARNING "md: %s has different UUID to %s\n", 693 b, bdevname(refdev->bdev,b2)); 694 goto abort; 695 } 696 if (!sb_equal(refsb, sb)) { 697 printk(KERN_WARNING "md: %s has same UUID" 698 " but different superblock to %s\n", 699 b, bdevname(refdev->bdev, b2)); 700 goto abort; 701 } 702 ev1 = md_event(sb); 703 ev2 = md_event(refsb); 704 if (ev1 > ev2) 705 ret = 1; 706 else 707 ret = 0; 708 } 709 rdev->size = calc_dev_size(rdev, sb->chunk_size); 710 711 if (rdev->size < sb->size && sb->level > 1) 712 /* "this cannot possibly happen" ... 
*/ 713 ret = -EINVAL; 714 715 abort: 716 return ret; 717 } 718 719 /* 720 * validate_super for 0.90.0 721 */ 722 static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) 723 { 724 mdp_disk_t *desc; 725 mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page); 726 727 rdev->raid_disk = -1; 728 rdev->flags = 0; 729 if (mddev->raid_disks == 0) { 730 mddev->major_version = 0; 731 mddev->minor_version = sb->minor_version; 732 mddev->patch_version = sb->patch_version; 733 mddev->persistent = ! sb->not_persistent; 734 mddev->chunk_size = sb->chunk_size; 735 mddev->ctime = sb->ctime; 736 mddev->utime = sb->utime; 737 mddev->level = sb->level; 738 mddev->clevel[0] = 0; 739 mddev->layout = sb->layout; 740 mddev->raid_disks = sb->raid_disks; 741 mddev->size = sb->size; 742 mddev->events = md_event(sb); 743 mddev->bitmap_offset = 0; 744 mddev->default_bitmap_offset = MD_SB_BYTES >> 9; 745 746 if (mddev->minor_version >= 91) { 747 mddev->reshape_position = sb->reshape_position; 748 mddev->delta_disks = sb->delta_disks; 749 mddev->new_level = sb->new_level; 750 mddev->new_layout = sb->new_layout; 751 mddev->new_chunk = sb->new_chunk; 752 } else { 753 mddev->reshape_position = MaxSector; 754 mddev->delta_disks = 0; 755 mddev->new_level = mddev->level; 756 mddev->new_layout = mddev->layout; 757 mddev->new_chunk = mddev->chunk_size; 758 } 759 760 if (sb->state & (1<<MD_SB_CLEAN)) 761 mddev->recovery_cp = MaxSector; 762 else { 763 if (sb->events_hi == sb->cp_events_hi && 764 sb->events_lo == sb->cp_events_lo) { 765 mddev->recovery_cp = sb->recovery_cp; 766 } else 767 mddev->recovery_cp = 0; 768 } 769 770 memcpy(mddev->uuid+0, &sb->set_uuid0, 4); 771 memcpy(mddev->uuid+4, &sb->set_uuid1, 4); 772 memcpy(mddev->uuid+8, &sb->set_uuid2, 4); 773 memcpy(mddev->uuid+12,&sb->set_uuid3, 4); 774 775 mddev->max_disks = MD_SB_DISKS; 776 777 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) && 778 mddev->bitmap_file == NULL) { 779 if (mddev->level != 1 && mddev->level != 4 780 && mddev->level != 5 && mddev->level != 6 781 && mddev->level != 10) { 782 /* FIXME use a better test */ 783 printk(KERN_WARNING "md: bitmaps not supported for this level.\n"); 784 return -EINVAL; 785 } 786 mddev->bitmap_offset = mddev->default_bitmap_offset; 787 } 788 789 } else if (mddev->pers == NULL) { 790 /* Insist on good event counter while assembling */ 791 __u64 ev1 = md_event(sb); 792 ++ev1; 793 if (ev1 < mddev->events) 794 return -EINVAL; 795 } else if (mddev->bitmap) { 796 /* if adding to array with a bitmap, then we can accept an 797 * older device ... but not too old. 798 */ 799 __u64 ev1 = md_event(sb); 800 if (ev1 < mddev->bitmap->events_cleared) 801 return 0; 802 } else /* just a hot-add of a new device, leave raid_disk at -1 */ 803 return 0; 804 805 if (mddev->level != LEVEL_MULTIPATH) { 806 desc = sb->disks + rdev->desc_nr; 807 808 if (desc->state & (1<<MD_DISK_FAULTY)) 809 set_bit(Faulty, &rdev->flags); 810 else if (desc->state & (1<<MD_DISK_SYNC) && 811 desc->raid_disk < mddev->raid_disks) { 812 set_bit(In_sync, &rdev->flags); 813 rdev->raid_disk = desc->raid_disk; 814 } 815 if (desc->state & (1<<MD_DISK_WRITEMOSTLY)) 816 set_bit(WriteMostly, &rdev->flags); 817 } else /* MULTIPATH are always insync */ 818 set_bit(In_sync, &rdev->flags); 819 return 0; 820 } 821 822 /* 823 * sync_super for 0.90.0 824 */ 825 static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) 826 { 827 mdp_super_t *sb; 828 struct list_head *tmp; 829 mdk_rdev_t *rdev2; 830 int next_spare = mddev->raid_disks; 831 832 833 /* make rdev->sb match mddev data.. 
834 * 835 * 1/ zero out disks 836 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare); 837 * 3/ any empty disks < next_spare become removed 838 * 839 * disks[0] gets initialised to REMOVED because 840 * we cannot be sure from other fields if it has 841 * been initialised or not. 842 */ 843 int i; 844 int active=0, working=0,failed=0,spare=0,nr_disks=0; 845 846 rdev->sb_size = MD_SB_BYTES; 847 848 sb = (mdp_super_t*)page_address(rdev->sb_page); 849 850 memset(sb, 0, sizeof(*sb)); 851 852 sb->md_magic = MD_SB_MAGIC; 853 sb->major_version = mddev->major_version; 854 sb->patch_version = mddev->patch_version; 855 sb->gvalid_words = 0; /* ignored */ 856 memcpy(&sb->set_uuid0, mddev->uuid+0, 4); 857 memcpy(&sb->set_uuid1, mddev->uuid+4, 4); 858 memcpy(&sb->set_uuid2, mddev->uuid+8, 4); 859 memcpy(&sb->set_uuid3, mddev->uuid+12,4); 860 861 sb->ctime = mddev->ctime; 862 sb->level = mddev->level; 863 sb->size = mddev->size; 864 sb->raid_disks = mddev->raid_disks; 865 sb->md_minor = mddev->md_minor; 866 sb->not_persistent = !mddev->persistent; 867 sb->utime = mddev->utime; 868 sb->state = 0; 869 sb->events_hi = (mddev->events>>32); 870 sb->events_lo = (u32)mddev->events; 871 872 if (mddev->reshape_position == MaxSector) 873 sb->minor_version = 90; 874 else { 875 sb->minor_version = 91; 876 sb->reshape_position = mddev->reshape_position; 877 sb->new_level = mddev->new_level; 878 sb->delta_disks = mddev->delta_disks; 879 sb->new_layout = mddev->new_layout; 880 sb->new_chunk = mddev->new_chunk; 881 } 882 mddev->minor_version = sb->minor_version; 883 if (mddev->in_sync) 884 { 885 sb->recovery_cp = mddev->recovery_cp; 886 sb->cp_events_hi = (mddev->events>>32); 887 sb->cp_events_lo = (u32)mddev->events; 888 if (mddev->recovery_cp == MaxSector) 889 sb->state = (1<< MD_SB_CLEAN); 890 } else 891 sb->recovery_cp = 0; 892 893 sb->layout = mddev->layout; 894 sb->chunk_size = mddev->chunk_size; 895 896 if (mddev->bitmap && mddev->bitmap_file == NULL) 897 sb->state |= (1<<MD_SB_BITMAP_PRESENT); 898 899 sb->disks[0].state = (1<<MD_DISK_REMOVED); 900 ITERATE_RDEV(mddev,rdev2,tmp) { 901 mdp_disk_t *d; 902 int desc_nr; 903 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags) 904 && !test_bit(Faulty, &rdev2->flags)) 905 desc_nr = rdev2->raid_disk; 906 else 907 desc_nr = next_spare++; 908 rdev2->desc_nr = desc_nr; 909 d = &sb->disks[rdev2->desc_nr]; 910 nr_disks++; 911 d->number = rdev2->desc_nr; 912 d->major = MAJOR(rdev2->bdev->bd_dev); 913 d->minor = MINOR(rdev2->bdev->bd_dev); 914 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags) 915 && !test_bit(Faulty, &rdev2->flags)) 916 d->raid_disk = rdev2->raid_disk; 917 else 918 d->raid_disk = rdev2->desc_nr; /* compatibility */ 919 if (test_bit(Faulty, &rdev2->flags)) 920 d->state = (1<<MD_DISK_FAULTY); 921 else if (test_bit(In_sync, &rdev2->flags)) { 922 d->state = (1<<MD_DISK_ACTIVE); 923 d->state |= (1<<MD_DISK_SYNC); 924 active++; 925 working++; 926 } else { 927 d->state = 0; 928 spare++; 929 working++; 930 } 931 if (test_bit(WriteMostly, &rdev2->flags)) 932 d->state |= (1<<MD_DISK_WRITEMOSTLY); 933 } 934 /* now set the "removed" and "faulty" bits on any missing devices */ 935 for (i=0 ; i < mddev->raid_disks ; i++) { 936 mdp_disk_t *d = &sb->disks[i]; 937 if (d->state == 0 && d->number == 0) { 938 d->number = i; 939 d->raid_disk = i; 940 d->state = (1<<MD_DISK_REMOVED); 941 d->state |= (1<<MD_DISK_FAULTY); 942 failed++; 943 } 944 } 945 sb->nr_disks = nr_disks; 946 sb->active_disks = active; 947 sb->working_disks = working; 
948 sb->failed_disks = failed; 949 sb->spare_disks = spare; 950 951 sb->this_disk = sb->disks[rdev->desc_nr]; 952 sb->sb_csum = calc_sb_csum(sb); 953 } 954 955 /* 956 * version 1 superblock 957 */ 958 959 static unsigned int calc_sb_1_csum(struct mdp_superblock_1 * sb) 960 { 961 unsigned int disk_csum, csum; 962 unsigned long long newcsum; 963 int size = 256 + le32_to_cpu(sb->max_dev)*2; 964 unsigned int *isuper = (unsigned int*)sb; 965 int i; 966 967 disk_csum = sb->sb_csum; 968 sb->sb_csum = 0; 969 newcsum = 0; 970 for (i=0; size>=4; size -= 4 ) 971 newcsum += le32_to_cpu(*isuper++); 972 973 if (size == 2) 974 newcsum += le16_to_cpu(*(unsigned short*) isuper); 975 976 csum = (newcsum & 0xffffffff) + (newcsum >> 32); 977 sb->sb_csum = disk_csum; 978 return cpu_to_le32(csum); 979 } 980 981 static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) 982 { 983 struct mdp_superblock_1 *sb; 984 int ret; 985 sector_t sb_offset; 986 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 987 int bmask; 988 989 /* 990 * Calculate the position of the superblock. 991 * It is always aligned to a 4K boundary and 992 * depeding on minor_version, it can be: 993 * 0: At least 8K, but less than 12K, from end of device 994 * 1: At start of device 995 * 2: 4K from start of device. 996 */ 997 switch(minor_version) { 998 case 0: 999 sb_offset = rdev->bdev->bd_inode->i_size >> 9; 1000 sb_offset -= 8*2; 1001 sb_offset &= ~(sector_t)(4*2-1); 1002 /* convert from sectors to K */ 1003 sb_offset /= 2; 1004 break; 1005 case 1: 1006 sb_offset = 0; 1007 break; 1008 case 2: 1009 sb_offset = 4; 1010 break; 1011 default: 1012 return -EINVAL; 1013 } 1014 rdev->sb_offset = sb_offset; 1015 1016 /* superblock is rarely larger than 1K, but it can be larger, 1017 * and it is safe to read 4k, so we do that 1018 */ 1019 ret = read_disk_sb(rdev, 4096); 1020 if (ret) return ret; 1021 1022 1023 sb = (struct mdp_superblock_1*)page_address(rdev->sb_page); 1024 1025 if (sb->magic != cpu_to_le32(MD_SB_MAGIC) || 1026 sb->major_version != cpu_to_le32(1) || 1027 le32_to_cpu(sb->max_dev) > (4096-256)/2 || 1028 le64_to_cpu(sb->super_offset) != (rdev->sb_offset<<1) || 1029 (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0) 1030 return -EINVAL; 1031 1032 if (calc_sb_1_csum(sb) != sb->sb_csum) { 1033 printk("md: invalid superblock checksum on %s\n", 1034 bdevname(rdev->bdev,b)); 1035 return -EINVAL; 1036 } 1037 if (le64_to_cpu(sb->data_size) < 10) { 1038 printk("md: data_size too small on %s\n", 1039 bdevname(rdev->bdev,b)); 1040 return -EINVAL; 1041 } 1042 rdev->preferred_minor = 0xffff; 1043 rdev->data_offset = le64_to_cpu(sb->data_offset); 1044 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); 1045 1046 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; 1047 bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1; 1048 if (rdev->sb_size & bmask) 1049 rdev-> sb_size = (rdev->sb_size | bmask)+1; 1050 1051 if (refdev == 0) 1052 ret = 1; 1053 else { 1054 __u64 ev1, ev2; 1055 struct mdp_superblock_1 *refsb = 1056 (struct mdp_superblock_1*)page_address(refdev->sb_page); 1057 1058 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 || 1059 sb->level != refsb->level || 1060 sb->layout != refsb->layout || 1061 sb->chunksize != refsb->chunksize) { 1062 printk(KERN_WARNING "md: %s has strangely different" 1063 " superblock to %s\n", 1064 bdevname(rdev->bdev,b), 1065 bdevname(refdev->bdev,b2)); 1066 return -EINVAL; 1067 } 1068 ev1 = le64_to_cpu(sb->events); 1069 ev2 = le64_to_cpu(refsb->events); 1070 1071 if (ev1 > 
ev2) 1072 ret = 1; 1073 else 1074 ret = 0; 1075 } 1076 if (minor_version) 1077 rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2; 1078 else 1079 rdev->size = rdev->sb_offset; 1080 if (rdev->size < le64_to_cpu(sb->data_size)/2) 1081 return -EINVAL; 1082 rdev->size = le64_to_cpu(sb->data_size)/2; 1083 if (le32_to_cpu(sb->chunksize)) 1084 rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1); 1085 1086 if (le32_to_cpu(sb->size) > rdev->size*2) 1087 return -EINVAL; 1088 return ret; 1089 } 1090 1091 static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) 1092 { 1093 struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page); 1094 1095 rdev->raid_disk = -1; 1096 rdev->flags = 0; 1097 if (mddev->raid_disks == 0) { 1098 mddev->major_version = 1; 1099 mddev->patch_version = 0; 1100 mddev->persistent = 1; 1101 mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9; 1102 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1); 1103 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1); 1104 mddev->level = le32_to_cpu(sb->level); 1105 mddev->clevel[0] = 0; 1106 mddev->layout = le32_to_cpu(sb->layout); 1107 mddev->raid_disks = le32_to_cpu(sb->raid_disks); 1108 mddev->size = le64_to_cpu(sb->size)/2; 1109 mddev->events = le64_to_cpu(sb->events); 1110 mddev->bitmap_offset = 0; 1111 mddev->default_bitmap_offset = 1024 >> 9; 1112 1113 mddev->recovery_cp = le64_to_cpu(sb->resync_offset); 1114 memcpy(mddev->uuid, sb->set_uuid, 16); 1115 1116 mddev->max_disks = (4096-256)/2; 1117 1118 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) && 1119 mddev->bitmap_file == NULL ) { 1120 if (mddev->level != 1 && mddev->level != 5 && mddev->level != 6 1121 && mddev->level != 10) { 1122 printk(KERN_WARNING "md: bitmaps not supported for this level.\n"); 1123 return -EINVAL; 1124 } 1125 mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset); 1126 } 1127 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) { 1128 mddev->reshape_position = le64_to_cpu(sb->reshape_position); 1129 mddev->delta_disks = le32_to_cpu(sb->delta_disks); 1130 mddev->new_level = le32_to_cpu(sb->new_level); 1131 mddev->new_layout = le32_to_cpu(sb->new_layout); 1132 mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9; 1133 } else { 1134 mddev->reshape_position = MaxSector; 1135 mddev->delta_disks = 0; 1136 mddev->new_level = mddev->level; 1137 mddev->new_layout = mddev->layout; 1138 mddev->new_chunk = mddev->chunk_size; 1139 } 1140 1141 } else if (mddev->pers == NULL) { 1142 /* Insist of good event counter while assembling */ 1143 __u64 ev1 = le64_to_cpu(sb->events); 1144 ++ev1; 1145 if (ev1 < mddev->events) 1146 return -EINVAL; 1147 } else if (mddev->bitmap) { 1148 /* If adding to array with a bitmap, then we can accept an 1149 * older device, but not too old. 
1150 */ 1151 __u64 ev1 = le64_to_cpu(sb->events); 1152 if (ev1 < mddev->bitmap->events_cleared) 1153 return 0; 1154 } else /* just a hot-add of a new device, leave raid_disk at -1 */ 1155 return 0; 1156 1157 if (mddev->level != LEVEL_MULTIPATH) { 1158 int role; 1159 rdev->desc_nr = le32_to_cpu(sb->dev_number); 1160 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); 1161 switch(role) { 1162 case 0xffff: /* spare */ 1163 break; 1164 case 0xfffe: /* faulty */ 1165 set_bit(Faulty, &rdev->flags); 1166 break; 1167 default: 1168 set_bit(In_sync, &rdev->flags); 1169 rdev->raid_disk = role; 1170 break; 1171 } 1172 if (sb->devflags & WriteMostly1) 1173 set_bit(WriteMostly, &rdev->flags); 1174 } else /* MULTIPATH are always insync */ 1175 set_bit(In_sync, &rdev->flags); 1176 1177 return 0; 1178 } 1179 1180 static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) 1181 { 1182 struct mdp_superblock_1 *sb; 1183 struct list_head *tmp; 1184 mdk_rdev_t *rdev2; 1185 int max_dev, i; 1186 /* make rdev->sb match mddev and rdev data. */ 1187 1188 sb = (struct mdp_superblock_1*)page_address(rdev->sb_page); 1189 1190 sb->feature_map = 0; 1191 sb->pad0 = 0; 1192 memset(sb->pad1, 0, sizeof(sb->pad1)); 1193 memset(sb->pad2, 0, sizeof(sb->pad2)); 1194 memset(sb->pad3, 0, sizeof(sb->pad3)); 1195 1196 sb->utime = cpu_to_le64((__u64)mddev->utime); 1197 sb->events = cpu_to_le64(mddev->events); 1198 if (mddev->in_sync) 1199 sb->resync_offset = cpu_to_le64(mddev->recovery_cp); 1200 else 1201 sb->resync_offset = cpu_to_le64(0); 1202 1203 sb->cnt_corrected_read = atomic_read(&rdev->corrected_errors); 1204 1205 sb->raid_disks = cpu_to_le32(mddev->raid_disks); 1206 sb->size = cpu_to_le64(mddev->size<<1); 1207 1208 if (mddev->bitmap && mddev->bitmap_file == NULL) { 1209 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset); 1210 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); 1211 } 1212 if (mddev->reshape_position != MaxSector) { 1213 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE); 1214 sb->reshape_position = cpu_to_le64(mddev->reshape_position); 1215 sb->new_layout = cpu_to_le32(mddev->new_layout); 1216 sb->delta_disks = cpu_to_le32(mddev->delta_disks); 1217 sb->new_level = cpu_to_le32(mddev->new_level); 1218 sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9); 1219 } 1220 1221 max_dev = 0; 1222 ITERATE_RDEV(mddev,rdev2,tmp) 1223 if (rdev2->desc_nr+1 > max_dev) 1224 max_dev = rdev2->desc_nr+1; 1225 1226 sb->max_dev = cpu_to_le32(max_dev); 1227 for (i=0; i<max_dev;i++) 1228 sb->dev_roles[i] = cpu_to_le16(0xfffe); 1229 1230 ITERATE_RDEV(mddev,rdev2,tmp) { 1231 i = rdev2->desc_nr; 1232 if (test_bit(Faulty, &rdev2->flags)) 1233 sb->dev_roles[i] = cpu_to_le16(0xfffe); 1234 else if (test_bit(In_sync, &rdev2->flags)) 1235 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); 1236 else 1237 sb->dev_roles[i] = cpu_to_le16(0xffff); 1238 } 1239 1240 sb->recovery_offset = cpu_to_le64(0); /* not supported yet */ 1241 sb->sb_csum = calc_sb_1_csum(sb); 1242 } 1243 1244 1245 static struct super_type super_types[] = { 1246 [0] = { 1247 .name = "0.90.0", 1248 .owner = THIS_MODULE, 1249 .load_super = super_90_load, 1250 .validate_super = super_90_validate, 1251 .sync_super = super_90_sync, 1252 }, 1253 [1] = { 1254 .name = "md-1", 1255 .owner = THIS_MODULE, 1256 .load_super = super_1_load, 1257 .validate_super = super_1_validate, 1258 .sync_super = super_1_sync, 1259 }, 1260 }; 1261 1262 static mdk_rdev_t * match_dev_unit(mddev_t *mddev, mdk_rdev_t *dev) 1263 { 1264 struct list_head *tmp; 1265 mdk_rdev_t *rdev; 1266 1267 
ITERATE_RDEV(mddev,rdev,tmp) 1268 if (rdev->bdev->bd_contains == dev->bdev->bd_contains) 1269 return rdev; 1270 1271 return NULL; 1272 } 1273 1274 static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2) 1275 { 1276 struct list_head *tmp; 1277 mdk_rdev_t *rdev; 1278 1279 ITERATE_RDEV(mddev1,rdev,tmp) 1280 if (match_dev_unit(mddev2, rdev)) 1281 return 1; 1282 1283 return 0; 1284 } 1285 1286 static LIST_HEAD(pending_raid_disks); 1287 1288 static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) 1289 { 1290 mdk_rdev_t *same_pdev; 1291 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 1292 struct kobject *ko; 1293 char *s; 1294 1295 if (rdev->mddev) { 1296 MD_BUG(); 1297 return -EINVAL; 1298 } 1299 /* make sure rdev->size exceeds mddev->size */ 1300 if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) { 1301 if (mddev->pers) 1302 /* Cannot change size, so fail */ 1303 return -ENOSPC; 1304 else 1305 mddev->size = rdev->size; 1306 } 1307 same_pdev = match_dev_unit(mddev, rdev); 1308 if (same_pdev) 1309 printk(KERN_WARNING 1310 "%s: WARNING: %s appears to be on the same physical" 1311 " disk as %s. True\n protection against single-disk" 1312 " failure might be compromised.\n", 1313 mdname(mddev), bdevname(rdev->bdev,b), 1314 bdevname(same_pdev->bdev,b2)); 1315 1316 /* Verify rdev->desc_nr is unique. 1317 * If it is -1, assign a free number, else 1318 * check number is not in use 1319 */ 1320 if (rdev->desc_nr < 0) { 1321 int choice = 0; 1322 if (mddev->pers) choice = mddev->raid_disks; 1323 while (find_rdev_nr(mddev, choice)) 1324 choice++; 1325 rdev->desc_nr = choice; 1326 } else { 1327 if (find_rdev_nr(mddev, rdev->desc_nr)) 1328 return -EBUSY; 1329 } 1330 bdevname(rdev->bdev,b); 1331 if (kobject_set_name(&rdev->kobj, "dev-%s", b) < 0) 1332 return -ENOMEM; 1333 while ( (s=strchr(rdev->kobj.k_name, '/')) != NULL) 1334 *s = '!'; 1335 1336 list_add(&rdev->same_set, &mddev->disks); 1337 rdev->mddev = mddev; 1338 printk(KERN_INFO "md: bind<%s>\n", b); 1339 1340 rdev->kobj.parent = &mddev->kobj; 1341 kobject_add(&rdev->kobj); 1342 1343 if (rdev->bdev->bd_part) 1344 ko = &rdev->bdev->bd_part->kobj; 1345 else 1346 ko = &rdev->bdev->bd_disk->kobj; 1347 sysfs_create_link(&rdev->kobj, ko, "block"); 1348 bd_claim_by_disk(rdev->bdev, rdev, mddev->gendisk); 1349 return 0; 1350 } 1351 1352 static void unbind_rdev_from_array(mdk_rdev_t * rdev) 1353 { 1354 char b[BDEVNAME_SIZE]; 1355 if (!rdev->mddev) { 1356 MD_BUG(); 1357 return; 1358 } 1359 bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk); 1360 list_del_init(&rdev->same_set); 1361 printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b)); 1362 rdev->mddev = NULL; 1363 sysfs_remove_link(&rdev->kobj, "block"); 1364 kobject_del(&rdev->kobj); 1365 } 1366 1367 /* 1368 * prevent the device from being mounted, repartitioned or 1369 * otherwise reused by a RAID array (or any other kernel 1370 * subsystem), by bd_claiming the device. 
1371 */ 1372 static int lock_rdev(mdk_rdev_t *rdev, dev_t dev) 1373 { 1374 int err = 0; 1375 struct block_device *bdev; 1376 char b[BDEVNAME_SIZE]; 1377 1378 bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE); 1379 if (IS_ERR(bdev)) { 1380 printk(KERN_ERR "md: could not open %s.\n", 1381 __bdevname(dev, b)); 1382 return PTR_ERR(bdev); 1383 } 1384 err = bd_claim(bdev, rdev); 1385 if (err) { 1386 printk(KERN_ERR "md: could not bd_claim %s.\n", 1387 bdevname(bdev, b)); 1388 blkdev_put(bdev); 1389 return err; 1390 } 1391 rdev->bdev = bdev; 1392 return err; 1393 } 1394 1395 static void unlock_rdev(mdk_rdev_t *rdev) 1396 { 1397 struct block_device *bdev = rdev->bdev; 1398 rdev->bdev = NULL; 1399 if (!bdev) 1400 MD_BUG(); 1401 bd_release(bdev); 1402 blkdev_put(bdev); 1403 } 1404 1405 void md_autodetect_dev(dev_t dev); 1406 1407 static void export_rdev(mdk_rdev_t * rdev) 1408 { 1409 char b[BDEVNAME_SIZE]; 1410 printk(KERN_INFO "md: export_rdev(%s)\n", 1411 bdevname(rdev->bdev,b)); 1412 if (rdev->mddev) 1413 MD_BUG(); 1414 free_disk_sb(rdev); 1415 list_del_init(&rdev->same_set); 1416 #ifndef MODULE 1417 md_autodetect_dev(rdev->bdev->bd_dev); 1418 #endif 1419 unlock_rdev(rdev); 1420 kobject_put(&rdev->kobj); 1421 } 1422 1423 static void kick_rdev_from_array(mdk_rdev_t * rdev) 1424 { 1425 unbind_rdev_from_array(rdev); 1426 export_rdev(rdev); 1427 } 1428 1429 static void export_array(mddev_t *mddev) 1430 { 1431 struct list_head *tmp; 1432 mdk_rdev_t *rdev; 1433 1434 ITERATE_RDEV(mddev,rdev,tmp) { 1435 if (!rdev->mddev) { 1436 MD_BUG(); 1437 continue; 1438 } 1439 kick_rdev_from_array(rdev); 1440 } 1441 if (!list_empty(&mddev->disks)) 1442 MD_BUG(); 1443 mddev->raid_disks = 0; 1444 mddev->major_version = 0; 1445 } 1446 1447 static void print_desc(mdp_disk_t *desc) 1448 { 1449 printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number, 1450 desc->major,desc->minor,desc->raid_disk,desc->state); 1451 } 1452 1453 static void print_sb(mdp_super_t *sb) 1454 { 1455 int i; 1456 1457 printk(KERN_INFO 1458 "md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n", 1459 sb->major_version, sb->minor_version, sb->patch_version, 1460 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3, 1461 sb->ctime); 1462 printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n", 1463 sb->level, sb->size, sb->nr_disks, sb->raid_disks, 1464 sb->md_minor, sb->layout, sb->chunk_size); 1465 printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d" 1466 " FD:%d SD:%d CSUM:%08x E:%08lx\n", 1467 sb->utime, sb->state, sb->active_disks, sb->working_disks, 1468 sb->failed_disks, sb->spare_disks, 1469 sb->sb_csum, (unsigned long)sb->events_lo); 1470 1471 printk(KERN_INFO); 1472 for (i = 0; i < MD_SB_DISKS; i++) { 1473 mdp_disk_t *desc; 1474 1475 desc = sb->disks + i; 1476 if (desc->number || desc->major || desc->minor || 1477 desc->raid_disk || (desc->state && (desc->state != 4))) { 1478 printk(" D %2d: ", i); 1479 print_desc(desc); 1480 } 1481 } 1482 printk(KERN_INFO "md: THIS: "); 1483 print_desc(&sb->this_disk); 1484 1485 } 1486 1487 static void print_rdev(mdk_rdev_t *rdev) 1488 { 1489 char b[BDEVNAME_SIZE]; 1490 printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n", 1491 bdevname(rdev->bdev,b), (unsigned long long)rdev->size, 1492 test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags), 1493 rdev->desc_nr); 1494 if (rdev->sb_loaded) { 1495 printk(KERN_INFO "md: rdev superblock:\n"); 1496 print_sb((mdp_super_t*)page_address(rdev->sb_page)); 1497 } else 1498 printk(KERN_INFO "md: no rdev superblock!\n"); 1499 } 1500 1501 void 
md_print_devices(void)
{
	struct list_head *tmp, *tmp2;
	mdk_rdev_t *rdev;
	mddev_t *mddev;
	char b[BDEVNAME_SIZE];

	printk("\n");
	printk("md: **********************************\n");
	printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n");
	printk("md: **********************************\n");
	ITERATE_MDDEV(mddev,tmp) {

		if (mddev->bitmap)
			bitmap_print_sb(mddev->bitmap);
		else
			printk("%s: ", mdname(mddev));
		ITERATE_RDEV(mddev,rdev,tmp2)
			printk("<%s>", bdevname(rdev->bdev,b));
		printk("\n");

		ITERATE_RDEV(mddev,rdev,tmp2)
			print_rdev(rdev);
	}
	printk("md: **********************************\n");
	printk("\n");
}


static void sync_sbs(mddev_t * mddev)
{
	mdk_rdev_t *rdev;
	struct list_head *tmp;

	ITERATE_RDEV(mddev,rdev,tmp) {
		super_types[mddev->major_version].
			sync_super(mddev, rdev);
		rdev->sb_loaded = 1;
	}
}

void md_update_sb(mddev_t * mddev)
{
	int err;
	struct list_head *tmp;
	mdk_rdev_t *rdev;
	int sync_req;

repeat:
	spin_lock_irq(&mddev->write_lock);
	sync_req = mddev->in_sync;
	mddev->utime = get_seconds();
	mddev->events ++;

	if (!mddev->events) {
		/*
		 * oops, this 64-bit counter should never wrap.
		 * Either we are in around ~1 trillion A.C., assuming
		 * 1 reboot per second, or we have a bug:
		 */
		MD_BUG();
		mddev->events --;
	}
	mddev->sb_dirty = 2;
	sync_sbs(mddev);

	/*
	 * do not write anything to disk if using
	 * nonpersistent superblocks
	 */
	if (!mddev->persistent) {
		mddev->sb_dirty = 0;
		spin_unlock_irq(&mddev->write_lock);
		wake_up(&mddev->sb_wait);
		return;
	}
	spin_unlock_irq(&mddev->write_lock);

	dprintk(KERN_INFO
		"md: updating %s RAID superblock on device (in sync %d)\n",
		mdname(mddev),mddev->in_sync);

	err = bitmap_update_sb(mddev->bitmap);
	ITERATE_RDEV(mddev,rdev,tmp) {
		char b[BDEVNAME_SIZE];
		dprintk(KERN_INFO "md: ");
		if (test_bit(Faulty, &rdev->flags))
			dprintk("(skipping faulty ");

		dprintk("%s ", bdevname(rdev->bdev,b));
		if (!test_bit(Faulty, &rdev->flags)) {
			md_super_write(mddev,rdev,
				       rdev->sb_offset<<1, rdev->sb_size,
				       rdev->sb_page);
			dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
				bdevname(rdev->bdev,b),
				(unsigned long long)rdev->sb_offset);

		} else
			dprintk(")\n");
		if (mddev->level == LEVEL_MULTIPATH)
			/* only need to write one superblock... */
			break;
	}
	md_super_wait(mddev);
	/* if there was a failure, sb_dirty was set to 1, and we re-write super */

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync != sync_req || mddev->sb_dirty == 1) {
		/* have to write it out again */
		spin_unlock_irq(&mddev->write_lock);
		goto repeat;
	}
	mddev->sb_dirty = 0;
	spin_unlock_irq(&mddev->write_lock);
	wake_up(&mddev->sb_wait);

}
EXPORT_SYMBOL_GPL(md_update_sb);

/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either form, with or without a trailing newline.
 * For this we use cmd_match.
 */
static int cmd_match(const char *cmd, const char *str)
{
	/* See if cmd, written into a sysfs file, matches
	 * str.
They must either be the same, or cmd can 1628 * have a trailing newline 1629 */ 1630 while (*cmd && *str && *cmd == *str) { 1631 cmd++; 1632 str++; 1633 } 1634 if (*cmd == '\n') 1635 cmd++; 1636 if (*str || *cmd) 1637 return 0; 1638 return 1; 1639 } 1640 1641 struct rdev_sysfs_entry { 1642 struct attribute attr; 1643 ssize_t (*show)(mdk_rdev_t *, char *); 1644 ssize_t (*store)(mdk_rdev_t *, const char *, size_t); 1645 }; 1646 1647 static ssize_t 1648 state_show(mdk_rdev_t *rdev, char *page) 1649 { 1650 char *sep = ""; 1651 int len=0; 1652 1653 if (test_bit(Faulty, &rdev->flags)) { 1654 len+= sprintf(page+len, "%sfaulty",sep); 1655 sep = ","; 1656 } 1657 if (test_bit(In_sync, &rdev->flags)) { 1658 len += sprintf(page+len, "%sin_sync",sep); 1659 sep = ","; 1660 } 1661 if (!test_bit(Faulty, &rdev->flags) && 1662 !test_bit(In_sync, &rdev->flags)) { 1663 len += sprintf(page+len, "%sspare", sep); 1664 sep = ","; 1665 } 1666 return len+sprintf(page+len, "\n"); 1667 } 1668 1669 static struct rdev_sysfs_entry 1670 rdev_state = __ATTR_RO(state); 1671 1672 static ssize_t 1673 super_show(mdk_rdev_t *rdev, char *page) 1674 { 1675 if (rdev->sb_loaded && rdev->sb_size) { 1676 memcpy(page, page_address(rdev->sb_page), rdev->sb_size); 1677 return rdev->sb_size; 1678 } else 1679 return 0; 1680 } 1681 static struct rdev_sysfs_entry rdev_super = __ATTR_RO(super); 1682 1683 static ssize_t 1684 errors_show(mdk_rdev_t *rdev, char *page) 1685 { 1686 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); 1687 } 1688 1689 static ssize_t 1690 errors_store(mdk_rdev_t *rdev, const char *buf, size_t len) 1691 { 1692 char *e; 1693 unsigned long n = simple_strtoul(buf, &e, 10); 1694 if (*buf && (*e == 0 || *e == '\n')) { 1695 atomic_set(&rdev->corrected_errors, n); 1696 return len; 1697 } 1698 return -EINVAL; 1699 } 1700 static struct rdev_sysfs_entry rdev_errors = 1701 __ATTR(errors, 0644, errors_show, errors_store); 1702 1703 static ssize_t 1704 slot_show(mdk_rdev_t *rdev, char *page) 1705 { 1706 if (rdev->raid_disk < 0) 1707 return sprintf(page, "none\n"); 1708 else 1709 return sprintf(page, "%d\n", rdev->raid_disk); 1710 } 1711 1712 static ssize_t 1713 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) 1714 { 1715 char *e; 1716 int slot = simple_strtoul(buf, &e, 10); 1717 if (strncmp(buf, "none", 4)==0) 1718 slot = -1; 1719 else if (e==buf || (*e && *e!= '\n')) 1720 return -EINVAL; 1721 if (rdev->mddev->pers) 1722 /* Cannot set slot in active array (yet) */ 1723 return -EBUSY; 1724 if (slot >= rdev->mddev->raid_disks) 1725 return -ENOSPC; 1726 rdev->raid_disk = slot; 1727 /* assume it is working */ 1728 rdev->flags = 0; 1729 set_bit(In_sync, &rdev->flags); 1730 return len; 1731 } 1732 1733 1734 static struct rdev_sysfs_entry rdev_slot = 1735 __ATTR(slot, 0644, slot_show, slot_store); 1736 1737 static ssize_t 1738 offset_show(mdk_rdev_t *rdev, char *page) 1739 { 1740 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); 1741 } 1742 1743 static ssize_t 1744 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len) 1745 { 1746 char *e; 1747 unsigned long long offset = simple_strtoull(buf, &e, 10); 1748 if (e==buf || (*e && *e != '\n')) 1749 return -EINVAL; 1750 if (rdev->mddev->pers) 1751 return -EBUSY; 1752 rdev->data_offset = offset; 1753 return len; 1754 } 1755 1756 static struct rdev_sysfs_entry rdev_offset = 1757 __ATTR(offset, 0644, offset_show, offset_store); 1758 1759 static ssize_t 1760 rdev_size_show(mdk_rdev_t *rdev, char *page) 1761 { 1762 return sprintf(page, "%llu\n", 
(unsigned long long)rdev->size); 1763 } 1764 1765 static ssize_t 1766 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) 1767 { 1768 char *e; 1769 unsigned long long size = simple_strtoull(buf, &e, 10); 1770 if (e==buf || (*e && *e != '\n')) 1771 return -EINVAL; 1772 if (rdev->mddev->pers) 1773 return -EBUSY; 1774 rdev->size = size; 1775 if (size < rdev->mddev->size || rdev->mddev->size == 0) 1776 rdev->mddev->size = size; 1777 return len; 1778 } 1779 1780 static struct rdev_sysfs_entry rdev_size = 1781 __ATTR(size, 0644, rdev_size_show, rdev_size_store); 1782 1783 static struct attribute *rdev_default_attrs[] = { 1784 &rdev_state.attr, 1785 &rdev_super.attr, 1786 &rdev_errors.attr, 1787 &rdev_slot.attr, 1788 &rdev_offset.attr, 1789 &rdev_size.attr, 1790 NULL, 1791 }; 1792 static ssize_t 1793 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 1794 { 1795 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 1796 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); 1797 1798 if (!entry->show) 1799 return -EIO; 1800 return entry->show(rdev, page); 1801 } 1802 1803 static ssize_t 1804 rdev_attr_store(struct kobject *kobj, struct attribute *attr, 1805 const char *page, size_t length) 1806 { 1807 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 1808 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); 1809 1810 if (!entry->store) 1811 return -EIO; 1812 return entry->store(rdev, page, length); 1813 } 1814 1815 static void rdev_free(struct kobject *ko) 1816 { 1817 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj); 1818 kfree(rdev); 1819 } 1820 static struct sysfs_ops rdev_sysfs_ops = { 1821 .show = rdev_attr_show, 1822 .store = rdev_attr_store, 1823 }; 1824 static struct kobj_type rdev_ktype = { 1825 .release = rdev_free, 1826 .sysfs_ops = &rdev_sysfs_ops, 1827 .default_attrs = rdev_default_attrs, 1828 }; 1829 1830 /* 1831 * Import a device. If 'super_format' >= 0, then sanity check the superblock 1832 * 1833 * mark the device faulty if: 1834 * 1835 * - the device is nonexistent (zero size) 1836 * - the device has no valid superblock 1837 * 1838 * a faulty rdev _never_ has rdev->sb set. 1839 */ 1840 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor) 1841 { 1842 char b[BDEVNAME_SIZE]; 1843 int err; 1844 mdk_rdev_t *rdev; 1845 sector_t size; 1846 1847 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); 1848 if (!rdev) { 1849 printk(KERN_ERR "md: could not alloc mem for new device!\n"); 1850 return ERR_PTR(-ENOMEM); 1851 } 1852 1853 if ((err = alloc_disk_sb(rdev))) 1854 goto abort_free; 1855 1856 err = lock_rdev(rdev, newdev); 1857 if (err) 1858 goto abort_free; 1859 1860 rdev->kobj.parent = NULL; 1861 rdev->kobj.ktype = &rdev_ktype; 1862 kobject_init(&rdev->kobj); 1863 1864 rdev->desc_nr = -1; 1865 rdev->flags = 0; 1866 rdev->data_offset = 0; 1867 atomic_set(&rdev->nr_pending, 0); 1868 atomic_set(&rdev->read_errors, 0); 1869 atomic_set(&rdev->corrected_errors, 0); 1870 1871 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 1872 if (!size) { 1873 printk(KERN_WARNING 1874 "md: %s has zero or unknown size, marking faulty!\n", 1875 bdevname(rdev->bdev,b)); 1876 err = -EINVAL; 1877 goto abort_free; 1878 } 1879 1880 if (super_format >= 0) { 1881 err = super_types[super_format]. 
1882 load_super(rdev, NULL, super_minor); 1883 if (err == -EINVAL) { 1884 printk(KERN_WARNING 1885 "md: %s has invalid sb, not importing!\n", 1886 bdevname(rdev->bdev,b)); 1887 goto abort_free; 1888 } 1889 if (err < 0) { 1890 printk(KERN_WARNING 1891 "md: could not read %s's sb, not importing!\n", 1892 bdevname(rdev->bdev,b)); 1893 goto abort_free; 1894 } 1895 } 1896 INIT_LIST_HEAD(&rdev->same_set); 1897 1898 return rdev; 1899 1900 abort_free: 1901 if (rdev->sb_page) { 1902 if (rdev->bdev) 1903 unlock_rdev(rdev); 1904 free_disk_sb(rdev); 1905 } 1906 kfree(rdev); 1907 return ERR_PTR(err); 1908 } 1909 1910 /* 1911 * Check a full RAID array for plausibility 1912 */ 1913 1914 1915 static void analyze_sbs(mddev_t * mddev) 1916 { 1917 int i; 1918 struct list_head *tmp; 1919 mdk_rdev_t *rdev, *freshest; 1920 char b[BDEVNAME_SIZE]; 1921 1922 freshest = NULL; 1923 ITERATE_RDEV(mddev,rdev,tmp) 1924 switch (super_types[mddev->major_version]. 1925 load_super(rdev, freshest, mddev->minor_version)) { 1926 case 1: 1927 freshest = rdev; 1928 break; 1929 case 0: 1930 break; 1931 default: 1932 printk( KERN_ERR \ 1933 "md: fatal superblock inconsistency in %s" 1934 " -- removing from array\n", 1935 bdevname(rdev->bdev,b)); 1936 kick_rdev_from_array(rdev); 1937 } 1938 1939 1940 super_types[mddev->major_version]. 1941 validate_super(mddev, freshest); 1942 1943 i = 0; 1944 ITERATE_RDEV(mddev,rdev,tmp) { 1945 if (rdev != freshest) 1946 if (super_types[mddev->major_version]. 1947 validate_super(mddev, rdev)) { 1948 printk(KERN_WARNING "md: kicking non-fresh %s" 1949 " from array!\n", 1950 bdevname(rdev->bdev,b)); 1951 kick_rdev_from_array(rdev); 1952 continue; 1953 } 1954 if (mddev->level == LEVEL_MULTIPATH) { 1955 rdev->desc_nr = i++; 1956 rdev->raid_disk = rdev->desc_nr; 1957 set_bit(In_sync, &rdev->flags); 1958 } 1959 } 1960 1961 1962 1963 if (mddev->recovery_cp != MaxSector && 1964 mddev->level >= 1) 1965 printk(KERN_ERR "md: %s: raid array is not clean" 1966 " -- starting background reconstruction\n", 1967 mdname(mddev)); 1968 1969 } 1970 1971 static ssize_t 1972 level_show(mddev_t *mddev, char *page) 1973 { 1974 struct mdk_personality *p = mddev->pers; 1975 if (p) 1976 return sprintf(page, "%s\n", p->name); 1977 else if (mddev->clevel[0]) 1978 return sprintf(page, "%s\n", mddev->clevel); 1979 else if (mddev->level != LEVEL_NONE) 1980 return sprintf(page, "%d\n", mddev->level); 1981 else 1982 return 0; 1983 } 1984 1985 static ssize_t 1986 level_store(mddev_t *mddev, const char *buf, size_t len) 1987 { 1988 int rv = len; 1989 if (mddev->pers) 1990 return -EBUSY; 1991 if (len == 0) 1992 return 0; 1993 if (len >= sizeof(mddev->clevel)) 1994 return -ENOSPC; 1995 strncpy(mddev->clevel, buf, len); 1996 if (mddev->clevel[len-1] == '\n') 1997 len--; 1998 mddev->clevel[len] = 0; 1999 mddev->level = LEVEL_NONE; 2000 return rv; 2001 } 2002 2003 static struct md_sysfs_entry md_level = 2004 __ATTR(level, 0644, level_show, level_store); 2005 2006 static ssize_t 2007 raid_disks_show(mddev_t *mddev, char *page) 2008 { 2009 if (mddev->raid_disks == 0) 2010 return 0; 2011 return sprintf(page, "%d\n", mddev->raid_disks); 2012 } 2013 2014 static int update_raid_disks(mddev_t *mddev, int raid_disks); 2015 2016 static ssize_t 2017 raid_disks_store(mddev_t *mddev, const char *buf, size_t len) 2018 { 2019 /* can only set raid_disks if array is not yet active */ 2020 char *e; 2021 int rv = 0; 2022 unsigned long n = simple_strtoul(buf, &e, 10); 2023 2024 if (!*buf || (*e && *e != '\n')) 2025 return -EINVAL; 2026 2027 if (mddev->pers) 
2028 rv = update_raid_disks(mddev, n); 2029 else 2030 mddev->raid_disks = n; 2031 return rv ? rv : len; 2032 } 2033 static struct md_sysfs_entry md_raid_disks = 2034 __ATTR(raid_disks, 0644, raid_disks_show, raid_disks_store); 2035 2036 static ssize_t 2037 chunk_size_show(mddev_t *mddev, char *page) 2038 { 2039 return sprintf(page, "%d\n", mddev->chunk_size); 2040 } 2041 2042 static ssize_t 2043 chunk_size_store(mddev_t *mddev, const char *buf, size_t len) 2044 { 2045 /* can only set chunk_size if array is not yet active */ 2046 char *e; 2047 unsigned long n = simple_strtoul(buf, &e, 10); 2048 2049 if (mddev->pers) 2050 return -EBUSY; 2051 if (!*buf || (*e && *e != '\n')) 2052 return -EINVAL; 2053 2054 mddev->chunk_size = n; 2055 return len; 2056 } 2057 static struct md_sysfs_entry md_chunk_size = 2058 __ATTR(chunk_size, 0644, chunk_size_show, chunk_size_store); 2059 2060 static ssize_t 2061 null_show(mddev_t *mddev, char *page) 2062 { 2063 return -EINVAL; 2064 } 2065 2066 static ssize_t 2067 new_dev_store(mddev_t *mddev, const char *buf, size_t len) 2068 { 2069 /* buf must be %d:%d\n? giving major and minor numbers */ 2070 /* The new device is added to the array. 2071 * If the array has a persistent superblock, we read the 2072 * superblock to initialise info and check validity. 2073 * Otherwise, only checking done is that in bind_rdev_to_array, 2074 * which mainly checks size. 2075 */ 2076 char *e; 2077 int major = simple_strtoul(buf, &e, 10); 2078 int minor; 2079 dev_t dev; 2080 mdk_rdev_t *rdev; 2081 int err; 2082 2083 if (!*buf || *e != ':' || !e[1] || e[1] == '\n') 2084 return -EINVAL; 2085 minor = simple_strtoul(e+1, &e, 10); 2086 if (*e && *e != '\n') 2087 return -EINVAL; 2088 dev = MKDEV(major, minor); 2089 if (major != MAJOR(dev) || 2090 minor != MINOR(dev)) 2091 return -EOVERFLOW; 2092 2093 2094 if (mddev->persistent) { 2095 rdev = md_import_device(dev, mddev->major_version, 2096 mddev->minor_version); 2097 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { 2098 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, 2099 mdk_rdev_t, same_set); 2100 err = super_types[mddev->major_version] 2101 .load_super(rdev, rdev0, mddev->minor_version); 2102 if (err < 0) 2103 goto out; 2104 } 2105 } else 2106 rdev = md_import_device(dev, -1, -1); 2107 2108 if (IS_ERR(rdev)) 2109 return PTR_ERR(rdev); 2110 err = bind_rdev_to_array(rdev, mddev); 2111 out: 2112 if (err) 2113 export_rdev(rdev); 2114 return err ? err : len; 2115 } 2116 2117 static struct md_sysfs_entry md_new_device = 2118 __ATTR(new_dev, 0200, null_show, new_dev_store); 2119 2120 static ssize_t 2121 size_show(mddev_t *mddev, char *page) 2122 { 2123 return sprintf(page, "%llu\n", (unsigned long long)mddev->size); 2124 } 2125 2126 static int update_size(mddev_t *mddev, unsigned long size); 2127 2128 static ssize_t 2129 size_store(mddev_t *mddev, const char *buf, size_t len) 2130 { 2131 /* If array is inactive, we can reduce the component size, but 2132 * not increase it (except from 0). 2133 * If array is active, we can try an on-line resize 2134 */ 2135 char *e; 2136 int err = 0; 2137 unsigned long long size = simple_strtoull(buf, &e, 10); 2138 if (!*buf || *buf == '\n' || 2139 (*e && *e != '\n')) 2140 return -EINVAL; 2141 2142 if (mddev->pers) { 2143 err = update_size(mddev, size); 2144 md_update_sb(mddev); 2145 } else { 2146 if (mddev->size == 0 || 2147 mddev->size > size) 2148 mddev->size = size; 2149 else 2150 err = -ENOSPC; 2151 } 2152 return err ? 
err : len; 2153 } 2154 2155 static struct md_sysfs_entry md_size = 2156 __ATTR(component_size, 0644, size_show, size_store); 2157 2158 2159 /* Metdata version. 2160 * This is either 'none' for arrays with externally managed metadata, 2161 * or N.M for internally known formats 2162 */ 2163 static ssize_t 2164 metadata_show(mddev_t *mddev, char *page) 2165 { 2166 if (mddev->persistent) 2167 return sprintf(page, "%d.%d\n", 2168 mddev->major_version, mddev->minor_version); 2169 else 2170 return sprintf(page, "none\n"); 2171 } 2172 2173 static ssize_t 2174 metadata_store(mddev_t *mddev, const char *buf, size_t len) 2175 { 2176 int major, minor; 2177 char *e; 2178 if (!list_empty(&mddev->disks)) 2179 return -EBUSY; 2180 2181 if (cmd_match(buf, "none")) { 2182 mddev->persistent = 0; 2183 mddev->major_version = 0; 2184 mddev->minor_version = 90; 2185 return len; 2186 } 2187 major = simple_strtoul(buf, &e, 10); 2188 if (e==buf || *e != '.') 2189 return -EINVAL; 2190 buf = e+1; 2191 minor = simple_strtoul(buf, &e, 10); 2192 if (e==buf || *e != '\n') 2193 return -EINVAL; 2194 if (major >= sizeof(super_types)/sizeof(super_types[0]) || 2195 super_types[major].name == NULL) 2196 return -ENOENT; 2197 mddev->major_version = major; 2198 mddev->minor_version = minor; 2199 mddev->persistent = 1; 2200 return len; 2201 } 2202 2203 static struct md_sysfs_entry md_metadata = 2204 __ATTR(metadata_version, 0644, metadata_show, metadata_store); 2205 2206 static ssize_t 2207 action_show(mddev_t *mddev, char *page) 2208 { 2209 char *type = "idle"; 2210 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 2211 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) { 2212 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 2213 type = "reshape"; 2214 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 2215 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 2216 type = "resync"; 2217 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) 2218 type = "check"; 2219 else 2220 type = "repair"; 2221 } else 2222 type = "recover"; 2223 } 2224 return sprintf(page, "%s\n", type); 2225 } 2226 2227 static ssize_t 2228 action_store(mddev_t *mddev, const char *page, size_t len) 2229 { 2230 if (!mddev->pers || !mddev->pers->sync_request) 2231 return -EINVAL; 2232 2233 if (cmd_match(page, "idle")) { 2234 if (mddev->sync_thread) { 2235 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 2236 md_unregister_thread(mddev->sync_thread); 2237 mddev->sync_thread = NULL; 2238 mddev->recovery = 0; 2239 } 2240 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 2241 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 2242 return -EBUSY; 2243 else if (cmd_match(page, "resync") || cmd_match(page, "recover")) 2244 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2245 else if (cmd_match(page, "reshape")) { 2246 int err; 2247 if (mddev->pers->start_reshape == NULL) 2248 return -EINVAL; 2249 err = mddev->pers->start_reshape(mddev); 2250 if (err) 2251 return err; 2252 } else { 2253 if (cmd_match(page, "check")) 2254 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); 2255 else if (cmd_match(page, "repair")) 2256 return -EINVAL; 2257 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 2258 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 2259 } 2260 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2261 md_wakeup_thread(mddev->thread); 2262 return len; 2263 } 2264 2265 static ssize_t 2266 mismatch_cnt_show(mddev_t *mddev, char *page) 2267 { 2268 return sprintf(page, "%llu\n", 2269 (unsigned long long) mddev->resync_mismatches); 2270 } 2271 2272 static struct 
md_sysfs_entry 2273 md_scan_mode = __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); 2274 2275 2276 static struct md_sysfs_entry 2277 md_mismatches = __ATTR_RO(mismatch_cnt); 2278 2279 static ssize_t 2280 sync_min_show(mddev_t *mddev, char *page) 2281 { 2282 return sprintf(page, "%d (%s)\n", speed_min(mddev), 2283 mddev->sync_speed_min ? "local": "system"); 2284 } 2285 2286 static ssize_t 2287 sync_min_store(mddev_t *mddev, const char *buf, size_t len) 2288 { 2289 int min; 2290 char *e; 2291 if (strncmp(buf, "system", 6)==0) { 2292 mddev->sync_speed_min = 0; 2293 return len; 2294 } 2295 min = simple_strtoul(buf, &e, 10); 2296 if (buf == e || (*e && *e != '\n') || min <= 0) 2297 return -EINVAL; 2298 mddev->sync_speed_min = min; 2299 return len; 2300 } 2301 2302 static struct md_sysfs_entry md_sync_min = 2303 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); 2304 2305 static ssize_t 2306 sync_max_show(mddev_t *mddev, char *page) 2307 { 2308 return sprintf(page, "%d (%s)\n", speed_max(mddev), 2309 mddev->sync_speed_max ? "local": "system"); 2310 } 2311 2312 static ssize_t 2313 sync_max_store(mddev_t *mddev, const char *buf, size_t len) 2314 { 2315 int max; 2316 char *e; 2317 if (strncmp(buf, "system", 6)==0) { 2318 mddev->sync_speed_max = 0; 2319 return len; 2320 } 2321 max = simple_strtoul(buf, &e, 10); 2322 if (buf == e || (*e && *e != '\n') || max <= 0) 2323 return -EINVAL; 2324 mddev->sync_speed_max = max; 2325 return len; 2326 } 2327 2328 static struct md_sysfs_entry md_sync_max = 2329 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); 2330 2331 2332 static ssize_t 2333 sync_speed_show(mddev_t *mddev, char *page) 2334 { 2335 unsigned long resync, dt, db; 2336 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active)); 2337 dt = ((jiffies - mddev->resync_mark) / HZ); 2338 if (!dt) dt++; 2339 db = resync - (mddev->resync_mark_cnt); 2340 return sprintf(page, "%ld\n", db/dt/2); /* K/sec */ 2341 } 2342 2343 static struct md_sysfs_entry 2344 md_sync_speed = __ATTR_RO(sync_speed); 2345 2346 static ssize_t 2347 sync_completed_show(mddev_t *mddev, char *page) 2348 { 2349 unsigned long max_blocks, resync; 2350 2351 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 2352 max_blocks = mddev->resync_max_sectors; 2353 else 2354 max_blocks = mddev->size << 1; 2355 2356 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active)); 2357 return sprintf(page, "%lu / %lu\n", resync, max_blocks); 2358 } 2359 2360 static struct md_sysfs_entry 2361 md_sync_completed = __ATTR_RO(sync_completed); 2362 2363 static ssize_t 2364 suspend_lo_show(mddev_t *mddev, char *page) 2365 { 2366 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); 2367 } 2368 2369 static ssize_t 2370 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len) 2371 { 2372 char *e; 2373 unsigned long long new = simple_strtoull(buf, &e, 10); 2374 2375 if (mddev->pers->quiesce == NULL) 2376 return -EINVAL; 2377 if (buf == e || (*e && *e != '\n')) 2378 return -EINVAL; 2379 if (new >= mddev->suspend_hi || 2380 (new > mddev->suspend_lo && new < mddev->suspend_hi)) { 2381 mddev->suspend_lo = new; 2382 mddev->pers->quiesce(mddev, 2); 2383 return len; 2384 } else 2385 return -EINVAL; 2386 } 2387 static struct md_sysfs_entry md_suspend_lo = 2388 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store); 2389 2390 2391 static ssize_t 2392 suspend_hi_show(mddev_t *mddev, char *page) 2393 { 2394 return sprintf(page, "%llu\n", (unsigned long 
long)mddev->suspend_hi); 2395 } 2396 2397 static ssize_t 2398 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len) 2399 { 2400 char *e; 2401 unsigned long long new = simple_strtoull(buf, &e, 10); 2402 2403 if (mddev->pers->quiesce == NULL) 2404 return -EINVAL; 2405 if (buf == e || (*e && *e != '\n')) 2406 return -EINVAL; 2407 if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) || 2408 (new > mddev->suspend_lo && new > mddev->suspend_hi)) { 2409 mddev->suspend_hi = new; 2410 mddev->pers->quiesce(mddev, 1); 2411 mddev->pers->quiesce(mddev, 0); 2412 return len; 2413 } else 2414 return -EINVAL; 2415 } 2416 static struct md_sysfs_entry md_suspend_hi = 2417 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); 2418 2419 2420 static struct attribute *md_default_attrs[] = { 2421 &md_level.attr, 2422 &md_raid_disks.attr, 2423 &md_chunk_size.attr, 2424 &md_size.attr, 2425 &md_metadata.attr, 2426 &md_new_device.attr, 2427 NULL, 2428 }; 2429 2430 static struct attribute *md_redundancy_attrs[] = { 2431 &md_scan_mode.attr, 2432 &md_mismatches.attr, 2433 &md_sync_min.attr, 2434 &md_sync_max.attr, 2435 &md_sync_speed.attr, 2436 &md_sync_completed.attr, 2437 &md_suspend_lo.attr, 2438 &md_suspend_hi.attr, 2439 NULL, 2440 }; 2441 static struct attribute_group md_redundancy_group = { 2442 .name = NULL, 2443 .attrs = md_redundancy_attrs, 2444 }; 2445 2446 2447 static ssize_t 2448 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 2449 { 2450 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 2451 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); 2452 ssize_t rv; 2453 2454 if (!entry->show) 2455 return -EIO; 2456 rv = mddev_lock(mddev); 2457 if (!rv) { 2458 rv = entry->show(mddev, page); 2459 mddev_unlock(mddev); 2460 } 2461 return rv; 2462 } 2463 2464 static ssize_t 2465 md_attr_store(struct kobject *kobj, struct attribute *attr, 2466 const char *page, size_t length) 2467 { 2468 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 2469 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); 2470 ssize_t rv; 2471 2472 if (!entry->store) 2473 return -EIO; 2474 rv = mddev_lock(mddev); 2475 if (!rv) { 2476 rv = entry->store(mddev, page, length); 2477 mddev_unlock(mddev); 2478 } 2479 return rv; 2480 } 2481 2482 static void md_free(struct kobject *ko) 2483 { 2484 mddev_t *mddev = container_of(ko, mddev_t, kobj); 2485 kfree(mddev); 2486 } 2487 2488 static struct sysfs_ops md_sysfs_ops = { 2489 .show = md_attr_show, 2490 .store = md_attr_store, 2491 }; 2492 static struct kobj_type md_ktype = { 2493 .release = md_free, 2494 .sysfs_ops = &md_sysfs_ops, 2495 .default_attrs = md_default_attrs, 2496 }; 2497 2498 int mdp_major = 0; 2499 2500 static struct kobject *md_probe(dev_t dev, int *part, void *data) 2501 { 2502 static DEFINE_MUTEX(disks_mutex); 2503 mddev_t *mddev = mddev_find(dev); 2504 struct gendisk *disk; 2505 int partitioned = (MAJOR(dev) != MD_MAJOR); 2506 int shift = partitioned ? 
MdpMinorShift : 0; 2507 int unit = MINOR(dev) >> shift; 2508 2509 if (!mddev) 2510 return NULL; 2511 2512 mutex_lock(&disks_mutex); 2513 if (mddev->gendisk) { 2514 mutex_unlock(&disks_mutex); 2515 mddev_put(mddev); 2516 return NULL; 2517 } 2518 disk = alloc_disk(1 << shift); 2519 if (!disk) { 2520 mutex_unlock(&disks_mutex); 2521 mddev_put(mddev); 2522 return NULL; 2523 } 2524 disk->major = MAJOR(dev); 2525 disk->first_minor = unit << shift; 2526 if (partitioned) { 2527 sprintf(disk->disk_name, "md_d%d", unit); 2528 sprintf(disk->devfs_name, "md/d%d", unit); 2529 } else { 2530 sprintf(disk->disk_name, "md%d", unit); 2531 sprintf(disk->devfs_name, "md/%d", unit); 2532 } 2533 disk->fops = &md_fops; 2534 disk->private_data = mddev; 2535 disk->queue = mddev->queue; 2536 add_disk(disk); 2537 mddev->gendisk = disk; 2538 mutex_unlock(&disks_mutex); 2539 mddev->kobj.parent = &disk->kobj; 2540 mddev->kobj.k_name = NULL; 2541 snprintf(mddev->kobj.name, KOBJ_NAME_LEN, "%s", "md"); 2542 mddev->kobj.ktype = &md_ktype; 2543 kobject_register(&mddev->kobj); 2544 return NULL; 2545 } 2546 2547 void md_wakeup_thread(mdk_thread_t *thread); 2548 2549 static void md_safemode_timeout(unsigned long data) 2550 { 2551 mddev_t *mddev = (mddev_t *) data; 2552 2553 mddev->safemode = 1; 2554 md_wakeup_thread(mddev->thread); 2555 } 2556 2557 static int start_dirty_degraded; 2558 2559 static int do_md_run(mddev_t * mddev) 2560 { 2561 int err; 2562 int chunk_size; 2563 struct list_head *tmp; 2564 mdk_rdev_t *rdev; 2565 struct gendisk *disk; 2566 struct mdk_personality *pers; 2567 char b[BDEVNAME_SIZE]; 2568 2569 if (list_empty(&mddev->disks)) 2570 /* cannot run an array with no devices.. */ 2571 return -EINVAL; 2572 2573 if (mddev->pers) 2574 return -EBUSY; 2575 2576 /* 2577 * Analyze all RAID superblock(s) 2578 */ 2579 if (!mddev->raid_disks) 2580 analyze_sbs(mddev); 2581 2582 chunk_size = mddev->chunk_size; 2583 2584 if (chunk_size) { 2585 if (chunk_size > MAX_CHUNK_SIZE) { 2586 printk(KERN_ERR "too big chunk_size: %d > %d\n", 2587 chunk_size, MAX_CHUNK_SIZE); 2588 return -EINVAL; 2589 } 2590 /* 2591 * chunk-size has to be a power of 2 and multiples of PAGE_SIZE 2592 */ 2593 if ( (1 << ffz(~chunk_size)) != chunk_size) { 2594 printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size); 2595 return -EINVAL; 2596 } 2597 if (chunk_size < PAGE_SIZE) { 2598 printk(KERN_ERR "too small chunk_size: %d < %ld\n", 2599 chunk_size, PAGE_SIZE); 2600 return -EINVAL; 2601 } 2602 2603 /* devices must have minimum size of one chunk */ 2604 ITERATE_RDEV(mddev,rdev,tmp) { 2605 if (test_bit(Faulty, &rdev->flags)) 2606 continue; 2607 if (rdev->size < chunk_size / 1024) { 2608 printk(KERN_WARNING 2609 "md: Dev %s smaller than chunk_size:" 2610 " %lluk < %dk\n", 2611 bdevname(rdev->bdev,b), 2612 (unsigned long long)rdev->size, 2613 chunk_size / 1024); 2614 return -EINVAL; 2615 } 2616 } 2617 } 2618 2619 #ifdef CONFIG_KMOD 2620 if (mddev->level != LEVEL_NONE) 2621 request_module("md-level-%d", mddev->level); 2622 else if (mddev->clevel[0]) 2623 request_module("md-%s", mddev->clevel); 2624 #endif 2625 2626 /* 2627 * Drop all container device buffers, from now on 2628 * the only valid external interface is through the md 2629 * device. 
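 * (sync_blockdev() below flushes any dirty buffers of each member device
 * and invalidate_bdev() drops its cached pages, so stale data can no
 * longer be read through the component block devices once the array is
 * running.)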
2630 * Also find largest hardsector size 2631 */ 2632 ITERATE_RDEV(mddev,rdev,tmp) { 2633 if (test_bit(Faulty, &rdev->flags)) 2634 continue; 2635 sync_blockdev(rdev->bdev); 2636 invalidate_bdev(rdev->bdev, 0); 2637 } 2638 2639 md_probe(mddev->unit, NULL, NULL); 2640 disk = mddev->gendisk; 2641 if (!disk) 2642 return -ENOMEM; 2643 2644 spin_lock(&pers_lock); 2645 pers = find_pers(mddev->level, mddev->clevel); 2646 if (!pers || !try_module_get(pers->owner)) { 2647 spin_unlock(&pers_lock); 2648 if (mddev->level != LEVEL_NONE) 2649 printk(KERN_WARNING "md: personality for level %d is not loaded!\n", 2650 mddev->level); 2651 else 2652 printk(KERN_WARNING "md: personality for level %s is not loaded!\n", 2653 mddev->clevel); 2654 return -EINVAL; 2655 } 2656 mddev->pers = pers; 2657 spin_unlock(&pers_lock); 2658 mddev->level = pers->level; 2659 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 2660 2661 if (mddev->reshape_position != MaxSector && 2662 pers->start_reshape == NULL) { 2663 /* This personality cannot handle reshaping... */ 2664 mddev->pers = NULL; 2665 module_put(pers->owner); 2666 return -EINVAL; 2667 } 2668 2669 mddev->recovery = 0; 2670 mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */ 2671 mddev->barriers_work = 1; 2672 mddev->ok_start_degraded = start_dirty_degraded; 2673 2674 if (start_readonly) 2675 mddev->ro = 2; /* read-only, but switch on first write */ 2676 2677 err = mddev->pers->run(mddev); 2678 if (!err && mddev->pers->sync_request) { 2679 err = bitmap_create(mddev); 2680 if (err) { 2681 printk(KERN_ERR "%s: failed to create bitmap (%d)\n", 2682 mdname(mddev), err); 2683 mddev->pers->stop(mddev); 2684 } 2685 } 2686 if (err) { 2687 printk(KERN_ERR "md: pers->run() failed ...\n"); 2688 module_put(mddev->pers->owner); 2689 mddev->pers = NULL; 2690 bitmap_destroy(mddev); 2691 return err; 2692 } 2693 if (mddev->pers->sync_request) 2694 sysfs_create_group(&mddev->kobj, &md_redundancy_group); 2695 else if (mddev->ro == 2) /* auto-readonly not meaningful */ 2696 mddev->ro = 0; 2697 2698 atomic_set(&mddev->writes_pending,0); 2699 mddev->safemode = 0; 2700 mddev->safemode_timer.function = md_safemode_timeout; 2701 mddev->safemode_timer.data = (unsigned long) mddev; 2702 mddev->safemode_delay = (20 * HZ)/1000 +1; /* 20 msec delay */ 2703 mddev->in_sync = 1; 2704 2705 ITERATE_RDEV(mddev,rdev,tmp) 2706 if (rdev->raid_disk >= 0) { 2707 char nm[20]; 2708 sprintf(nm, "rd%d", rdev->raid_disk); 2709 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); 2710 } 2711 2712 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2713 md_wakeup_thread(mddev->thread); 2714 2715 if (mddev->sb_dirty) 2716 md_update_sb(mddev); 2717 2718 set_capacity(disk, mddev->array_size<<1); 2719 2720 /* If we call blk_queue_make_request here, it will 2721 * re-initialise max_sectors etc which may have been 2722 * refined inside -> run. So just set the bits we need to set. 2723 * Most initialisation happended when we called 2724 * blk_queue_make_request(..., md_fail_request) 2725 * earlier. 
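 * (Only queuedata and make_request_fn are set below; queue limits such
 * as max_sectors that the personality's run() may have refined are left
 * alone.)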
2726 */ 2727 mddev->queue->queuedata = mddev; 2728 mddev->queue->make_request_fn = mddev->pers->make_request; 2729 2730 mddev->changed = 1; 2731 md_new_event(mddev); 2732 return 0; 2733 } 2734 2735 static int restart_array(mddev_t *mddev) 2736 { 2737 struct gendisk *disk = mddev->gendisk; 2738 int err; 2739 2740 /* 2741 * Complain if it has no devices 2742 */ 2743 err = -ENXIO; 2744 if (list_empty(&mddev->disks)) 2745 goto out; 2746 2747 if (mddev->pers) { 2748 err = -EBUSY; 2749 if (!mddev->ro) 2750 goto out; 2751 2752 mddev->safemode = 0; 2753 mddev->ro = 0; 2754 set_disk_ro(disk, 0); 2755 2756 printk(KERN_INFO "md: %s switched to read-write mode.\n", 2757 mdname(mddev)); 2758 /* 2759 * Kick recovery or resync if necessary 2760 */ 2761 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2762 md_wakeup_thread(mddev->thread); 2763 err = 0; 2764 } else { 2765 printk(KERN_ERR "md: %s has no personality assigned.\n", 2766 mdname(mddev)); 2767 err = -EINVAL; 2768 } 2769 2770 out: 2771 return err; 2772 } 2773 2774 static int do_md_stop(mddev_t * mddev, int ro) 2775 { 2776 int err = 0; 2777 struct gendisk *disk = mddev->gendisk; 2778 2779 if (mddev->pers) { 2780 if (atomic_read(&mddev->active)>2) { 2781 printk("md: %s still in use.\n",mdname(mddev)); 2782 return -EBUSY; 2783 } 2784 2785 if (mddev->sync_thread) { 2786 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 2787 md_unregister_thread(mddev->sync_thread); 2788 mddev->sync_thread = NULL; 2789 } 2790 2791 del_timer_sync(&mddev->safemode_timer); 2792 2793 invalidate_partition(disk, 0); 2794 2795 if (ro) { 2796 err = -ENXIO; 2797 if (mddev->ro==1) 2798 goto out; 2799 mddev->ro = 1; 2800 } else { 2801 bitmap_flush(mddev); 2802 md_super_wait(mddev); 2803 if (mddev->ro) 2804 set_disk_ro(disk, 0); 2805 blk_queue_make_request(mddev->queue, md_fail_request); 2806 mddev->pers->stop(mddev); 2807 if (mddev->pers->sync_request) 2808 sysfs_remove_group(&mddev->kobj, &md_redundancy_group); 2809 2810 module_put(mddev->pers->owner); 2811 mddev->pers = NULL; 2812 if (mddev->ro) 2813 mddev->ro = 0; 2814 } 2815 if (!mddev->in_sync) { 2816 /* mark array as shutdown cleanly */ 2817 mddev->in_sync = 1; 2818 md_update_sb(mddev); 2819 } 2820 if (ro) 2821 set_disk_ro(disk, 1); 2822 } 2823 2824 /* 2825 * Free resources if final stop 2826 */ 2827 if (!ro) { 2828 mdk_rdev_t *rdev; 2829 struct list_head *tmp; 2830 struct gendisk *disk; 2831 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); 2832 2833 bitmap_destroy(mddev); 2834 if (mddev->bitmap_file) { 2835 atomic_set(&mddev->bitmap_file->f_dentry->d_inode->i_writecount, 1); 2836 fput(mddev->bitmap_file); 2837 mddev->bitmap_file = NULL; 2838 } 2839 mddev->bitmap_offset = 0; 2840 2841 ITERATE_RDEV(mddev,rdev,tmp) 2842 if (rdev->raid_disk >= 0) { 2843 char nm[20]; 2844 sprintf(nm, "rd%d", rdev->raid_disk); 2845 sysfs_remove_link(&mddev->kobj, nm); 2846 } 2847 2848 export_array(mddev); 2849 2850 mddev->array_size = 0; 2851 disk = mddev->gendisk; 2852 if (disk) 2853 set_capacity(disk, 0); 2854 mddev->changed = 1; 2855 } else 2856 printk(KERN_INFO "md: %s switched to read-only mode.\n", 2857 mdname(mddev)); 2858 err = 0; 2859 md_new_event(mddev); 2860 out: 2861 return err; 2862 } 2863 2864 static void autorun_array(mddev_t *mddev) 2865 { 2866 mdk_rdev_t *rdev; 2867 struct list_head *tmp; 2868 int err; 2869 2870 if (list_empty(&mddev->disks)) 2871 return; 2872 2873 printk(KERN_INFO "md: running: "); 2874 2875 ITERATE_RDEV(mddev,rdev,tmp) { 2876 char b[BDEVNAME_SIZE]; 2877 printk("<%s>", bdevname(rdev->bdev,b)); 2878 } 2879 
printk("\n"); 2880 2881 err = do_md_run (mddev); 2882 if (err) { 2883 printk(KERN_WARNING "md: do_md_run() returned %d\n", err); 2884 do_md_stop (mddev, 0); 2885 } 2886 } 2887 2888 /* 2889 * lets try to run arrays based on all disks that have arrived 2890 * until now. (those are in pending_raid_disks) 2891 * 2892 * the method: pick the first pending disk, collect all disks with 2893 * the same UUID, remove all from the pending list and put them into 2894 * the 'same_array' list. Then order this list based on superblock 2895 * update time (freshest comes first), kick out 'old' disks and 2896 * compare superblocks. If everything's fine then run it. 2897 * 2898 * If "unit" is allocated, then bump its reference count 2899 */ 2900 static void autorun_devices(int part) 2901 { 2902 struct list_head *tmp; 2903 mdk_rdev_t *rdev0, *rdev; 2904 mddev_t *mddev; 2905 char b[BDEVNAME_SIZE]; 2906 2907 printk(KERN_INFO "md: autorun ...\n"); 2908 while (!list_empty(&pending_raid_disks)) { 2909 dev_t dev; 2910 LIST_HEAD(candidates); 2911 rdev0 = list_entry(pending_raid_disks.next, 2912 mdk_rdev_t, same_set); 2913 2914 printk(KERN_INFO "md: considering %s ...\n", 2915 bdevname(rdev0->bdev,b)); 2916 INIT_LIST_HEAD(&candidates); 2917 ITERATE_RDEV_PENDING(rdev,tmp) 2918 if (super_90_load(rdev, rdev0, 0) >= 0) { 2919 printk(KERN_INFO "md: adding %s ...\n", 2920 bdevname(rdev->bdev,b)); 2921 list_move(&rdev->same_set, &candidates); 2922 } 2923 /* 2924 * now we have a set of devices, with all of them having 2925 * mostly sane superblocks. It's time to allocate the 2926 * mddev. 2927 */ 2928 if (rdev0->preferred_minor < 0 || rdev0->preferred_minor >= MAX_MD_DEVS) { 2929 printk(KERN_INFO "md: unit number in %s is bad: %d\n", 2930 bdevname(rdev0->bdev, b), rdev0->preferred_minor); 2931 break; 2932 } 2933 if (part) 2934 dev = MKDEV(mdp_major, 2935 rdev0->preferred_minor << MdpMinorShift); 2936 else 2937 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor); 2938 2939 md_probe(dev, NULL, NULL); 2940 mddev = mddev_find(dev); 2941 if (!mddev) { 2942 printk(KERN_ERR 2943 "md: cannot allocate memory for md drive.\n"); 2944 break; 2945 } 2946 if (mddev_lock(mddev)) 2947 printk(KERN_WARNING "md: %s locked, cannot run\n", 2948 mdname(mddev)); 2949 else if (mddev->raid_disks || mddev->major_version 2950 || !list_empty(&mddev->disks)) { 2951 printk(KERN_WARNING 2952 "md: %s already running, cannot run %s\n", 2953 mdname(mddev), bdevname(rdev0->bdev,b)); 2954 mddev_unlock(mddev); 2955 } else { 2956 printk(KERN_INFO "md: created %s\n", mdname(mddev)); 2957 ITERATE_RDEV_GENERIC(candidates,rdev,tmp) { 2958 list_del_init(&rdev->same_set); 2959 if (bind_rdev_to_array(rdev, mddev)) 2960 export_rdev(rdev); 2961 } 2962 autorun_array(mddev); 2963 mddev_unlock(mddev); 2964 } 2965 /* on success, candidates will be empty, on error 2966 * it won't... 2967 */ 2968 ITERATE_RDEV_GENERIC(candidates,rdev,tmp) 2969 export_rdev(rdev); 2970 mddev_put(mddev); 2971 } 2972 printk(KERN_INFO "md: ... autorun DONE.\n"); 2973 } 2974 2975 /* 2976 * import RAID devices based on one partition 2977 * if possible, the array gets run as well. 
2978 */ 2979 2980 static int autostart_array(dev_t startdev) 2981 { 2982 char b[BDEVNAME_SIZE]; 2983 int err = -EINVAL, i; 2984 mdp_super_t *sb = NULL; 2985 mdk_rdev_t *start_rdev = NULL, *rdev; 2986 2987 start_rdev = md_import_device(startdev, 0, 0); 2988 if (IS_ERR(start_rdev)) 2989 return err; 2990 2991 2992 /* NOTE: this can only work for 0.90.0 superblocks */ 2993 sb = (mdp_super_t*)page_address(start_rdev->sb_page); 2994 if (sb->major_version != 0 || 2995 sb->minor_version != 90 ) { 2996 printk(KERN_WARNING "md: can only autostart 0.90.0 arrays\n"); 2997 export_rdev(start_rdev); 2998 return err; 2999 } 3000 3001 if (test_bit(Faulty, &start_rdev->flags)) { 3002 printk(KERN_WARNING 3003 "md: can not autostart based on faulty %s!\n", 3004 bdevname(start_rdev->bdev,b)); 3005 export_rdev(start_rdev); 3006 return err; 3007 } 3008 list_add(&start_rdev->same_set, &pending_raid_disks); 3009 3010 for (i = 0; i < MD_SB_DISKS; i++) { 3011 mdp_disk_t *desc = sb->disks + i; 3012 dev_t dev = MKDEV(desc->major, desc->minor); 3013 3014 if (!dev) 3015 continue; 3016 if (dev == startdev) 3017 continue; 3018 if (MAJOR(dev) != desc->major || MINOR(dev) != desc->minor) 3019 continue; 3020 rdev = md_import_device(dev, 0, 0); 3021 if (IS_ERR(rdev)) 3022 continue; 3023 3024 list_add(&rdev->same_set, &pending_raid_disks); 3025 } 3026 3027 /* 3028 * possibly return codes 3029 */ 3030 autorun_devices(0); 3031 return 0; 3032 3033 } 3034 3035 3036 static int get_version(void __user * arg) 3037 { 3038 mdu_version_t ver; 3039 3040 ver.major = MD_MAJOR_VERSION; 3041 ver.minor = MD_MINOR_VERSION; 3042 ver.patchlevel = MD_PATCHLEVEL_VERSION; 3043 3044 if (copy_to_user(arg, &ver, sizeof(ver))) 3045 return -EFAULT; 3046 3047 return 0; 3048 } 3049 3050 static int get_array_info(mddev_t * mddev, void __user * arg) 3051 { 3052 mdu_array_info_t info; 3053 int nr,working,active,failed,spare; 3054 mdk_rdev_t *rdev; 3055 struct list_head *tmp; 3056 3057 nr=working=active=failed=spare=0; 3058 ITERATE_RDEV(mddev,rdev,tmp) { 3059 nr++; 3060 if (test_bit(Faulty, &rdev->flags)) 3061 failed++; 3062 else { 3063 working++; 3064 if (test_bit(In_sync, &rdev->flags)) 3065 active++; 3066 else 3067 spare++; 3068 } 3069 } 3070 3071 info.major_version = mddev->major_version; 3072 info.minor_version = mddev->minor_version; 3073 info.patch_version = MD_PATCHLEVEL_VERSION; 3074 info.ctime = mddev->ctime; 3075 info.level = mddev->level; 3076 info.size = mddev->size; 3077 if (info.size != mddev->size) /* overflow */ 3078 info.size = -1; 3079 info.nr_disks = nr; 3080 info.raid_disks = mddev->raid_disks; 3081 info.md_minor = mddev->md_minor; 3082 info.not_persistent= !mddev->persistent; 3083 3084 info.utime = mddev->utime; 3085 info.state = 0; 3086 if (mddev->in_sync) 3087 info.state = (1<<MD_SB_CLEAN); 3088 if (mddev->bitmap && mddev->bitmap_offset) 3089 info.state = (1<<MD_SB_BITMAP_PRESENT); 3090 info.active_disks = active; 3091 info.working_disks = working; 3092 info.failed_disks = failed; 3093 info.spare_disks = spare; 3094 3095 info.layout = mddev->layout; 3096 info.chunk_size = mddev->chunk_size; 3097 3098 if (copy_to_user(arg, &info, sizeof(info))) 3099 return -EFAULT; 3100 3101 return 0; 3102 } 3103 3104 static int get_bitmap_file(mddev_t * mddev, void __user * arg) 3105 { 3106 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ 3107 char *ptr, *buf = NULL; 3108 int err = -ENOMEM; 3109 3110 file = kmalloc(sizeof(*file), GFP_KERNEL); 3111 if (!file) 3112 goto out; 3113 3114 /* bitmap disabled, zero the first byte and copy out 
*/ 3115 if (!mddev->bitmap || !mddev->bitmap->file) { 3116 file->pathname[0] = '\0'; 3117 goto copy_out; 3118 } 3119 3120 buf = kmalloc(sizeof(file->pathname), GFP_KERNEL); 3121 if (!buf) 3122 goto out; 3123 3124 ptr = file_path(mddev->bitmap->file, buf, sizeof(file->pathname)); 3125 if (!ptr) 3126 goto out; 3127 3128 strcpy(file->pathname, ptr); 3129 3130 copy_out: 3131 err = 0; 3132 if (copy_to_user(arg, file, sizeof(*file))) 3133 err = -EFAULT; 3134 out: 3135 kfree(buf); 3136 kfree(file); 3137 return err; 3138 } 3139 3140 static int get_disk_info(mddev_t * mddev, void __user * arg) 3141 { 3142 mdu_disk_info_t info; 3143 unsigned int nr; 3144 mdk_rdev_t *rdev; 3145 3146 if (copy_from_user(&info, arg, sizeof(info))) 3147 return -EFAULT; 3148 3149 nr = info.number; 3150 3151 rdev = find_rdev_nr(mddev, nr); 3152 if (rdev) { 3153 info.major = MAJOR(rdev->bdev->bd_dev); 3154 info.minor = MINOR(rdev->bdev->bd_dev); 3155 info.raid_disk = rdev->raid_disk; 3156 info.state = 0; 3157 if (test_bit(Faulty, &rdev->flags)) 3158 info.state |= (1<<MD_DISK_FAULTY); 3159 else if (test_bit(In_sync, &rdev->flags)) { 3160 info.state |= (1<<MD_DISK_ACTIVE); 3161 info.state |= (1<<MD_DISK_SYNC); 3162 } 3163 if (test_bit(WriteMostly, &rdev->flags)) 3164 info.state |= (1<<MD_DISK_WRITEMOSTLY); 3165 } else { 3166 info.major = info.minor = 0; 3167 info.raid_disk = -1; 3168 info.state = (1<<MD_DISK_REMOVED); 3169 } 3170 3171 if (copy_to_user(arg, &info, sizeof(info))) 3172 return -EFAULT; 3173 3174 return 0; 3175 } 3176 3177 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) 3178 { 3179 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 3180 mdk_rdev_t *rdev; 3181 dev_t dev = MKDEV(info->major,info->minor); 3182 3183 if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) 3184 return -EOVERFLOW; 3185 3186 if (!mddev->raid_disks) { 3187 int err; 3188 /* expecting a device which has a superblock */ 3189 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); 3190 if (IS_ERR(rdev)) { 3191 printk(KERN_WARNING 3192 "md: md_import_device returned %ld\n", 3193 PTR_ERR(rdev)); 3194 return PTR_ERR(rdev); 3195 } 3196 if (!list_empty(&mddev->disks)) { 3197 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, 3198 mdk_rdev_t, same_set); 3199 int err = super_types[mddev->major_version] 3200 .load_super(rdev, rdev0, mddev->minor_version); 3201 if (err < 0) { 3202 printk(KERN_WARNING 3203 "md: %s has different UUID to %s\n", 3204 bdevname(rdev->bdev,b), 3205 bdevname(rdev0->bdev,b2)); 3206 export_rdev(rdev); 3207 return -EINVAL; 3208 } 3209 } 3210 err = bind_rdev_to_array(rdev, mddev); 3211 if (err) 3212 export_rdev(rdev); 3213 return err; 3214 } 3215 3216 /* 3217 * add_new_disk can be used once the array is assembled 3218 * to add "hot spares". 
They must already have a superblock 3219 * written 3220 */ 3221 if (mddev->pers) { 3222 int err; 3223 if (!mddev->pers->hot_add_disk) { 3224 printk(KERN_WARNING 3225 "%s: personality does not support diskops!\n", 3226 mdname(mddev)); 3227 return -EINVAL; 3228 } 3229 if (mddev->persistent) 3230 rdev = md_import_device(dev, mddev->major_version, 3231 mddev->minor_version); 3232 else 3233 rdev = md_import_device(dev, -1, -1); 3234 if (IS_ERR(rdev)) { 3235 printk(KERN_WARNING 3236 "md: md_import_device returned %ld\n", 3237 PTR_ERR(rdev)); 3238 return PTR_ERR(rdev); 3239 } 3240 /* set save_raid_disk if appropriate */ 3241 if (!mddev->persistent) { 3242 if (info->state & (1<<MD_DISK_SYNC) && 3243 info->raid_disk < mddev->raid_disks) 3244 rdev->raid_disk = info->raid_disk; 3245 else 3246 rdev->raid_disk = -1; 3247 } else 3248 super_types[mddev->major_version]. 3249 validate_super(mddev, rdev); 3250 rdev->saved_raid_disk = rdev->raid_disk; 3251 3252 clear_bit(In_sync, &rdev->flags); /* just to be sure */ 3253 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 3254 set_bit(WriteMostly, &rdev->flags); 3255 3256 rdev->raid_disk = -1; 3257 err = bind_rdev_to_array(rdev, mddev); 3258 if (err) 3259 export_rdev(rdev); 3260 3261 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3262 md_wakeup_thread(mddev->thread); 3263 return err; 3264 } 3265 3266 /* otherwise, add_new_disk is only allowed 3267 * for major_version==0 superblocks 3268 */ 3269 if (mddev->major_version != 0) { 3270 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n", 3271 mdname(mddev)); 3272 return -EINVAL; 3273 } 3274 3275 if (!(info->state & (1<<MD_DISK_FAULTY))) { 3276 int err; 3277 rdev = md_import_device (dev, -1, 0); 3278 if (IS_ERR(rdev)) { 3279 printk(KERN_WARNING 3280 "md: error, md_import_device() returned %ld\n", 3281 PTR_ERR(rdev)); 3282 return PTR_ERR(rdev); 3283 } 3284 rdev->desc_nr = info->number; 3285 if (info->raid_disk < mddev->raid_disks) 3286 rdev->raid_disk = info->raid_disk; 3287 else 3288 rdev->raid_disk = -1; 3289 3290 rdev->flags = 0; 3291 3292 if (rdev->raid_disk < mddev->raid_disks) 3293 if (info->state & (1<<MD_DISK_SYNC)) 3294 set_bit(In_sync, &rdev->flags); 3295 3296 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 3297 set_bit(WriteMostly, &rdev->flags); 3298 3299 if (!mddev->persistent) { 3300 printk(KERN_INFO "md: nonpersistent superblock ...\n"); 3301 rdev->sb_offset = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 3302 } else 3303 rdev->sb_offset = calc_dev_sboffset(rdev->bdev); 3304 rdev->size = calc_dev_size(rdev, mddev->chunk_size); 3305 3306 err = bind_rdev_to_array(rdev, mddev); 3307 if (err) { 3308 export_rdev(rdev); 3309 return err; 3310 } 3311 } 3312 3313 return 0; 3314 } 3315 3316 static int hot_remove_disk(mddev_t * mddev, dev_t dev) 3317 { 3318 char b[BDEVNAME_SIZE]; 3319 mdk_rdev_t *rdev; 3320 3321 if (!mddev->pers) 3322 return -ENODEV; 3323 3324 rdev = find_rdev(mddev, dev); 3325 if (!rdev) 3326 return -ENXIO; 3327 3328 if (rdev->raid_disk >= 0) 3329 goto busy; 3330 3331 kick_rdev_from_array(rdev); 3332 md_update_sb(mddev); 3333 md_new_event(mddev); 3334 3335 return 0; 3336 busy: 3337 printk(KERN_WARNING "md: cannot remove active disk %s from %s ... 
\n", 3338 bdevname(rdev->bdev,b), mdname(mddev)); 3339 return -EBUSY; 3340 } 3341 3342 static int hot_add_disk(mddev_t * mddev, dev_t dev) 3343 { 3344 char b[BDEVNAME_SIZE]; 3345 int err; 3346 unsigned int size; 3347 mdk_rdev_t *rdev; 3348 3349 if (!mddev->pers) 3350 return -ENODEV; 3351 3352 if (mddev->major_version != 0) { 3353 printk(KERN_WARNING "%s: HOT_ADD may only be used with" 3354 " version-0 superblocks.\n", 3355 mdname(mddev)); 3356 return -EINVAL; 3357 } 3358 if (!mddev->pers->hot_add_disk) { 3359 printk(KERN_WARNING 3360 "%s: personality does not support diskops!\n", 3361 mdname(mddev)); 3362 return -EINVAL; 3363 } 3364 3365 rdev = md_import_device (dev, -1, 0); 3366 if (IS_ERR(rdev)) { 3367 printk(KERN_WARNING 3368 "md: error, md_import_device() returned %ld\n", 3369 PTR_ERR(rdev)); 3370 return -EINVAL; 3371 } 3372 3373 if (mddev->persistent) 3374 rdev->sb_offset = calc_dev_sboffset(rdev->bdev); 3375 else 3376 rdev->sb_offset = 3377 rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 3378 3379 size = calc_dev_size(rdev, mddev->chunk_size); 3380 rdev->size = size; 3381 3382 if (test_bit(Faulty, &rdev->flags)) { 3383 printk(KERN_WARNING 3384 "md: can not hot-add faulty %s disk to %s!\n", 3385 bdevname(rdev->bdev,b), mdname(mddev)); 3386 err = -EINVAL; 3387 goto abort_export; 3388 } 3389 clear_bit(In_sync, &rdev->flags); 3390 rdev->desc_nr = -1; 3391 err = bind_rdev_to_array(rdev, mddev); 3392 if (err) 3393 goto abort_export; 3394 3395 /* 3396 * The rest should better be atomic, we can have disk failures 3397 * noticed in interrupt contexts ... 3398 */ 3399 3400 if (rdev->desc_nr == mddev->max_disks) { 3401 printk(KERN_WARNING "%s: can not hot-add to full array!\n", 3402 mdname(mddev)); 3403 err = -EBUSY; 3404 goto abort_unbind_export; 3405 } 3406 3407 rdev->raid_disk = -1; 3408 3409 md_update_sb(mddev); 3410 3411 /* 3412 * Kick recovery, maybe this spare has to be added to the 3413 * array immediately. 3414 */ 3415 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3416 md_wakeup_thread(mddev->thread); 3417 md_new_event(mddev); 3418 return 0; 3419 3420 abort_unbind_export: 3421 unbind_rdev_from_array(rdev); 3422 3423 abort_export: 3424 export_rdev(rdev); 3425 return err; 3426 } 3427 3428 /* similar to deny_write_access, but accounts for our holding a reference 3429 * to the file ourselves */ 3430 static int deny_bitmap_write_access(struct file * file) 3431 { 3432 struct inode *inode = file->f_mapping->host; 3433 3434 spin_lock(&inode->i_lock); 3435 if (atomic_read(&inode->i_writecount) > 1) { 3436 spin_unlock(&inode->i_lock); 3437 return -ETXTBSY; 3438 } 3439 atomic_set(&inode->i_writecount, -1); 3440 spin_unlock(&inode->i_lock); 3441 3442 return 0; 3443 } 3444 3445 static int set_bitmap_file(mddev_t *mddev, int fd) 3446 { 3447 int err; 3448 3449 if (mddev->pers) { 3450 if (!mddev->pers->quiesce) 3451 return -EBUSY; 3452 if (mddev->recovery || mddev->sync_thread) 3453 return -EBUSY; 3454 /* we should be able to change the bitmap.. 
*/ 3455 } 3456 3457 3458 if (fd >= 0) { 3459 if (mddev->bitmap) 3460 return -EEXIST; /* cannot add when bitmap is present */ 3461 mddev->bitmap_file = fget(fd); 3462 3463 if (mddev->bitmap_file == NULL) { 3464 printk(KERN_ERR "%s: error: failed to get bitmap file\n", 3465 mdname(mddev)); 3466 return -EBADF; 3467 } 3468 3469 err = deny_bitmap_write_access(mddev->bitmap_file); 3470 if (err) { 3471 printk(KERN_ERR "%s: error: bitmap file is already in use\n", 3472 mdname(mddev)); 3473 fput(mddev->bitmap_file); 3474 mddev->bitmap_file = NULL; 3475 return err; 3476 } 3477 mddev->bitmap_offset = 0; /* file overrides offset */ 3478 } else if (mddev->bitmap == NULL) 3479 return -ENOENT; /* cannot remove what isn't there */ 3480 err = 0; 3481 if (mddev->pers) { 3482 mddev->pers->quiesce(mddev, 1); 3483 if (fd >= 0) 3484 err = bitmap_create(mddev); 3485 if (fd < 0 || err) 3486 bitmap_destroy(mddev); 3487 mddev->pers->quiesce(mddev, 0); 3488 } else if (fd < 0) { 3489 if (mddev->bitmap_file) 3490 fput(mddev->bitmap_file); 3491 mddev->bitmap_file = NULL; 3492 } 3493 3494 return err; 3495 } 3496 3497 /* 3498 * set_array_info is used two different ways 3499 * The original usage is when creating a new array. 3500 * In this usage, raid_disks is > 0 and it together with 3501 * level, size, not_persistent,layout,chunksize determine the 3502 * shape of the array. 3503 * This will always create an array with a type-0.90.0 superblock. 3504 * The newer usage is when assembling an array. 3505 * In this case raid_disks will be 0, and the major_version field is 3506 * use to determine which style super-blocks are to be found on the devices. 3507 * The minor and patch _version numbers are also kept incase the 3508 * super_block handler wishes to interpret them. 3509 */ 3510 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) 3511 { 3512 3513 if (info->raid_disks == 0) { 3514 /* just setting version number for superblock loading */ 3515 if (info->major_version < 0 || 3516 info->major_version >= sizeof(super_types)/sizeof(super_types[0]) || 3517 super_types[info->major_version].name == NULL) { 3518 /* maybe try to auto-load a module? */ 3519 printk(KERN_INFO 3520 "md: superblock version %d not known\n", 3521 info->major_version); 3522 return -EINVAL; 3523 } 3524 mddev->major_version = info->major_version; 3525 mddev->minor_version = info->minor_version; 3526 mddev->patch_version = info->patch_version; 3527 return 0; 3528 } 3529 mddev->major_version = MD_MAJOR_VERSION; 3530 mddev->minor_version = MD_MINOR_VERSION; 3531 mddev->patch_version = MD_PATCHLEVEL_VERSION; 3532 mddev->ctime = get_seconds(); 3533 3534 mddev->level = info->level; 3535 mddev->clevel[0] = 0; 3536 mddev->size = info->size; 3537 mddev->raid_disks = info->raid_disks; 3538 /* don't set md_minor, it is determined by which /dev/md* was 3539 * openned 3540 */ 3541 if (info->state & (1<<MD_SB_CLEAN)) 3542 mddev->recovery_cp = MaxSector; 3543 else 3544 mddev->recovery_cp = 0; 3545 mddev->persistent = ! 
info->not_persistent; 3546 3547 mddev->layout = info->layout; 3548 mddev->chunk_size = info->chunk_size; 3549 3550 mddev->max_disks = MD_SB_DISKS; 3551 3552 mddev->sb_dirty = 1; 3553 3554 mddev->default_bitmap_offset = MD_SB_BYTES >> 9; 3555 mddev->bitmap_offset = 0; 3556 3557 mddev->reshape_position = MaxSector; 3558 3559 /* 3560 * Generate a 128 bit UUID 3561 */ 3562 get_random_bytes(mddev->uuid, 16); 3563 3564 mddev->new_level = mddev->level; 3565 mddev->new_chunk = mddev->chunk_size; 3566 mddev->new_layout = mddev->layout; 3567 mddev->delta_disks = 0; 3568 3569 return 0; 3570 } 3571 3572 static int update_size(mddev_t *mddev, unsigned long size) 3573 { 3574 mdk_rdev_t * rdev; 3575 int rv; 3576 struct list_head *tmp; 3577 int fit = (size == 0); 3578 3579 if (mddev->pers->resize == NULL) 3580 return -EINVAL; 3581 /* The "size" is the amount of each device that is used. 3582 * This can only make sense for arrays with redundancy. 3583 * linear and raid0 always use whatever space is available 3584 * We can only consider changing the size if no resync 3585 * or reconstruction is happening, and if the new size 3586 * is acceptable. It must fit before the sb_offset or, 3587 * if that is <data_offset, it must fit before the 3588 * size of each device. 3589 * If size is zero, we find the largest size that fits. 3590 */ 3591 if (mddev->sync_thread) 3592 return -EBUSY; 3593 ITERATE_RDEV(mddev,rdev,tmp) { 3594 sector_t avail; 3595 if (rdev->sb_offset > rdev->data_offset) 3596 avail = (rdev->sb_offset*2) - rdev->data_offset; 3597 else 3598 avail = get_capacity(rdev->bdev->bd_disk) 3599 - rdev->data_offset; 3600 if (fit && (size == 0 || size > avail/2)) 3601 size = avail/2; 3602 if (avail < ((sector_t)size << 1)) 3603 return -ENOSPC; 3604 } 3605 rv = mddev->pers->resize(mddev, (sector_t)size *2); 3606 if (!rv) { 3607 struct block_device *bdev; 3608 3609 bdev = bdget_disk(mddev->gendisk, 0); 3610 if (bdev) { 3611 mutex_lock(&bdev->bd_inode->i_mutex); 3612 i_size_write(bdev->bd_inode, (loff_t)mddev->array_size << 10); 3613 mutex_unlock(&bdev->bd_inode->i_mutex); 3614 bdput(bdev); 3615 } 3616 } 3617 return rv; 3618 } 3619 3620 static int update_raid_disks(mddev_t *mddev, int raid_disks) 3621 { 3622 int rv; 3623 /* change the number of raid disks */ 3624 if (mddev->pers->check_reshape == NULL) 3625 return -EINVAL; 3626 if (raid_disks <= 0 || 3627 raid_disks >= mddev->max_disks) 3628 return -EINVAL; 3629 if (mddev->sync_thread || mddev->reshape_position != MaxSector) 3630 return -EBUSY; 3631 mddev->delta_disks = raid_disks - mddev->raid_disks; 3632 3633 rv = mddev->pers->check_reshape(mddev); 3634 return rv; 3635 } 3636 3637 3638 /* 3639 * update_array_info is used to change the configuration of an 3640 * on-line array. 3641 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size 3642 * fields in the info are checked against the array. 3643 * Any differences that cannot be handled will cause an error. 3644 * Normally, only one change can be managed at a time. 
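 * (The single changes handled below are a new layout, a new component
 * size, a new number of raid disks, or adding/removing the internal
 * bitmap; every other field in the passed info must match the running
 * array exactly.)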
3645 */ 3646 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info) 3647 { 3648 int rv = 0; 3649 int cnt = 0; 3650 int state = 0; 3651 3652 /* calculate expected state,ignoring low bits */ 3653 if (mddev->bitmap && mddev->bitmap_offset) 3654 state |= (1 << MD_SB_BITMAP_PRESENT); 3655 3656 if (mddev->major_version != info->major_version || 3657 mddev->minor_version != info->minor_version || 3658 /* mddev->patch_version != info->patch_version || */ 3659 mddev->ctime != info->ctime || 3660 mddev->level != info->level || 3661 /* mddev->layout != info->layout || */ 3662 !mddev->persistent != info->not_persistent|| 3663 mddev->chunk_size != info->chunk_size || 3664 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ 3665 ((state^info->state) & 0xfffffe00) 3666 ) 3667 return -EINVAL; 3668 /* Check there is only one change */ 3669 if (info->size >= 0 && mddev->size != info->size) cnt++; 3670 if (mddev->raid_disks != info->raid_disks) cnt++; 3671 if (mddev->layout != info->layout) cnt++; 3672 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++; 3673 if (cnt == 0) return 0; 3674 if (cnt > 1) return -EINVAL; 3675 3676 if (mddev->layout != info->layout) { 3677 /* Change layout 3678 * we don't need to do anything at the md level, the 3679 * personality will take care of it all. 3680 */ 3681 if (mddev->pers->reconfig == NULL) 3682 return -EINVAL; 3683 else 3684 return mddev->pers->reconfig(mddev, info->layout, -1); 3685 } 3686 if (info->size >= 0 && mddev->size != info->size) 3687 rv = update_size(mddev, info->size); 3688 3689 if (mddev->raid_disks != info->raid_disks) 3690 rv = update_raid_disks(mddev, info->raid_disks); 3691 3692 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { 3693 if (mddev->pers->quiesce == NULL) 3694 return -EINVAL; 3695 if (mddev->recovery || mddev->sync_thread) 3696 return -EBUSY; 3697 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) { 3698 /* add the bitmap */ 3699 if (mddev->bitmap) 3700 return -EEXIST; 3701 if (mddev->default_bitmap_offset == 0) 3702 return -EINVAL; 3703 mddev->bitmap_offset = mddev->default_bitmap_offset; 3704 mddev->pers->quiesce(mddev, 1); 3705 rv = bitmap_create(mddev); 3706 if (rv) 3707 bitmap_destroy(mddev); 3708 mddev->pers->quiesce(mddev, 0); 3709 } else { 3710 /* remove the bitmap */ 3711 if (!mddev->bitmap) 3712 return -ENOENT; 3713 if (mddev->bitmap->file) 3714 return -EINVAL; 3715 mddev->pers->quiesce(mddev, 1); 3716 bitmap_destroy(mddev); 3717 mddev->pers->quiesce(mddev, 0); 3718 mddev->bitmap_offset = 0; 3719 } 3720 } 3721 md_update_sb(mddev); 3722 return rv; 3723 } 3724 3725 static int set_disk_faulty(mddev_t *mddev, dev_t dev) 3726 { 3727 mdk_rdev_t *rdev; 3728 3729 if (mddev->pers == NULL) 3730 return -ENODEV; 3731 3732 rdev = find_rdev(mddev, dev); 3733 if (!rdev) 3734 return -ENODEV; 3735 3736 md_error(mddev, rdev); 3737 return 0; 3738 } 3739 3740 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) 3741 { 3742 mddev_t *mddev = bdev->bd_disk->private_data; 3743 3744 geo->heads = 2; 3745 geo->sectors = 4; 3746 geo->cylinders = get_capacity(mddev->gendisk) / 8; 3747 return 0; 3748 } 3749 3750 static int md_ioctl(struct inode *inode, struct file *file, 3751 unsigned int cmd, unsigned long arg) 3752 { 3753 int err = 0; 3754 void __user *argp = (void __user *)arg; 3755 mddev_t *mddev = NULL; 3756 3757 if (!capable(CAP_SYS_ADMIN)) 3758 return -EACCES; 3759 3760 /* 3761 * Commands dealing with the RAID driver but not any 3762 * particular array: 3763 */ 3764 switch (cmd) 3765 { 
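 /*
  * (Illustrative sketch, not part of the driver: userspace issues these
  *  driver-wide commands through any md node, for example
  *
  *	mdu_version_t ver;
  *	int fd = open("/dev/md0", O_RDONLY);	/* hypothetical node */
  *	if (fd >= 0 && ioctl(fd, RAID_VERSION, &ver) == 0)
  *		printf("md driver %d.%d.%d\n",
  *		       ver.major, ver.minor, ver.patchlevel);
  *
  *  assuming <fcntl.h>, <stdio.h>, <sys/ioctl.h> and
  *  <linux/raid/md_u.h> are included.)
  */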
3766 case RAID_VERSION: 3767 err = get_version(argp); 3768 goto done; 3769 3770 case PRINT_RAID_DEBUG: 3771 err = 0; 3772 md_print_devices(); 3773 goto done; 3774 3775 #ifndef MODULE 3776 case RAID_AUTORUN: 3777 err = 0; 3778 autostart_arrays(arg); 3779 goto done; 3780 #endif 3781 default:; 3782 } 3783 3784 /* 3785 * Commands creating/starting a new array: 3786 */ 3787 3788 mddev = inode->i_bdev->bd_disk->private_data; 3789 3790 if (!mddev) { 3791 BUG(); 3792 goto abort; 3793 } 3794 3795 3796 if (cmd == START_ARRAY) { 3797 /* START_ARRAY doesn't need to lock the array as autostart_array 3798 * does the locking, and it could even be a different array 3799 */ 3800 static int cnt = 3; 3801 if (cnt > 0 ) { 3802 printk(KERN_WARNING 3803 "md: %s(pid %d) used deprecated START_ARRAY ioctl. " 3804 "This will not be supported beyond July 2006\n", 3805 current->comm, current->pid); 3806 cnt--; 3807 } 3808 err = autostart_array(new_decode_dev(arg)); 3809 if (err) { 3810 printk(KERN_WARNING "md: autostart failed!\n"); 3811 goto abort; 3812 } 3813 goto done; 3814 } 3815 3816 err = mddev_lock(mddev); 3817 if (err) { 3818 printk(KERN_INFO 3819 "md: ioctl lock interrupted, reason %d, cmd %d\n", 3820 err, cmd); 3821 goto abort; 3822 } 3823 3824 switch (cmd) 3825 { 3826 case SET_ARRAY_INFO: 3827 { 3828 mdu_array_info_t info; 3829 if (!arg) 3830 memset(&info, 0, sizeof(info)); 3831 else if (copy_from_user(&info, argp, sizeof(info))) { 3832 err = -EFAULT; 3833 goto abort_unlock; 3834 } 3835 if (mddev->pers) { 3836 err = update_array_info(mddev, &info); 3837 if (err) { 3838 printk(KERN_WARNING "md: couldn't update" 3839 " array info. %d\n", err); 3840 goto abort_unlock; 3841 } 3842 goto done_unlock; 3843 } 3844 if (!list_empty(&mddev->disks)) { 3845 printk(KERN_WARNING 3846 "md: array %s already has disks!\n", 3847 mdname(mddev)); 3848 err = -EBUSY; 3849 goto abort_unlock; 3850 } 3851 if (mddev->raid_disks) { 3852 printk(KERN_WARNING 3853 "md: array %s already initialised!\n", 3854 mdname(mddev)); 3855 err = -EBUSY; 3856 goto abort_unlock; 3857 } 3858 err = set_array_info(mddev, &info); 3859 if (err) { 3860 printk(KERN_WARNING "md: couldn't set" 3861 " array info. %d\n", err); 3862 goto abort_unlock; 3863 } 3864 } 3865 goto done_unlock; 3866 3867 default:; 3868 } 3869 3870 /* 3871 * Commands querying/configuring an existing array: 3872 */ 3873 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY, 3874 * RUN_ARRAY, and SET_BITMAP_FILE are allowed */ 3875 if (!mddev->raid_disks && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY 3876 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE) { 3877 err = -ENODEV; 3878 goto abort_unlock; 3879 } 3880 3881 /* 3882 * Commands even a read-only array can execute: 3883 */ 3884 switch (cmd) 3885 { 3886 case GET_ARRAY_INFO: 3887 err = get_array_info(mddev, argp); 3888 goto done_unlock; 3889 3890 case GET_BITMAP_FILE: 3891 err = get_bitmap_file(mddev, argp); 3892 goto done_unlock; 3893 3894 case GET_DISK_INFO: 3895 err = get_disk_info(mddev, argp); 3896 goto done_unlock; 3897 3898 case RESTART_ARRAY_RW: 3899 err = restart_array(mddev); 3900 goto done_unlock; 3901 3902 case STOP_ARRAY: 3903 err = do_md_stop (mddev, 0); 3904 goto done_unlock; 3905 3906 case STOP_ARRAY_RO: 3907 err = do_md_stop (mddev, 1); 3908 goto done_unlock; 3909 3910 /* 3911 * We have a problem here : there is no easy way to give a CHS 3912 * virtual geometry. We currently pretend that we have a 2 heads 3913 * 4 sectors (with a BIG number of cylinders...). This drives 3914 * dosfs just mad... 
;-) 3915 */ 3916 } 3917 3918 /* 3919 * The remaining ioctls are changing the state of the 3920 * superblock, so we do not allow them on read-only arrays. 3921 * However non-MD ioctls (e.g. get-size) will still come through 3922 * here and hit the 'default' below, so only disallow 3923 * 'md' ioctls, and switch to rw mode if started auto-readonly. 3924 */ 3925 if (_IOC_TYPE(cmd) == MD_MAJOR && 3926 mddev->ro && mddev->pers) { 3927 if (mddev->ro == 2) { 3928 mddev->ro = 0; 3929 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3930 md_wakeup_thread(mddev->thread); 3931 3932 } else { 3933 err = -EROFS; 3934 goto abort_unlock; 3935 } 3936 } 3937 3938 switch (cmd) 3939 { 3940 case ADD_NEW_DISK: 3941 { 3942 mdu_disk_info_t info; 3943 if (copy_from_user(&info, argp, sizeof(info))) 3944 err = -EFAULT; 3945 else 3946 err = add_new_disk(mddev, &info); 3947 goto done_unlock; 3948 } 3949 3950 case HOT_REMOVE_DISK: 3951 err = hot_remove_disk(mddev, new_decode_dev(arg)); 3952 goto done_unlock; 3953 3954 case HOT_ADD_DISK: 3955 err = hot_add_disk(mddev, new_decode_dev(arg)); 3956 goto done_unlock; 3957 3958 case SET_DISK_FAULTY: 3959 err = set_disk_faulty(mddev, new_decode_dev(arg)); 3960 goto done_unlock; 3961 3962 case RUN_ARRAY: 3963 err = do_md_run (mddev); 3964 goto done_unlock; 3965 3966 case SET_BITMAP_FILE: 3967 err = set_bitmap_file(mddev, (int)arg); 3968 goto done_unlock; 3969 3970 default: 3971 if (_IOC_TYPE(cmd) == MD_MAJOR) 3972 printk(KERN_WARNING "md: %s(pid %d) used" 3973 " obsolete MD ioctl, upgrade your" 3974 " software to use new ictls.\n", 3975 current->comm, current->pid); 3976 err = -EINVAL; 3977 goto abort_unlock; 3978 } 3979 3980 done_unlock: 3981 abort_unlock: 3982 mddev_unlock(mddev); 3983 3984 return err; 3985 done: 3986 if (err) 3987 MD_BUG(); 3988 abort: 3989 return err; 3990 } 3991 3992 static int md_open(struct inode *inode, struct file *file) 3993 { 3994 /* 3995 * Succeed if we can lock the mddev, which confirms that 3996 * it isn't being stopped right now. 3997 */ 3998 mddev_t *mddev = inode->i_bdev->bd_disk->private_data; 3999 int err; 4000 4001 if ((err = mddev_lock(mddev))) 4002 goto out; 4003 4004 err = 0; 4005 mddev_get(mddev); 4006 mddev_unlock(mddev); 4007 4008 check_disk_change(inode->i_bdev); 4009 out: 4010 return err; 4011 } 4012 4013 static int md_release(struct inode *inode, struct file * file) 4014 { 4015 mddev_t *mddev = inode->i_bdev->bd_disk->private_data; 4016 4017 if (!mddev) 4018 BUG(); 4019 mddev_put(mddev); 4020 4021 return 0; 4022 } 4023 4024 static int md_media_changed(struct gendisk *disk) 4025 { 4026 mddev_t *mddev = disk->private_data; 4027 4028 return mddev->changed; 4029 } 4030 4031 static int md_revalidate(struct gendisk *disk) 4032 { 4033 mddev_t *mddev = disk->private_data; 4034 4035 mddev->changed = 0; 4036 return 0; 4037 } 4038 static struct block_device_operations md_fops = 4039 { 4040 .owner = THIS_MODULE, 4041 .open = md_open, 4042 .release = md_release, 4043 .ioctl = md_ioctl, 4044 .getgeo = md_getgeo, 4045 .media_changed = md_media_changed, 4046 .revalidate_disk= md_revalidate, 4047 }; 4048 4049 static int md_thread(void * arg) 4050 { 4051 mdk_thread_t *thread = arg; 4052 4053 /* 4054 * md_thread is a 'system-thread', it's priority should be very 4055 * high. We avoid resource deadlocks individually in each 4056 * raid personality. (RAID5 does preallocation) We also use RR and 4057 * the very same RT priority as kswapd, thus we will never get 4058 * into a priority inversion deadlock. 
4059 * 4060 * we definitely have to have equal or higher priority than 4061 * bdflush, otherwise bdflush will deadlock if there are too 4062 * many dirty RAID5 blocks. 4063 */ 4064 4065 allow_signal(SIGKILL); 4066 while (!kthread_should_stop()) { 4067 4068 /* We need to wait INTERRUPTIBLE so that 4069 * we don't add to the load-average. 4070 * That means we need to be sure no signals are 4071 * pending 4072 */ 4073 if (signal_pending(current)) 4074 flush_signals(current); 4075 4076 wait_event_interruptible_timeout 4077 (thread->wqueue, 4078 test_bit(THREAD_WAKEUP, &thread->flags) 4079 || kthread_should_stop(), 4080 thread->timeout); 4081 try_to_freeze(); 4082 4083 clear_bit(THREAD_WAKEUP, &thread->flags); 4084 4085 thread->run(thread->mddev); 4086 } 4087 4088 return 0; 4089 } 4090 4091 void md_wakeup_thread(mdk_thread_t *thread) 4092 { 4093 if (thread) { 4094 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm); 4095 set_bit(THREAD_WAKEUP, &thread->flags); 4096 wake_up(&thread->wqueue); 4097 } 4098 } 4099 4100 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev, 4101 const char *name) 4102 { 4103 mdk_thread_t *thread; 4104 4105 thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL); 4106 if (!thread) 4107 return NULL; 4108 4109 init_waitqueue_head(&thread->wqueue); 4110 4111 thread->run = run; 4112 thread->mddev = mddev; 4113 thread->timeout = MAX_SCHEDULE_TIMEOUT; 4114 thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev)); 4115 if (IS_ERR(thread->tsk)) { 4116 kfree(thread); 4117 return NULL; 4118 } 4119 return thread; 4120 } 4121 4122 void md_unregister_thread(mdk_thread_t *thread) 4123 { 4124 dprintk("interrupting MD-thread pid %d\n", thread->tsk->pid); 4125 4126 kthread_stop(thread->tsk); 4127 kfree(thread); 4128 } 4129 4130 void md_error(mddev_t *mddev, mdk_rdev_t *rdev) 4131 { 4132 if (!mddev) { 4133 MD_BUG(); 4134 return; 4135 } 4136 4137 if (!rdev || test_bit(Faulty, &rdev->flags)) 4138 return; 4139 /* 4140 dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n", 4141 mdname(mddev), 4142 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev), 4143 __builtin_return_address(0),__builtin_return_address(1), 4144 __builtin_return_address(2),__builtin_return_address(3)); 4145 */ 4146 if (!mddev->pers->error_handler) 4147 return; 4148 mddev->pers->error_handler(mddev,rdev); 4149 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4150 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4151 md_wakeup_thread(mddev->thread); 4152 md_new_event(mddev); 4153 } 4154 4155 /* seq_file implementation /proc/mdstat */ 4156 4157 static void status_unused(struct seq_file *seq) 4158 { 4159 int i = 0; 4160 mdk_rdev_t *rdev; 4161 struct list_head *tmp; 4162 4163 seq_printf(seq, "unused devices: "); 4164 4165 ITERATE_RDEV_PENDING(rdev,tmp) { 4166 char b[BDEVNAME_SIZE]; 4167 i++; 4168 seq_printf(seq, "%s ", 4169 bdevname(rdev->bdev,b)); 4170 } 4171 if (!i) 4172 seq_printf(seq, "<none>"); 4173 4174 seq_printf(seq, "\n"); 4175 } 4176 4177 4178 static void status_resync(struct seq_file *seq, mddev_t * mddev) 4179 { 4180 sector_t max_blocks, resync, res; 4181 unsigned long dt, db, rt; 4182 int scale; 4183 unsigned int per_milli; 4184 4185 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2; 4186 4187 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 4188 max_blocks = mddev->resync_max_sectors >> 1; 4189 else 4190 max_blocks = mddev->size; 4191 4192 /* 4193 * Should not happen. 
4194 */ 4195 if (!max_blocks) { 4196 MD_BUG(); 4197 return; 4198 } 4199 /* Pick 'scale' such that (resync>>scale)*1000 will fit 4200 * in a sector_t, and (max_blocks>>scale) will fit in a 4201 * u32, as those are the requirements for sector_div. 4202 * Thus 'scale' must be at least 10 4203 */ 4204 scale = 10; 4205 if (sizeof(sector_t) > sizeof(unsigned long)) { 4206 while ( max_blocks/2 > (1ULL<<(scale+32))) 4207 scale++; 4208 } 4209 res = (resync>>scale)*1000; 4210 sector_div(res, (u32)((max_blocks>>scale)+1)); 4211 4212 per_milli = res; 4213 { 4214 int i, x = per_milli/50, y = 20-x; 4215 seq_printf(seq, "["); 4216 for (i = 0; i < x; i++) 4217 seq_printf(seq, "="); 4218 seq_printf(seq, ">"); 4219 for (i = 0; i < y; i++) 4220 seq_printf(seq, "."); 4221 seq_printf(seq, "] "); 4222 } 4223 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)", 4224 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? 4225 "reshape" : 4226 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? 4227 "resync" : "recovery")), 4228 per_milli/10, per_milli % 10, 4229 (unsigned long long) resync, 4230 (unsigned long long) max_blocks); 4231 4232 /* 4233 * We do not want to overflow, so the order of operands and 4234 * the * 100 / 100 trick are important. We do a +1 to be 4235 * safe against division by zero. We only estimate anyway. 4236 * 4237 * dt: time from mark until now 4238 * db: blocks written from mark until now 4239 * rt: remaining time 4240 */ 4241 dt = ((jiffies - mddev->resync_mark) / HZ); 4242 if (!dt) dt++; 4243 db = resync - (mddev->resync_mark_cnt/2); 4244 rt = (dt * ((unsigned long)(max_blocks-resync) / (db/100+1)))/100; 4245 4246 seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6); 4247 4248 seq_printf(seq, " speed=%ldK/sec", db/dt); 4249 } 4250 4251 static void *md_seq_start(struct seq_file *seq, loff_t *pos) 4252 { 4253 struct list_head *tmp; 4254 loff_t l = *pos; 4255 mddev_t *mddev; 4256 4257 if (l >= 0x10000) 4258 return NULL; 4259 if (!l--) 4260 /* header */ 4261 return (void*)1; 4262 4263 spin_lock(&all_mddevs_lock); 4264 list_for_each(tmp,&all_mddevs) 4265 if (!l--) { 4266 mddev = list_entry(tmp, mddev_t, all_mddevs); 4267 mddev_get(mddev); 4268 spin_unlock(&all_mddevs_lock); 4269 return mddev; 4270 } 4271 spin_unlock(&all_mddevs_lock); 4272 if (!l--) 4273 return (void*)2;/* tail */ 4274 return NULL; 4275 } 4276 4277 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) 4278 { 4279 struct list_head *tmp; 4280 mddev_t *next_mddev, *mddev = v; 4281 4282 ++*pos; 4283 if (v == (void*)2) 4284 return NULL; 4285 4286 spin_lock(&all_mddevs_lock); 4287 if (v == (void*)1) 4288 tmp = all_mddevs.next; 4289 else 4290 tmp = mddev->all_mddevs.next; 4291 if (tmp != &all_mddevs) 4292 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs)); 4293 else { 4294 next_mddev = (void*)2; 4295 *pos = 0x10000; 4296 } 4297 spin_unlock(&all_mddevs_lock); 4298 4299 if (v != (void*)1) 4300 mddev_put(mddev); 4301 return next_mddev; 4302 4303 } 4304 4305 static void md_seq_stop(struct seq_file *seq, void *v) 4306 { 4307 mddev_t *mddev = v; 4308 4309 if (mddev && v != (void*)1 && v != (void*)2) 4310 mddev_put(mddev); 4311 } 4312 4313 struct mdstat_info { 4314 int event; 4315 }; 4316 4317 static int md_seq_show(struct seq_file *seq, void *v) 4318 { 4319 mddev_t *mddev = v; 4320 sector_t size; 4321 struct list_head *tmp2; 4322 mdk_rdev_t *rdev; 4323 struct mdstat_info *mi = seq->private; 4324 struct bitmap *bitmap; 4325 4326 if (v == (void*)1) { 4327 struct mdk_personality *pers; 4328 seq_printf(seq, "Personalities : "); 
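 /*
  * (The resulting /proc/mdstat header looks roughly like
  *  "Personalities : [raid1] [raid5]"; later iterations of the seq_file
  *  emit one stanza per array and status_unused() adds the closing
  *  "unused devices:" line.)
  */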
4329 spin_lock(&pers_lock); 4330 list_for_each_entry(pers, &pers_list, list) 4331 seq_printf(seq, "[%s] ", pers->name); 4332 4333 spin_unlock(&pers_lock); 4334 seq_printf(seq, "\n"); 4335 mi->event = atomic_read(&md_event_count); 4336 return 0; 4337 } 4338 if (v == (void*)2) { 4339 status_unused(seq); 4340 return 0; 4341 } 4342 4343 if (mddev_lock(mddev) < 0) 4344 return -EINTR; 4345 4346 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { 4347 seq_printf(seq, "%s : %sactive", mdname(mddev), 4348 mddev->pers ? "" : "in"); 4349 if (mddev->pers) { 4350 if (mddev->ro==1) 4351 seq_printf(seq, " (read-only)"); 4352 if (mddev->ro==2) 4353 seq_printf(seq, "(auto-read-only)"); 4354 seq_printf(seq, " %s", mddev->pers->name); 4355 } 4356 4357 size = 0; 4358 ITERATE_RDEV(mddev,rdev,tmp2) { 4359 char b[BDEVNAME_SIZE]; 4360 seq_printf(seq, " %s[%d]", 4361 bdevname(rdev->bdev,b), rdev->desc_nr); 4362 if (test_bit(WriteMostly, &rdev->flags)) 4363 seq_printf(seq, "(W)"); 4364 if (test_bit(Faulty, &rdev->flags)) { 4365 seq_printf(seq, "(F)"); 4366 continue; 4367 } else if (rdev->raid_disk < 0) 4368 seq_printf(seq, "(S)"); /* spare */ 4369 size += rdev->size; 4370 } 4371 4372 if (!list_empty(&mddev->disks)) { 4373 if (mddev->pers) 4374 seq_printf(seq, "\n %llu blocks", 4375 (unsigned long long)mddev->array_size); 4376 else 4377 seq_printf(seq, "\n %llu blocks", 4378 (unsigned long long)size); 4379 } 4380 if (mddev->persistent) { 4381 if (mddev->major_version != 0 || 4382 mddev->minor_version != 90) { 4383 seq_printf(seq," super %d.%d", 4384 mddev->major_version, 4385 mddev->minor_version); 4386 } 4387 } else 4388 seq_printf(seq, " super non-persistent"); 4389 4390 if (mddev->pers) { 4391 mddev->pers->status (seq, mddev); 4392 seq_printf(seq, "\n "); 4393 if (mddev->pers->sync_request) { 4394 if (mddev->curr_resync > 2) { 4395 status_resync (seq, mddev); 4396 seq_printf(seq, "\n "); 4397 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2) 4398 seq_printf(seq, "\tresync=DELAYED\n "); 4399 else if (mddev->recovery_cp < MaxSector) 4400 seq_printf(seq, "\tresync=PENDING\n "); 4401 } 4402 } else 4403 seq_printf(seq, "\n "); 4404 4405 if ((bitmap = mddev->bitmap)) { 4406 unsigned long chunk_kb; 4407 unsigned long flags; 4408 spin_lock_irqsave(&bitmap->lock, flags); 4409 chunk_kb = bitmap->chunksize >> 10; 4410 seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], " 4411 "%lu%s chunk", 4412 bitmap->pages - bitmap->missing_pages, 4413 bitmap->pages, 4414 (bitmap->pages - bitmap->missing_pages) 4415 << (PAGE_SHIFT - 10), 4416 chunk_kb ? chunk_kb : bitmap->chunksize, 4417 chunk_kb ? 
"KB" : "B"); 4418 if (bitmap->file) { 4419 seq_printf(seq, ", file: "); 4420 seq_path(seq, bitmap->file->f_vfsmnt, 4421 bitmap->file->f_dentry," \t\n"); 4422 } 4423 4424 seq_printf(seq, "\n"); 4425 spin_unlock_irqrestore(&bitmap->lock, flags); 4426 } 4427 4428 seq_printf(seq, "\n"); 4429 } 4430 mddev_unlock(mddev); 4431 4432 return 0; 4433 } 4434 4435 static struct seq_operations md_seq_ops = { 4436 .start = md_seq_start, 4437 .next = md_seq_next, 4438 .stop = md_seq_stop, 4439 .show = md_seq_show, 4440 }; 4441 4442 static int md_seq_open(struct inode *inode, struct file *file) 4443 { 4444 int error; 4445 struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL); 4446 if (mi == NULL) 4447 return -ENOMEM; 4448 4449 error = seq_open(file, &md_seq_ops); 4450 if (error) 4451 kfree(mi); 4452 else { 4453 struct seq_file *p = file->private_data; 4454 p->private = mi; 4455 mi->event = atomic_read(&md_event_count); 4456 } 4457 return error; 4458 } 4459 4460 static int md_seq_release(struct inode *inode, struct file *file) 4461 { 4462 struct seq_file *m = file->private_data; 4463 struct mdstat_info *mi = m->private; 4464 m->private = NULL; 4465 kfree(mi); 4466 return seq_release(inode, file); 4467 } 4468 4469 static unsigned int mdstat_poll(struct file *filp, poll_table *wait) 4470 { 4471 struct seq_file *m = filp->private_data; 4472 struct mdstat_info *mi = m->private; 4473 int mask; 4474 4475 poll_wait(filp, &md_event_waiters, wait); 4476 4477 /* always allow read */ 4478 mask = POLLIN | POLLRDNORM; 4479 4480 if (mi->event != atomic_read(&md_event_count)) 4481 mask |= POLLERR | POLLPRI; 4482 return mask; 4483 } 4484 4485 static struct file_operations md_seq_fops = { 4486 .open = md_seq_open, 4487 .read = seq_read, 4488 .llseek = seq_lseek, 4489 .release = md_seq_release, 4490 .poll = mdstat_poll, 4491 }; 4492 4493 int register_md_personality(struct mdk_personality *p) 4494 { 4495 spin_lock(&pers_lock); 4496 list_add_tail(&p->list, &pers_list); 4497 printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level); 4498 spin_unlock(&pers_lock); 4499 return 0; 4500 } 4501 4502 int unregister_md_personality(struct mdk_personality *p) 4503 { 4504 printk(KERN_INFO "md: %s personality unregistered\n", p->name); 4505 spin_lock(&pers_lock); 4506 list_del_init(&p->list); 4507 spin_unlock(&pers_lock); 4508 return 0; 4509 } 4510 4511 static int is_mddev_idle(mddev_t *mddev) 4512 { 4513 mdk_rdev_t * rdev; 4514 struct list_head *tmp; 4515 int idle; 4516 unsigned long curr_events; 4517 4518 idle = 1; 4519 ITERATE_RDEV(mddev,rdev,tmp) { 4520 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; 4521 curr_events = disk_stat_read(disk, sectors[0]) + 4522 disk_stat_read(disk, sectors[1]) - 4523 atomic_read(&disk->sync_io); 4524 /* The difference between curr_events and last_events 4525 * will be affected by any new non-sync IO (making 4526 * curr_events bigger) and any difference in the amount of 4527 * in-flight syncio (making current_events bigger or smaller) 4528 * The amount in-flight is currently limited to 4529 * 32*64K in raid1/10 and 256*PAGE_SIZE in raid5/6 4530 * which is at most 4096 sectors. 4531 * These numbers are fairly fragile and should be made 4532 * more robust, probably by enforcing the 4533 * 'window size' that md_do_sync sort-of uses. 4534 * 4535 * Note: the following is an unsigned comparison. 
4536 */ 4537 if ((curr_events - rdev->last_events + 4096) > 8192) { 4538 rdev->last_events = curr_events; 4539 idle = 0; 4540 } 4541 } 4542 return idle; 4543 } 4544 4545 void md_done_sync(mddev_t *mddev, int blocks, int ok) 4546 { 4547 /* another "blocks" (512byte) blocks have been synced */ 4548 atomic_sub(blocks, &mddev->recovery_active); 4549 wake_up(&mddev->recovery_wait); 4550 if (!ok) { 4551 set_bit(MD_RECOVERY_ERR, &mddev->recovery); 4552 md_wakeup_thread(mddev->thread); 4553 // stop recovery, signal do_sync .... 4554 } 4555 } 4556 4557 4558 /* md_write_start(mddev, bi) 4559 * If we need to update some array metadata (e.g. 'active' flag 4560 * in superblock) before writing, schedule a superblock update 4561 * and wait for it to complete. 4562 */ 4563 void md_write_start(mddev_t *mddev, struct bio *bi) 4564 { 4565 if (bio_data_dir(bi) != WRITE) 4566 return; 4567 4568 BUG_ON(mddev->ro == 1); 4569 if (mddev->ro == 2) { 4570 /* need to switch to read/write */ 4571 mddev->ro = 0; 4572 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4573 md_wakeup_thread(mddev->thread); 4574 } 4575 atomic_inc(&mddev->writes_pending); 4576 if (mddev->in_sync) { 4577 spin_lock_irq(&mddev->write_lock); 4578 if (mddev->in_sync) { 4579 mddev->in_sync = 0; 4580 mddev->sb_dirty = 1; 4581 md_wakeup_thread(mddev->thread); 4582 } 4583 spin_unlock_irq(&mddev->write_lock); 4584 } 4585 wait_event(mddev->sb_wait, mddev->sb_dirty==0); 4586 } 4587 4588 void md_write_end(mddev_t *mddev) 4589 { 4590 if (atomic_dec_and_test(&mddev->writes_pending)) { 4591 if (mddev->safemode == 2) 4592 md_wakeup_thread(mddev->thread); 4593 else 4594 mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay); 4595 } 4596 } 4597 4598 static DECLARE_WAIT_QUEUE_HEAD(resync_wait); 4599 4600 #define SYNC_MARKS 10 4601 #define SYNC_MARK_STEP (3*HZ) 4602 void md_do_sync(mddev_t *mddev) 4603 { 4604 mddev_t *mddev2; 4605 unsigned int currspeed = 0, 4606 window; 4607 sector_t max_sectors,j, io_sectors; 4608 unsigned long mark[SYNC_MARKS]; 4609 sector_t mark_cnt[SYNC_MARKS]; 4610 int last_mark,m; 4611 struct list_head *tmp; 4612 sector_t last_check; 4613 int skipped = 0; 4614 4615 /* just in case thread restarts... */ 4616 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) 4617 return; 4618 4619 /* we overload curr_resync somewhat here. 4620 * 0 == not engaged in resync at all 4621 * 2 == checking that there is no conflict with another sync 4622 * 1 == like 2, but have yielded to allow conflicting resync to 4623 * commence 4624 * other == active in resync - this many blocks 4625 * 4626 * Before starting a resync we must have set curr_resync to 4627 * 2, and then checked that every "conflicting" array has curr_resync 4628 * less than ours. When we find one that is the same or higher 4629 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync 4630 * to 1 if we choose to yield (based arbitrarily on address of mddev structure). 4631 * This will mean we have to start checking from the beginning again.
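 * Example (two arrays sharing a physical device; addresses illustrative):
 * md_a and md_b both reach curr_resync == 2. md_a has the lower mddev
 * address, so it yields: it sets curr_resync = 1, then sees md_b still at
 * 2 (>= 1) and sleeps on resync_wait. md_b in turn sees md_a at 1 (< 2),
 * does not wait, and proceeds with its own resync. When md_b finishes it
 * wakes resync_wait and md_a restarts its conflict check from try_again.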
4632 * 4633 */ 4634 4635 do { 4636 mddev->curr_resync = 2; 4637 4638 try_again: 4639 if (kthread_should_stop()) { 4640 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4641 goto skip; 4642 } 4643 ITERATE_MDDEV(mddev2,tmp) { 4644 if (mddev2 == mddev) 4645 continue; 4646 if (mddev2->curr_resync && 4647 match_mddev_units(mddev,mddev2)) { 4648 DEFINE_WAIT(wq); 4649 if (mddev < mddev2 && mddev->curr_resync == 2) { 4650 /* arbitrarily yield */ 4651 mddev->curr_resync = 1; 4652 wake_up(&resync_wait); 4653 } 4654 if (mddev > mddev2 && mddev->curr_resync == 1) 4655 /* no need to wait here, we can wait the next 4656 * time 'round when curr_resync == 2 4657 */ 4658 continue; 4659 prepare_to_wait(&resync_wait, &wq, TASK_UNINTERRUPTIBLE); 4660 if (!kthread_should_stop() && 4661 mddev2->curr_resync >= mddev->curr_resync) { 4662 printk(KERN_INFO "md: delaying resync of %s" 4663 " until %s has finished resync (they" 4664 " share one or more physical units)\n", 4665 mdname(mddev), mdname(mddev2)); 4666 mddev_put(mddev2); 4667 schedule(); 4668 finish_wait(&resync_wait, &wq); 4669 goto try_again; 4670 } 4671 finish_wait(&resync_wait, &wq); 4672 } 4673 } 4674 } while (mddev->curr_resync < 2); 4675 4676 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 4677 /* resync follows the size requested by the personality, 4678 * which defaults to physical size, but can be virtual size 4679 */ 4680 max_sectors = mddev->resync_max_sectors; 4681 mddev->resync_mismatches = 0; 4682 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 4683 max_sectors = mddev->size << 1; 4684 else 4685 /* recovery follows the physical size of devices */ 4686 max_sectors = mddev->size << 1; 4687 4688 printk(KERN_INFO "md: syncing RAID array %s\n", mdname(mddev)); 4689 printk(KERN_INFO "md: minimum _guaranteed_ reconstruction speed:" 4690 " %d KB/sec/disc.\n", speed_min(mddev)); 4691 printk(KERN_INFO "md: using maximum available idle IO bandwidth " 4692 "(but not more than %d KB/sec) for reconstruction.\n", 4693 speed_max(mddev)); 4694 4695 is_mddev_idle(mddev); /* this also initializes IO event counters */ 4696 /* we don't use the checkpoint if there's a bitmap */ 4697 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && !mddev->bitmap 4698 && ! 
test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 4699 j = mddev->recovery_cp; 4700 else 4701 j = 0; 4702 io_sectors = 0; 4703 for (m = 0; m < SYNC_MARKS; m++) { 4704 mark[m] = jiffies; 4705 mark_cnt[m] = io_sectors; 4706 } 4707 last_mark = 0; 4708 mddev->resync_mark = mark[last_mark]; 4709 mddev->resync_mark_cnt = mark_cnt[last_mark]; 4710 4711 /* 4712 * Tune reconstruction: 4713 */ 4714 window = 32*(PAGE_SIZE/512); 4715 printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n", 4716 window/2,(unsigned long long) max_sectors/2); 4717 4718 atomic_set(&mddev->recovery_active, 0); 4719 init_waitqueue_head(&mddev->recovery_wait); 4720 last_check = 0; 4721 4722 if (j>2) { 4723 printk(KERN_INFO 4724 "md: resuming recovery of %s from checkpoint.\n", 4725 mdname(mddev)); 4726 mddev->curr_resync = j; 4727 } 4728 4729 while (j < max_sectors) { 4730 sector_t sectors; 4731 4732 skipped = 0; 4733 sectors = mddev->pers->sync_request(mddev, j, &skipped, 4734 currspeed < speed_min(mddev)); 4735 if (sectors == 0) { 4736 set_bit(MD_RECOVERY_ERR, &mddev->recovery); 4737 goto out; 4738 } 4739 4740 if (!skipped) { /* actual IO requested */ 4741 io_sectors += sectors; 4742 atomic_add(sectors, &mddev->recovery_active); 4743 } 4744 4745 j += sectors; 4746 if (j>1) mddev->curr_resync = j; 4747 if (last_check == 0) 4748 /* this is the earliest that the rebuild will be 4749 * visible in /proc/mdstat 4750 */ 4751 md_new_event(mddev); 4752 4753 if (last_check + window > io_sectors || j == max_sectors) 4754 continue; 4755 4756 last_check = io_sectors; 4757 4758 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery) || 4759 test_bit(MD_RECOVERY_ERR, &mddev->recovery)) 4760 break; 4761 4762 repeat: 4763 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) { 4764 /* step marks */ 4765 int next = (last_mark+1) % SYNC_MARKS; 4766 4767 mddev->resync_mark = mark[next]; 4768 mddev->resync_mark_cnt = mark_cnt[next]; 4769 mark[next] = jiffies; 4770 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active); 4771 last_mark = next; 4772 } 4773 4774 4775 if (kthread_should_stop()) { 4776 /* 4777 * got a signal, exit. 4778 */ 4779 printk(KERN_INFO 4780 "md: md_do_sync() got signal ... exiting\n"); 4781 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4782 goto out; 4783 } 4784 4785 /* 4786 * this loop exits only when either we are slower than 4787 * the 'hard' speed limit, or the system was IO-idle for 4788 * a jiffy. 4789 * the system might be non-idle CPU-wise, but we only care 4790 * about not overloading the IO subsystem.
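 * For example (illustrative numbers): if io_sectors - resync_mark_cnt is
 * 600000 sectors and 30 seconds have passed since resync_mark, the code
 * below computes currspeed = 600000/2 / (30+1) + 1 = 9678 K/sec. While
 * currspeed stays above speed_min() we throttle with msleep(500) whenever
 * it also exceeds speed_max() or is_mddev_idle() reports other IO, so
 * normal application IO on the array keeps priority.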
(things like an 4791 * e2fsck being done on the RAID array should execute fast) 4792 */ 4793 mddev->queue->unplug_fn(mddev->queue); 4794 cond_resched(); 4795 4796 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2 4797 /((jiffies-mddev->resync_mark)/HZ +1) +1; 4798 4799 if (currspeed > speed_min(mddev)) { 4800 if ((currspeed > speed_max(mddev)) || 4801 !is_mddev_idle(mddev)) { 4802 msleep(500); 4803 goto repeat; 4804 } 4805 } 4806 } 4807 printk(KERN_INFO "md: %s: sync done.\n",mdname(mddev)); 4808 /* 4809 * this also signals 'finished resyncing' to md_stop 4810 */ 4811 out: 4812 mddev->queue->unplug_fn(mddev->queue); 4813 4814 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); 4815 4816 /* tell personality that we are finished */ 4817 mddev->pers->sync_request(mddev, max_sectors, &skipped, 1); 4818 4819 if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) && 4820 test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && 4821 !test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && 4822 mddev->curr_resync > 2 && 4823 mddev->curr_resync >= mddev->recovery_cp) { 4824 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 4825 printk(KERN_INFO 4826 "md: checkpointing recovery of %s.\n", 4827 mdname(mddev)); 4828 mddev->recovery_cp = mddev->curr_resync; 4829 } else 4830 mddev->recovery_cp = MaxSector; 4831 } 4832 4833 skip: 4834 mddev->curr_resync = 0; 4835 wake_up(&resync_wait); 4836 set_bit(MD_RECOVERY_DONE, &mddev->recovery); 4837 md_wakeup_thread(mddev->thread); 4838 } 4839 EXPORT_SYMBOL_GPL(md_do_sync); 4840 4841 4842 /* 4843 * This routine is regularly called by all per-raid-array threads to 4844 * deal with generic issues like resync and super-block update. 4845 * Raid personalities that don't have a thread (linear/raid0) do not 4846 * need this as they never do any recovery or update the superblock. 4847 * 4848 * It does not do any resync itself, but rather "forks" off other threads 4849 * to do that as needed. 4850 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in 4851 * "->recovery" and create a thread at ->sync_thread. 4852 * When the thread finishes it sets MD_RECOVERY_DONE (and might set MD_RECOVERY_ERR) 4853 * and wakes up this thread, which will reap the thread and finish up. 4854 * This thread also removes any faulty devices (with nr_pending == 0). 4855 * 4856 * The overall approach is: 4857 * 1/ If the superblock needs updating, update it. 4858 * 2/ If a recovery thread is running, don't do anything else. 4859 * 3/ If recovery has finished, clean up, possibly marking spares active. 4860 * 4/ If there are any faulty devices, remove them. 4861 * 5/ If array is degraded, try to add spare devices. 4862 * 6/ If array has spares or is not in-sync, start a resync thread. 4863 */ 4864 void md_check_recovery(mddev_t *mddev) 4865 { 4866 mdk_rdev_t *rdev; 4867 struct list_head *rtmp; 4868 4869 4870 if (mddev->bitmap) 4871 bitmap_daemon_work(mddev->bitmap); 4872 4873 if (mddev->ro) 4874 return; 4875 4876 if (signal_pending(current)) { 4877 if (mddev->pers->sync_request) { 4878 printk(KERN_INFO "md: %s in immediate safe mode\n", 4879 mdname(mddev)); 4880 mddev->safemode = 2; 4881 } 4882 flush_signals(current); 4883 } 4884 4885 if ( ! ( 4886 mddev->sb_dirty || 4887 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || 4888 test_bit(MD_RECOVERY_DONE, &mddev->recovery) || 4889 (mddev->safemode == 1) || 4890 (mddev->safemode == 2 && !
atomic_read(&mddev->writes_pending) 4891 && !mddev->in_sync && mddev->recovery_cp == MaxSector) 4892 )) 4893 return; 4894 4895 if (mddev_trylock(mddev)) { 4896 int spares =0; 4897 4898 spin_lock_irq(&mddev->write_lock); 4899 if (mddev->safemode && !atomic_read(&mddev->writes_pending) && 4900 !mddev->in_sync && mddev->recovery_cp == MaxSector) { 4901 mddev->in_sync = 1; 4902 mddev->sb_dirty = 1; 4903 } 4904 if (mddev->safemode == 1) 4905 mddev->safemode = 0; 4906 spin_unlock_irq(&mddev->write_lock); 4907 4908 if (mddev->sb_dirty) 4909 md_update_sb(mddev); 4910 4911 4912 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && 4913 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) { 4914 /* resync/recovery still happening */ 4915 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4916 goto unlock; 4917 } 4918 if (mddev->sync_thread) { 4919 /* resync has finished, collect result */ 4920 md_unregister_thread(mddev->sync_thread); 4921 mddev->sync_thread = NULL; 4922 if (!test_bit(MD_RECOVERY_ERR, &mddev->recovery) && 4923 !test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 4924 /* success...*/ 4925 /* activate any spares */ 4926 mddev->pers->spare_active(mddev); 4927 } 4928 md_update_sb(mddev); 4929 4930 /* if array is no longer degraded, then any saved_raid_disk 4931 * information must be scrapped 4932 */ 4933 if (!mddev->degraded) 4934 ITERATE_RDEV(mddev,rdev,rtmp) 4935 rdev->saved_raid_disk = -1; 4936 4937 mddev->recovery = 0; 4938 /* flag recovery needed just to double check */ 4939 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4940 md_new_event(mddev); 4941 goto unlock; 4942 } 4943 /* Clear some bits that don't mean anything, but 4944 * might be left set 4945 */ 4946 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4947 clear_bit(MD_RECOVERY_ERR, &mddev->recovery); 4948 clear_bit(MD_RECOVERY_INTR, &mddev->recovery); 4949 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 4950 4951 /* no recovery is running. 4952 * remove any failed drives, then 4953 * add spares if possible. 4954 * Spares are also removed and re-added, to allow 4955 * the personality to fail the re-add. 4956 */ 4957 ITERATE_RDEV(mddev,rdev,rtmp) 4958 if (rdev->raid_disk >= 0 && 4959 (test_bit(Faulty, &rdev->flags) || ! test_bit(In_sync, &rdev->flags)) && 4960 atomic_read(&rdev->nr_pending)==0) { 4961 if (mddev->pers->hot_remove_disk(mddev, rdev->raid_disk)==0) { 4962 char nm[20]; 4963 sprintf(nm,"rd%d", rdev->raid_disk); 4964 sysfs_remove_link(&mddev->kobj, nm); 4965 rdev->raid_disk = -1; 4966 } 4967 } 4968 4969 if (mddev->degraded) { 4970 ITERATE_RDEV(mddev,rdev,rtmp) 4971 if (rdev->raid_disk < 0 4972 && !test_bit(Faulty, &rdev->flags)) { 4973 if (mddev->pers->hot_add_disk(mddev,rdev)) { 4974 char nm[20]; 4975 sprintf(nm, "rd%d", rdev->raid_disk); 4976 sysfs_create_link(&mddev->kobj, &rdev->kobj, nm); 4977 spares++; 4978 md_new_event(mddev); 4979 } else 4980 break; 4981 } 4982 } 4983 4984 if (spares) { 4985 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4986 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4987 } else if (mddev->recovery_cp < MaxSector) { 4988 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4989 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 4990 /* nothing to be done ... */ 4991 goto unlock; 4992 4993 if (mddev->pers->sync_request) { 4994 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 4995 if (spares && mddev->bitmap && ! mddev->bitmap->file) { 4996 /* We are adding a device or devices to an array 4997 * which has the bitmap stored on all devices.
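 * (i.e. an internal bitmap, mddev->bitmap->file == NULL, as opposed to a
 * bitmap kept in an external file)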
4998 * So make sure all bitmap pages get written 4999 */ 5000 bitmap_write_all(mddev->bitmap); 5001 } 5002 mddev->sync_thread = md_register_thread(md_do_sync, 5003 mddev, 5004 "%s_resync"); 5005 if (!mddev->sync_thread) { 5006 printk(KERN_ERR "%s: could not start resync" 5007 " thread...\n", 5008 mdname(mddev)); 5009 /* leave the spares where they are, it shouldn't hurt */ 5010 mddev->recovery = 0; 5011 } else 5012 md_wakeup_thread(mddev->sync_thread); 5013 md_new_event(mddev); 5014 } 5015 unlock: 5016 mddev_unlock(mddev); 5017 } 5018 } 5019 5020 static int md_notify_reboot(struct notifier_block *this, 5021 unsigned long code, void *x) 5022 { 5023 struct list_head *tmp; 5024 mddev_t *mddev; 5025 5026 if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) { 5027 5028 printk(KERN_INFO "md: stopping all md devices.\n"); 5029 5030 ITERATE_MDDEV(mddev,tmp) 5031 if (mddev_trylock(mddev)) 5032 do_md_stop (mddev, 1); 5033 /* 5034 * certain more exotic SCSI devices are known to be 5035 * volatile wrt too early system reboots. While the 5036 * right place to handle this issue is the given 5037 * driver, we do want to have a safe RAID driver ... 5038 */ 5039 mdelay(1000*1); 5040 } 5041 return NOTIFY_DONE; 5042 } 5043 5044 static struct notifier_block md_notifier = { 5045 .notifier_call = md_notify_reboot, 5046 .next = NULL, 5047 .priority = INT_MAX, /* before any real devices */ 5048 }; 5049 5050 static void md_geninit(void) 5051 { 5052 struct proc_dir_entry *p; 5053 5054 dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t)); 5055 5056 p = create_proc_entry("mdstat", S_IRUGO, NULL); 5057 if (p) 5058 p->proc_fops = &md_seq_fops; 5059 } 5060 5061 static int __init md_init(void) 5062 { 5063 int minor; 5064 5065 printk(KERN_INFO "md: md driver %d.%d.%d MAX_MD_DEVS=%d," 5066 " MD_SB_DISKS=%d\n", 5067 MD_MAJOR_VERSION, MD_MINOR_VERSION, 5068 MD_PATCHLEVEL_VERSION, MAX_MD_DEVS, MD_SB_DISKS); 5069 printk(KERN_INFO "md: bitmap version %d.%d\n", BITMAP_MAJOR_HI, 5070 BITMAP_MINOR); 5071 5072 if (register_blkdev(MAJOR_NR, "md")) 5073 return -1; 5074 if ((mdp_major=register_blkdev(0, "mdp"))<=0) { 5075 unregister_blkdev(MAJOR_NR, "md"); 5076 return -1; 5077 } 5078 devfs_mk_dir("md"); 5079 blk_register_region(MKDEV(MAJOR_NR, 0), MAX_MD_DEVS, THIS_MODULE, 5080 md_probe, NULL, NULL); 5081 blk_register_region(MKDEV(mdp_major, 0), MAX_MD_DEVS<<MdpMinorShift, THIS_MODULE, 5082 md_probe, NULL, NULL); 5083 5084 for (minor=0; minor < MAX_MD_DEVS; ++minor) 5085 devfs_mk_bdev(MKDEV(MAJOR_NR, minor), 5086 S_IFBLK|S_IRUSR|S_IWUSR, 5087 "md/%d", minor); 5088 5089 for (minor=0; minor < MAX_MD_DEVS; ++minor) 5090 devfs_mk_bdev(MKDEV(mdp_major, minor<<MdpMinorShift), 5091 S_IFBLK|S_IRUSR|S_IWUSR, 5092 "md/mdp%d", minor); 5093 5094 5095 register_reboot_notifier(&md_notifier); 5096 raid_table_header = register_sysctl_table(raid_root_table, 1); 5097 5098 md_geninit(); 5099 return (0); 5100 } 5101 5102 5103 #ifndef MODULE 5104 5105 /* 5106 * Searches all registered partitions for autorun RAID arrays 5107 * at boot time. 
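 * The partition-scanning code calls md_autodetect_dev() below for each
 * candidate it finds (typically partitions marked with the 0xfd "Linux
 * raid autodetect" type); the device numbers are remembered in
 * detected_devices[], and autostart_arrays() later imports each one with
 * md_import_device() and hands the pending list to autorun_devices() for
 * assembly.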
5108 */ 5109 static dev_t detected_devices[128]; 5110 static int dev_cnt; 5111 5112 void md_autodetect_dev(dev_t dev) 5113 { 5114 if (dev_cnt >= 0 && dev_cnt < 127) 5115 detected_devices[dev_cnt++] = dev; 5116 } 5117 5118 5119 static void autostart_arrays(int part) 5120 { 5121 mdk_rdev_t *rdev; 5122 int i; 5123 5124 printk(KERN_INFO "md: Autodetecting RAID arrays.\n"); 5125 5126 for (i = 0; i < dev_cnt; i++) { 5127 dev_t dev = detected_devices[i]; 5128 5129 rdev = md_import_device(dev,0, 0); 5130 if (IS_ERR(rdev)) 5131 continue; 5132 5133 if (test_bit(Faulty, &rdev->flags)) { 5134 MD_BUG(); 5135 continue; 5136 } 5137 list_add(&rdev->same_set, &pending_raid_disks); 5138 } 5139 dev_cnt = 0; 5140 5141 autorun_devices(part); 5142 } 5143 5144 #endif 5145 5146 static __exit void md_exit(void) 5147 { 5148 mddev_t *mddev; 5149 struct list_head *tmp; 5150 int i; 5151 blk_unregister_region(MKDEV(MAJOR_NR,0), MAX_MD_DEVS); 5152 blk_unregister_region(MKDEV(mdp_major,0), MAX_MD_DEVS << MdpMinorShift); 5153 for (i=0; i < MAX_MD_DEVS; i++) 5154 devfs_remove("md/%d", i); 5155 for (i=0; i < MAX_MD_DEVS; i++) 5156 devfs_remove("md/d%d", i); 5157 5158 devfs_remove("md"); 5159 5160 unregister_blkdev(MAJOR_NR,"md"); 5161 unregister_blkdev(mdp_major, "mdp"); 5162 unregister_reboot_notifier(&md_notifier); 5163 unregister_sysctl_table(raid_table_header); 5164 remove_proc_entry("mdstat", NULL); 5165 ITERATE_MDDEV(mddev,tmp) { 5166 struct gendisk *disk = mddev->gendisk; 5167 if (!disk) 5168 continue; 5169 export_array(mddev); 5170 del_gendisk(disk); 5171 put_disk(disk); 5172 mddev->gendisk = NULL; 5173 mddev_put(mddev); 5174 } 5175 } 5176 5177 module_init(md_init) 5178 module_exit(md_exit) 5179 5180 static int get_ro(char *buffer, struct kernel_param *kp) 5181 { 5182 return sprintf(buffer, "%d", start_readonly); 5183 } 5184 static int set_ro(const char *val, struct kernel_param *kp) 5185 { 5186 char *e; 5187 int num = simple_strtoul(val, &e, 10); 5188 if (*val && (*e == '\0' || *e == '\n')) { 5189 start_readonly = num; 5190 return 0; 5191 } 5192 return -EINVAL; 5193 } 5194 5195 module_param_call(start_ro, set_ro, get_ro, NULL, 0600); 5196 module_param(start_dirty_degraded, int, 0644); 5197 5198 5199 EXPORT_SYMBOL(register_md_personality); 5200 EXPORT_SYMBOL(unregister_md_personality); 5201 EXPORT_SYMBOL(md_error); 5202 EXPORT_SYMBOL(md_done_sync); 5203 EXPORT_SYMBOL(md_write_start); 5204 EXPORT_SYMBOL(md_write_end); 5205 EXPORT_SYMBOL(md_register_thread); 5206 EXPORT_SYMBOL(md_unregister_thread); 5207 EXPORT_SYMBOL(md_wakeup_thread); 5208 EXPORT_SYMBOL(md_print_devices); 5209 EXPORT_SYMBOL(md_check_recovery); 5210 MODULE_LICENSE("GPL"); 5211 MODULE_ALIAS("md"); 5212 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR); 5213
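/*
 * Example: consuming the mdstat_poll() support above from user space.
 * This is an illustrative sketch only (not part of the driver); it relies
 * solely on the /proc/mdstat behaviour implemented in this file: a full
 * re-read refreshes mdstat_info->event, and poll() then blocks until
 * md_new_event() bumps md_event_count again.
 *
 *	#include <fcntl.h>
 *	#include <poll.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		char buf[4096];
 *		struct pollfd pfd;
 *		ssize_t n;
 *
 *		pfd.fd = open("/proc/mdstat", O_RDONLY);
 *		if (pfd.fd < 0)
 *			return 1;
 *		pfd.events = POLLPRI;
 *
 *		for (;;) {
 *			lseek(pfd.fd, 0, SEEK_SET);
 *			while ((n = read(pfd.fd, buf, sizeof(buf))) > 0)
 *				fwrite(buf, 1, n, stdout);	// dump current status
 *			poll(&pfd, 1, -1);	// wake on the next md event
 *		}
 *	}
 */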