/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/kthread.h>
#include <linux/raid/md.h>
#include <linux/raid/bitmap.h>
#include <linux/sysctl.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/delay.h>

#define MAJOR_NR MD_MAJOR

/* 63 partitions with the alternate major number (mdp) */
#define MdpMinorShift 6

#define DEBUG 0
#define dprintk(x...) ((void)(DEBUG && printk(x)))


#ifndef MODULE
static void autostart_arrays(int part);
#endif

static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static void md_print_devices(void);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);

#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }

/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * You can change it via /proc/sys/dev/raid/speed_limit_min and _max,
 * or /sys/block/mdX/md/sync_speed_{min,max}.
 */
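/*
 * Illustrative usage from userspace (values are in KB/sec, as above;
 * "md0" is just an example device name):
 *
 *	# guarantee at least 10 MB/sec for reconstruction, system-wide
 *	echo 10000 > /proc/sys/dev/raid/speed_limit_min
 *	# or for a single array only
 *	echo 10000 > /sys/block/md0/md/sync_speed_min
 */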
static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(mddev_t *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(mddev_t *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}

static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MIN,
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= &proc_dointvec,
	},
	{
		.ctl_name	= DEV_RAID_SPEED_LIMIT_MAX,
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= &proc_dointvec,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_dir_table[] = {
	{
		.ctl_name	= DEV_RAID,
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ .ctl_name = 0 }
};

static ctl_table raid_root_table[] = {
	{
		.ctl_name	= CTL_DEV,
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ .ctl_name = 0 }
};

static struct block_device_operations md_fops;

static int start_readonly;

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}

/*
 * Enables iteration over all existing md arrays;
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);


/*
 * Iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
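 *
 * A minimal usage sketch (md_print_devices() below iterates the same way):
 *
 *	mddev_t *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp)
 *		printk("%s\n", mdname(mddev));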
 */
#define for_each_mddev(mddev,tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		tmp = all_mddevs.next;					\
		mddev = NULL;});					\
	     ({ if (tmp != &all_mddevs)					\
			mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (mddev) mddev_put(mddev);				\
		mddev = list_entry(tmp, mddev_t, all_mddevs);		\
		tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		tmp = tmp->next;})					\
		)


static int md_fail_request(struct request_queue *q, struct bio *bio)
{
	bio_io_error(bio);
	return 0;
}

static inline mddev_t *mddev_get(mddev_t *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws)
{
	mddev_t *mddev = container_of(ws, mddev_t, del_work);
	kobject_del(&mddev->kobj);
	kobject_put(&mddev->kobj);
}

static void mddev_put(mddev_t *mddev)
{
	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    !mddev->hold_active) {
		list_del(&mddev->all_mddevs);
		if (mddev->gendisk) {
			/* we did a probe so need to clean up.
			 * Call schedule_work inside the spinlock
			 * so that flush_scheduled_work() after
			 * mddev_find will succeed in waiting for the
			 * work to be done.
			 */
			INIT_WORK(&mddev->del_work, mddev_delayed_delete);
			schedule_work(&mddev->del_work);
		} else
			kfree(mddev);
	}
	spin_unlock(&all_mddevs_lock);
}

static mddev_t * mddev_find(dev_t unit)
{
	mddev_t *mddev, *new = NULL;

 retry:
	spin_lock(&all_mddevs_lock);

	if (unit) {
		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
			if (mddev->unit == unit) {
				mddev_get(mddev);
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return mddev;
			}

		if (new) {
			list_add(&new->all_mddevs, &all_mddevs);
			spin_unlock(&all_mddevs_lock);
			new->hold_active = UNTIL_IOCTL;
			return new;
		}
	} else if (new) {
		/* find an unused unit number */
		static int next_minor = 512;
		int start = next_minor;
		int is_free = 0;
		int dev = 0;
		while (!is_free) {
			dev = MKDEV(MD_MAJOR, next_minor);
			next_minor++;
			if (next_minor > MINORMASK)
				next_minor = 0;
			if (next_minor == start) {
				/* Oh dear, all in use. */
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return NULL;
			}

			is_free = 1;
			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
				if (mddev->unit == dev) {
					is_free = 0;
					break;
				}
		}
		new->unit = dev;
		new->md_minor = MINOR(dev);
		new->hold_active = UNTIL_STOP;
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mutex_init(&new->reconfig_mutex);
	INIT_LIST_HEAD(&new->disks);
	INIT_LIST_HEAD(&new->all_mddevs);
	init_timer(&new->safemode_timer);
	atomic_set(&new->active, 1);
	atomic_set(&new->openers, 0);
	spin_lock_init(&new->write_lock);
	init_waitqueue_head(&new->sb_wait);
	init_waitqueue_head(&new->recovery_wait);
	new->reshape_position = MaxSector;
	new->resync_min = 0;
	new->resync_max = MaxSector;
	new->level = LEVEL_NONE;

	goto retry;
}

static inline int mddev_lock(mddev_t * mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(mddev_t * mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}

static inline void mddev_unlock(mddev_t * mddev)
{
	mutex_unlock(&mddev->reconfig_mutex);

	md_wakeup_thread(mddev->thread);
}

static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}

static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

static struct mdk_personality *find_pers(int level, char *clevel)
{
	struct mdk_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}

/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct block_device *bdev)
{
	sector_t num_sectors = bdev->bd_inode->i_size / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}

static sector_t calc_num_sectors(mdk_rdev_t *rdev, unsigned chunk_size)
{
	sector_t num_sectors = rdev->sb_start;

	if (chunk_size)
		num_sectors &= ~((sector_t)chunk_size/512 - 1);
	return num_sectors;
}

static int alloc_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page)
		MD_BUG();

	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
		return -ENOMEM;
	}

	return 0;
}

static void free_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_start = 0;
		rdev->size = 0;
	}
}

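/*
 * Superblock writes go through md_super_write() and complete in
 * super_written().  A sketch of the barrier-retry protocol implemented
 * by the three functions that follow:
 *
 *	md_super_write()	submits a barrier write (a clone of the
 *				bio) unless the rdev is already marked
 *				BarriersNotsupp;
 *	super_written_barrier()	on -EOPNOTSUPP marks the rdev
 *				BarriersNotsupp and queues the original,
 *				barrier-free bio on mddev->biolist;
 *	md_super_wait()		resubmits everything on ->biolist and
 *				waits for ->pending_writes to reach zero.
 */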
static void super_written(struct bio *bio, int error)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk("md: super_written gets error=%d, uptodate=%d\n",
		       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
		WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
		md_error(mddev, rdev);
	}

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	bio_put(bio);
}

static void super_written_barrier(struct bio *bio, int error)
{
	struct bio *bio2 = bio->bi_private;
	mdk_rdev_t *rdev = bio2->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) &&
	    error == -EOPNOTSUPP) {
		unsigned long flags;
		/* barriers don't appear to be supported :-( */
		set_bit(BarriersNotsupp, &rdev->flags);
		mddev->barriers_work = 0;
		spin_lock_irqsave(&mddev->write_lock, flags);
		bio2->bi_next = mddev->biolist;
		mddev->biolist = bio2;
		spin_unlock_irqrestore(&mddev->write_lock, flags);
		wake_up(&mddev->sb_wait);
		bio_put(bio);
	} else {
		bio_put(bio2);
		bio->bi_private = rdev;
		super_written(bio, error);
	}
}

void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
		   sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 *
	 * As we might need to resubmit the request if BIO_RW_BARRIER
	 * causes -EOPNOTSUPP, we allocate a spare bio...
	 */
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNC);

	bio->bi_bdev = rdev->bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;
	bio->bi_rw = rw;

	atomic_inc(&mddev->pending_writes);
	if (!test_bit(BarriersNotsupp, &rdev->flags)) {
		struct bio *rbio;
		rw |= (1<<BIO_RW_BARRIER);
		rbio = bio_clone(bio, GFP_NOIO);
		rbio->bi_private = bio;
		rbio->bi_end_io = super_written_barrier;
		submit_bio(rw, rbio);
	} else
		submit_bio(rw, bio);
}

void md_super_wait(mddev_t *mddev)
{
	/* wait for all superblock writes that were scheduled to complete.
	 * if any had to be retried (due to BARRIER problems), retry them
	 */
	DEFINE_WAIT(wq);
	for(;;) {
		prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&mddev->pending_writes)==0)
			break;
		while (mddev->biolist) {
			struct bio *bio;
			spin_lock_irq(&mddev->write_lock);
			bio = mddev->biolist;
			mddev->biolist = bio->bi_next ;
			bio->bi_next = NULL;
			spin_unlock_irq(&mddev->write_lock);
			submit_bio(bio->bi_rw, bio);
		}
		schedule();
	}
	finish_wait(&mddev->sb_wait, &wq);
}

static void bi_complete(struct bio *bio, int error)
{
	complete((struct completion*)bio->bi_private);
}

int sync_page_io(struct block_device *bdev, sector_t sector, int size,
		   struct page *page, int rw)
{
	struct bio *bio = bio_alloc(GFP_NOIO, 1);
	struct completion event;
	int ret;

	rw |= (1 << BIO_RW_SYNC);

	bio->bi_bdev = bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);

static int read_disk_sb(mdk_rdev_t * rdev, int size)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->sb_page) {
		MD_BUG();
		return -EINVAL;
	}
	if (rdev->sb_loaded)
		return 0;


	if (!sync_page_io(rdev->bdev, rdev->sb_start, size, rdev->sb_page, READ))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
		bdevname(rdev->bdev,b));
	return -EINVAL;
}

static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}


static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}
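/*
 * Worked example for md_csum_fold(): 0xffff0001 folds to
 * (0x0001 + 0xffff) = 0x10000, and the second fold gives
 * (0x0000 + 0x0001) = 0x0001, i.e. a 16-bit end-around-carry sum.
 */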
static unsigned int calc_sb_csum(mdp_super_t * sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);


#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures.  It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences).  However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}


/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */

struct super_type  {
	char		    *name;
	struct module	    *owner;
	int		    (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev,
					  int minor_version);
	int		    (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	void		    (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	unsigned long long  (*rdev_size_change)(mdk_rdev_t *rdev,
						sector_t num_sectors);
};
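/*
 * Handlers are dispatched via the super_types[] table (defined after the
 * version-specific implementations below), indexed by the metadata major
 * version; e.g. sync_sbs() later does:
 *
 *	super_types[mddev->major_version].sync_super(mddev, rdev);
 */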
/*
 * load_super for 0.90.0
 */
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;

	/*
	 * Calculate the position of the superblock (512byte sectors),
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	rdev->sb_start = calc_dev_sboffset(rdev->bdev);

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret) return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = (mdp_super_t*)page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version,
			b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
			b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;

	if (sb->state & (1<<MD_SB_BITMAP_PRESENT)) {
		if (sb->level != 1 && sb->level != 4
		    && sb->level != 5 && sb->level != 6
		    && sb->level != 10) {
			/* FIXME use a better test */
			printk(KERN_WARNING
			       "md: bitmaps not supported for this level.\n");
			goto abort;
		}
	}

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	rdev->size = calc_num_sectors(rdev, sb->chunk_size) / 2;

	if (rdev->size < sb->size && sb->level > 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

 abort:
	return ret;
}

/*
 * validate_super for 0.90.0
 */
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);
	clear_bit(BarriersNotsupp, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->external = 0;
		mddev->chunk_size = sb->chunk_size;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->size = sb->size;
		mddev->events = ev1;
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = MD_SB_BYTES >> 9;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk = sb->new_chunk;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk = mddev->chunk_size;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
				sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_file == NULL)
			mddev->bitmap_offset = mddev->default_bitmap_offset;

	} else if (mddev->pers == NULL) {
		/* Insist on a good event counter while assembling */
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}

/*
 * sync_super for 0.90.0
 */
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_super_t *sb;
	mdk_rdev_t *rdev2;
	int next_spare = mddev->raid_disks;


	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = (mdp_super_t*)page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size  = mddev->size;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync)
	{
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_size;

	if (mddev->bitmap && mddev->bitmap_file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		mdp_disk_t *d;
		int desc_nr;
		if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
		    && !test_bit(Faulty, &rdev2->flags))
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags)
		    && !test_bit(Faulty, &rdev2->flags))
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (test_bit(In_sync, &rdev2->flags)) {
			d->state = (1<<MD_DISK_ACTIVE);
			d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}

/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	if (num_sectors && num_sectors < rdev->mddev->size * 2)
		return 0; /* component must fit device */
	if (rdev->mddev->bitmap_offset)
		return 0; /* can't move bitmap */
	rdev->sb_start = calc_dev_sboffset(rdev->bdev);
	if (!num_sectors || num_sectors > rdev->sb_start)
		num_sectors = rdev->sb_start;
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors / 2; /* kB for sysfs */
}


/*
 * version 1 superblock
 */

static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
	__le32 disk_csum;
	u32 csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32*)sb;
	int i;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (i=0; size>=4; size -= 4 )
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(__le16*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}

static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_start;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock in 512byte sectors.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
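	 *
	 * Worked example for minor_version 0 (illustrative numbers): on a
	 * 2097147-sector device, 2097147 - 16 = 2097131, rounded down to
	 * an 8-sector (4K) boundary, gives sb_start = 2097128, i.e. 19
	 * sectors (between 8K and 12K) from the end of the device.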
	 */
	switch(minor_version) {
	case 0:
		sb_start = rdev->bdev->bd_inode->i_size >> 9;
		sb_start -= 8*2;
		sb_start &= ~(sector_t)(4*2-1);
		break;
	case 1:
		sb_start = 0;
		break;
	case 2:
		sb_start = 8;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_start = sb_start;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;


	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET)) {
		if (sb->level != cpu_to_le32(1) &&
		    sb->level != cpu_to_le32(4) &&
		    sb->level != cpu_to_le32(5) &&
		    sb->level != cpu_to_le32(6) &&
		    sb->level != cpu_to_le32(10)) {
			printk(KERN_WARNING
			       "md: bitmaps not supported for this level.\n");
			return -EINVAL;
		}
	}

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb =
			(struct mdp_superblock_1*)page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
				" superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version)
		rdev->size = ((rdev->bdev->bd_inode->i_size>>9) - le64_to_cpu(sb->data_offset)) / 2;
	else
		rdev->size = rdev->sb_start / 2;
	if (rdev->size < le64_to_cpu(sb->data_size)/2)
		return -EINVAL;
	rdev->size = le64_to_cpu(sb->data_size)/2;
	if (le32_to_cpu(sb->chunksize))
		rdev->size &= ~((sector_t)le32_to_cpu(sb->chunksize)/2 - 1);

	if (le64_to_cpu(sb->size) > rdev->size*2)
		return -EINVAL;
	return ret;
}

static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);
	clear_bit(BarriersNotsupp, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->external = 0;
		mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9;
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->size = le64_to_cpu(sb->size)/2;
		mddev->events = ev1;
		mddev->bitmap_offset = 0;
		mddev->default_bitmap_offset = 1024 >> 9;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks =  (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_file == NULL )
			mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset);

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk = mddev->chunk_size;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on a good event counter while assembling */
		++ev1;
		if (ev1 < mddev->events)
			return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET))
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
			else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}

static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb;
	mdk_rdev_t *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data.
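	 *
	 * Roles are encoded in sb->dev_roles[] as written out below:
	 *	0xffff	spare
	 *	0xfffe	faulty (or an unused slot)
	 *	other	the device's raid_disk slot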
	 */

	sb = (struct mdp_superblock_1*)page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad1, 0, sizeof(sb->pad1));
	memset(sb->pad2, 0, sizeof(sb->pad2));
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->size<<1);

	if (mddev->bitmap && mddev->bitmap_file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	if (rdev->raid_disk >= 0 &&
	    !test_bit(In_sync, &rdev->flags) &&
	    rdev->recovery_offset > 0) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
		sb->recovery_offset = cpu_to_le64(rdev->recovery_offset);
	}

	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
		sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9);
	}

	max_dev = 0;
	list_for_each_entry(rdev2, &mddev->disks, same_set)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	if (max_dev > le32_to_cpu(sb->max_dev))
		sb->max_dev = cpu_to_le32(max_dev);
	for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0)
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->sb_csum = calc_sb_1_csum(sb);
}

static unsigned long long
super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	struct mdp_superblock_1 *sb;
	sector_t max_sectors;
	if (num_sectors && num_sectors < rdev->mddev->size * 2)
		return 0; /* component must fit device */
	if (rdev->sb_start < rdev->data_offset) {
		/* minor versions 1 and 2; superblock before data */
		max_sectors = rdev->bdev->bd_inode->i_size >> 9;
		max_sectors -= rdev->data_offset;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
	} else if (rdev->mddev->bitmap_offset) {
		/* minor version 0 with bitmap we can't move */
		return 0;
	} else {
		/* minor version 0; superblock after data */
		sector_t sb_start;
		sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2;
		sb_start &= ~(sector_t)(4*2 - 1);
		max_sectors = rdev->size * 2 + sb_start - rdev->sb_start;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
		rdev->sb_start = sb_start;
	}
	sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page);
	sb->data_size = cpu_to_le64(num_sectors);
	sb->super_offset = rdev->sb_start;
	sb->sb_csum = calc_sb_1_csum(sb);
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors / 2; /* kB for sysfs */
}

static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	    = super_90_load,
		.validate_super	    = super_90_validate,
		.sync_super	    = super_90_sync,
		.rdev_size_change   = super_90_rdev_size_change,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	    = super_1_load,
		.validate_super	    = super_1_validate,
		.sync_super	    = super_1_sync,
		.rdev_size_change   = super_1_rdev_size_change,
	},
};
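/*
 * Note: match_mddev_units() below compares bd_contains, i.e. the whole
 * disk a partition lives on, so two arrays "match" when any of their
 * component devices share a physical disk (e.g. one array on sda1 and
 * another on sda2; device names here are purely illustrative).
 */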
static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
	mdk_rdev_t *rdev, *rdev2;

	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev1)
		rdev_for_each_rcu(rdev2, mddev2)
			if (rdev->bdev->bd_contains ==
			    rdev2->bdev->bd_contains) {
				rcu_read_unlock();
				return 1;
			}
	rcu_read_unlock();
	return 0;
}

static LIST_HEAD(pending_raid_disks);

static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev)
{
	char b[BDEVNAME_SIZE];
	struct kobject *ko;
	char *s;
	int err;

	if (rdev->mddev) {
		MD_BUG();
		return -EINVAL;
	}

	/* prevent duplicates */
	if (find_rdev(mddev, rdev->bdev->bd_dev))
		return -EEXIST;

	/* make sure rdev->size exceeds mddev->size */
	if (rdev->size && (mddev->size == 0 || rdev->size < mddev->size)) {
		if (mddev->pers) {
			/* Cannot change size, so fail
			 * If mddev->level <= 0, then we don't care
			 * about aligning sizes (e.g. linear)
			 */
			if (mddev->level > 0)
				return -ENOSPC;
		} else
			mddev->size = rdev->size;
	}

	/* Verify rdev->desc_nr is unique.
	 * If it is -1, assign a free number, else
	 * check number is not in use
	 */
	if (rdev->desc_nr < 0) {
		int choice = 0;
		if (mddev->pers) choice = mddev->raid_disks;
		while (find_rdev_nr(mddev, choice))
			choice++;
		rdev->desc_nr = choice;
	} else {
		if (find_rdev_nr(mddev, rdev->desc_nr))
			return -EBUSY;
	}
	bdevname(rdev->bdev,b);
	while ( (s=strchr(b, '/')) != NULL)
		*s = '!';

	rdev->mddev = mddev;
	printk(KERN_INFO "md: bind<%s>\n", b);

	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
		goto fail;

	ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
	if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) {
		kobject_del(&rdev->kobj);
		goto fail;
	}
	rdev->sysfs_state = sysfs_get_dirent(rdev->kobj.sd, "state");

	list_add_rcu(&rdev->same_set, &mddev->disks);
	bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk);

	/* May as well allow recovery to be retried once */
	mddev->recovery_disabled = 0;
	return 0;

 fail:
	printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
	       b, mdname(mddev));
	return err;
}

static void md_delayed_delete(struct work_struct *ws)
{
	mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
	kobject_del(&rdev->kobj);
	kobject_put(&rdev->kobj);
}

static void unbind_rdev_from_array(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->mddev) {
		MD_BUG();
		return;
	}
	bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk);
	list_del_rcu(&rdev->same_set);
	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
	rdev->mddev = NULL;
	sysfs_remove_link(&rdev->kobj, "block");
	sysfs_put(rdev->sysfs_state);
	rdev->sysfs_state = NULL;
	/* We need to delay this, otherwise we can deadlock when
	 * writing to 'remove' to "dev/state".  We also need
	 * to delay it due to rcu usage.
	 */
	synchronize_rcu();
	INIT_WORK(&rdev->del_work, md_delayed_delete);
	kobject_get(&rdev->kobj);
	schedule_work(&rdev->del_work);
}

/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
 */
static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
{
	int err = 0;
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "md: could not open %s.\n",
			__bdevname(dev, b));
		return PTR_ERR(bdev);
	}
	err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev);
	if (err) {
		printk(KERN_ERR "md: could not bd_claim %s.\n",
			bdevname(bdev, b));
		blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
		return err;
	}
	if (!shared)
		set_bit(AllReserved, &rdev->flags);
	rdev->bdev = bdev;
	return err;
}

static void unlock_rdev(mdk_rdev_t *rdev)
{
	struct block_device *bdev = rdev->bdev;
	rdev->bdev = NULL;
	if (!bdev)
		MD_BUG();
	bd_release(bdev);
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
}

void md_autodetect_dev(dev_t dev);

static void export_rdev(mdk_rdev_t * rdev)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: export_rdev(%s)\n",
		bdevname(rdev->bdev,b));
	if (rdev->mddev)
		MD_BUG();
	free_disk_sb(rdev);
#ifndef MODULE
	if (test_bit(AutoDetected, &rdev->flags))
		md_autodetect_dev(rdev->bdev->bd_dev);
#endif
	unlock_rdev(rdev);
	kobject_put(&rdev->kobj);
}

static void kick_rdev_from_array(mdk_rdev_t * rdev)
{
	unbind_rdev_from_array(rdev);
	export_rdev(rdev);
}

static void export_array(mddev_t *mddev)
{
	mdk_rdev_t *rdev, *tmp;

	rdev_for_each(rdev, tmp, mddev) {
		if (!rdev->mddev) {
			MD_BUG();
			continue;
		}
		kick_rdev_from_array(rdev);
	}
	if (!list_empty(&mddev->disks))
		MD_BUG();
	mddev->raid_disks = 0;
	mddev->major_version = 0;
}

static void print_desc(mdp_disk_t *desc)
{
	printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
		desc->major,desc->minor,desc->raid_disk,desc->state);
}

static void print_sb_90(mdp_super_t *sb)
{
	int i;

	printk(KERN_INFO
	       "md:  SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
		sb->major_version, sb->minor_version, sb->patch_version,
		sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
		sb->ctime);
	printk(KERN_INFO "md:     L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
		sb->level, sb->size, sb->nr_disks, sb->raid_disks,
		sb->md_minor, sb->layout, sb->chunk_size);
	printk(KERN_INFO "md:     UT:%08x ST:%d AD:%d WD:%d"
		" FD:%d SD:%d CSUM:%08x E:%08lx\n",
		sb->utime, sb->state, sb->active_disks, sb->working_disks,
		sb->failed_disks, sb->spare_disks,
		sb->sb_csum, (unsigned long)sb->events_lo);

	printk(KERN_INFO);
	for (i = 0; i < MD_SB_DISKS; i++) {
		mdp_disk_t *desc;

		desc = sb->disks + i;
		if (desc->number || desc->major || desc->minor ||
		    desc->raid_disk || (desc->state && (desc->state != 4))) {
			printk("     D %2d: ", i);
			print_desc(desc);
		}
	}
	printk(KERN_INFO "md:     THIS: ");
	print_desc(&sb->this_disk);
}
static void print_sb_1(struct mdp_superblock_1 *sb)
{
	__u8 *uuid;

	uuid = sb->set_uuid;
	printk(KERN_INFO "md:  SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x"
			":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n"
	       KERN_INFO "md:    Name: \"%s\" CT:%llu\n",
		le32_to_cpu(sb->major_version),
		le32_to_cpu(sb->feature_map),
		uuid[0], uuid[1], uuid[2], uuid[3],
		uuid[4], uuid[5], uuid[6], uuid[7],
		uuid[8], uuid[9], uuid[10], uuid[11],
		uuid[12], uuid[13], uuid[14], uuid[15],
		sb->set_name,
		(unsigned long long)le64_to_cpu(sb->ctime)
		       & MD_SUPERBLOCK_1_TIME_SEC_MASK);

	uuid = sb->device_uuid;
	printk(KERN_INFO "md:       L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
			" RO:%llu\n"
	       KERN_INFO "md:     Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x"
			":%02x%02x%02x%02x%02x%02x\n"
	       KERN_INFO "md:       (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
	       KERN_INFO "md:         (MaxDev:%u) \n",
		le32_to_cpu(sb->level),
		(unsigned long long)le64_to_cpu(sb->size),
		le32_to_cpu(sb->raid_disks),
		le32_to_cpu(sb->layout),
		le32_to_cpu(sb->chunksize),
		(unsigned long long)le64_to_cpu(sb->data_offset),
		(unsigned long long)le64_to_cpu(sb->data_size),
		(unsigned long long)le64_to_cpu(sb->super_offset),
		(unsigned long long)le64_to_cpu(sb->recovery_offset),
		le32_to_cpu(sb->dev_number),
		uuid[0], uuid[1], uuid[2], uuid[3],
		uuid[4], uuid[5], uuid[6], uuid[7],
		uuid[8], uuid[9], uuid[10], uuid[11],
		uuid[12], uuid[13], uuid[14], uuid[15],
		sb->devflags,
		(unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
		(unsigned long long)le64_to_cpu(sb->events),
		(unsigned long long)le64_to_cpu(sb->resync_offset),
		le32_to_cpu(sb->sb_csum),
		le32_to_cpu(sb->max_dev)
		);
}

static void print_rdev(mdk_rdev_t *rdev, int major_version)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: rdev %s, SZ:%08llu F:%d S:%d DN:%u\n",
		bdevname(rdev->bdev,b), (unsigned long long)rdev->size,
	        test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
	        rdev->desc_nr);
	if (rdev->sb_loaded) {
		printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
		switch (major_version) {
		case 0:
			print_sb_90((mdp_super_t*)page_address(rdev->sb_page));
			break;
		case 1:
			print_sb_1((struct mdp_superblock_1 *)page_address(rdev->sb_page));
			break;
		}
	} else
		printk(KERN_INFO "md: no rdev superblock!\n");
}

static void md_print_devices(void)
{
	struct list_head *tmp;
	mdk_rdev_t *rdev;
	mddev_t *mddev;
	char b[BDEVNAME_SIZE];

	printk("\n");
	printk("md:	**********************************\n");
	printk("md:	* <COMPLETE RAID STATE PRINTOUT> *\n");
	printk("md:	**********************************\n");
	for_each_mddev(mddev, tmp) {

		if (mddev->bitmap)
			bitmap_print_sb(mddev->bitmap);
		else
			printk("%s: ", mdname(mddev));
		list_for_each_entry(rdev, &mddev->disks, same_set)
			printk("<%s>", bdevname(rdev->bdev,b));
		printk("\n");

		list_for_each_entry(rdev, &mddev->disks, same_set)
			print_rdev(rdev, mddev->major_version);
	}
	printk("md:	**********************************\n");
	printk("\n");
}


static void sync_sbs(mddev_t * mddev, int nospares)
{
	/* Update each superblock (in-memory image), but
	 * if we are allowed to, skip spares which already
	 * have the right event counter, or have one earlier
	 * (which would mean they aren't being marked as dirty
	 * with the rest of the array)
	 */
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (rdev->sb_events == mddev->events ||
		    (nospares &&
		     rdev->raid_disk < 0 &&
		     (rdev->sb_events&1)==0 &&
		     rdev->sb_events+1 == mddev->events)) {
			/* Don't update this superblock */
			rdev->sb_loaded = 2;
		} else {
			super_types[mddev->major_version].
				sync_super(mddev, rdev);
			rdev->sb_loaded = 1;
		}
	}
}

static void md_update_sb(mddev_t * mddev, int force_change)
{
	mdk_rdev_t *rdev;
	int sync_req;
	int nospares = 0;

	if (mddev->external)
		return;
repeat:
	spin_lock_irq(&mddev->write_lock);

	set_bit(MD_CHANGE_PENDING, &mddev->flags);
	if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
		force_change = 1;
	if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
		/* just a clean <-> dirty transition, possibly leave spares alone,
		 * though if 'events' doesn't have the right even/odd parity, we
		 * will have to update the spares after all
		 */
		nospares = 1;
	if (force_change)
		nospares = 0;
	if (mddev->degraded)
		/* If the array is degraded, then skipping spares is both
		 * dangerous and fairly pointless.
		 * Dangerous because a device that was removed from the array
		 * might have an event count that still looks up-to-date,
		 * so it can be re-added without a resync.
		 * Pointless because if there are any spares to skip,
		 * then a recovery will happen and soon that array won't
		 * be degraded any more and the spare can go back to sleep then.
		 */
		nospares = 0;

	sync_req = mddev->in_sync;
	mddev->utime = get_seconds();

	/* If this is just a dirty<->clean transition, and the array is clean
	 * and 'events' is odd, we can roll back to the previous clean state */
	if (nospares
	    && (mddev->in_sync && mddev->recovery_cp == MaxSector)
	    && (mddev->events & 1)
	    && mddev->events != 1)
		mddev->events--;
	else {
		/* otherwise we have to go forward and ... */
		mddev->events ++;
		if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */
			/* .. if the array isn't clean, insist on an odd 'events' */
			if ((mddev->events&1)==0) {
				mddev->events++;
				nospares = 0;
			}
		} else {
			/* otherwise insist on an even 'events' (for clean states) */
			if ((mddev->events&1)) {
				mddev->events++;
				nospares = 0;
			}
		}
	}
	if (!mddev->events) {
		/*
		 * oops, this 64-bit counter should never wrap.
		 * Either we are in around ~1 trillion A.C., assuming
		 * 1 reboot per second, or we have a bug:
		 */
		MD_BUG();
		mddev->events --;
	}

	/*
	 * do not write anything to disk if using
	 * nonpersistent superblocks
	 */
	if (!mddev->persistent) {
		if (!mddev->external)
			clear_bit(MD_CHANGE_PENDING, &mddev->flags);

		spin_unlock_irq(&mddev->write_lock);
		wake_up(&mddev->sb_wait);
		return;
	}
	sync_sbs(mddev, nospares);
	spin_unlock_irq(&mddev->write_lock);

	dprintk(KERN_INFO
		"md: updating %s RAID superblock on device (in sync %d)\n",
		mdname(mddev),mddev->in_sync);

	bitmap_update_sb(mddev->bitmap);
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		char b[BDEVNAME_SIZE];
		dprintk(KERN_INFO "md: ");
		if (rdev->sb_loaded != 1)
			continue; /* no noise on spare devices */
		if (test_bit(Faulty, &rdev->flags))
			dprintk("(skipping faulty ");

		dprintk("%s ", bdevname(rdev->bdev,b));
		if (!test_bit(Faulty, &rdev->flags)) {
			md_super_write(mddev,rdev,
				       rdev->sb_start, rdev->sb_size,
				       rdev->sb_page);
			dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
				bdevname(rdev->bdev,b),
				(unsigned long long)rdev->sb_start);
			rdev->sb_events = mddev->events;

		} else
			dprintk(")\n");
		if (mddev->level == LEVEL_MULTIPATH)
			/* only need to write one superblock... */
			break;
	}
	md_super_wait(mddev);
	/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync != sync_req ||
	    test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
		/* have to write it out again */
		spin_unlock_irq(&mddev->write_lock);
		goto repeat;
	}
	clear_bit(MD_CHANGE_PENDING, &mddev->flags);
	spin_unlock_irq(&mddev->write_lock);
	wake_up(&mddev->sb_wait);

}

/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either.  For this we use cmd_match.
 */
static int cmd_match(const char *cmd, const char *str)
{
	/* See if cmd, written into a sysfs file, matches
	 * str.  They must either be the same, or cmd can
	 * have a trailing newline.
They must either be the same, or cmd can 1928 * have a trailing newline 1929 */ 1930 while (*cmd && *str && *cmd == *str) { 1931 cmd++; 1932 str++; 1933 } 1934 if (*cmd == '\n') 1935 cmd++; 1936 if (*str || *cmd) 1937 return 0; 1938 return 1; 1939 } 1940 1941 struct rdev_sysfs_entry { 1942 struct attribute attr; 1943 ssize_t (*show)(mdk_rdev_t *, char *); 1944 ssize_t (*store)(mdk_rdev_t *, const char *, size_t); 1945 }; 1946 1947 static ssize_t 1948 state_show(mdk_rdev_t *rdev, char *page) 1949 { 1950 char *sep = ""; 1951 size_t len = 0; 1952 1953 if (test_bit(Faulty, &rdev->flags)) { 1954 len+= sprintf(page+len, "%sfaulty",sep); 1955 sep = ","; 1956 } 1957 if (test_bit(In_sync, &rdev->flags)) { 1958 len += sprintf(page+len, "%sin_sync",sep); 1959 sep = ","; 1960 } 1961 if (test_bit(WriteMostly, &rdev->flags)) { 1962 len += sprintf(page+len, "%swrite_mostly",sep); 1963 sep = ","; 1964 } 1965 if (test_bit(Blocked, &rdev->flags)) { 1966 len += sprintf(page+len, "%sblocked", sep); 1967 sep = ","; 1968 } 1969 if (!test_bit(Faulty, &rdev->flags) && 1970 !test_bit(In_sync, &rdev->flags)) { 1971 len += sprintf(page+len, "%sspare", sep); 1972 sep = ","; 1973 } 1974 return len+sprintf(page+len, "\n"); 1975 } 1976 1977 static ssize_t 1978 state_store(mdk_rdev_t *rdev, const char *buf, size_t len) 1979 { 1980 /* can write 1981 * faulty - simulates an error 1982 * remove - disconnects the device 1983 * writemostly - sets write_mostly 1984 * -writemostly - clears write_mostly 1985 * blocked - sets the Blocked flag 1986 * -blocked - clears the Blocked flag 1987 */ 1988 int err = -EINVAL; 1989 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { 1990 md_error(rdev->mddev, rdev); 1991 err = 0; 1992 } else if (cmd_match(buf, "remove")) { 1993 if (rdev->raid_disk >= 0) 1994 err = -EBUSY; 1995 else { 1996 mddev_t *mddev = rdev->mddev; 1997 kick_rdev_from_array(rdev); 1998 if (mddev->pers) 1999 md_update_sb(mddev, 1); 2000 md_new_event(mddev); 2001 err = 0; 2002 } 2003 } else if (cmd_match(buf, "writemostly")) { 2004 set_bit(WriteMostly, &rdev->flags); 2005 err = 0; 2006 } else if (cmd_match(buf, "-writemostly")) { 2007 clear_bit(WriteMostly, &rdev->flags); 2008 err = 0; 2009 } else if (cmd_match(buf, "blocked")) { 2010 set_bit(Blocked, &rdev->flags); 2011 err = 0; 2012 } else if (cmd_match(buf, "-blocked")) { 2013 clear_bit(Blocked, &rdev->flags); 2014 wake_up(&rdev->blocked_wait); 2015 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2016 md_wakeup_thread(rdev->mddev->thread); 2017 2018 err = 0; 2019 } 2020 if (!err && rdev->sysfs_state) 2021 sysfs_notify_dirent(rdev->sysfs_state); 2022 return err ? err : len; 2023 }
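/*
 * (Editor's illustration, not part of the original file: cmd_match()
 * treats a trailing newline as optional but rejects both prefixes and
 * extensions, so
 *
 *   cmd_match("faulty\n", "faulty")  returns 1  (newline stripped)
 *   cmd_match("faulty",   "faulty")  returns 1  (exact match)
 *   cmd_match("fault",    "faulty")  returns 0  (*str left over)
 *   cmd_match("faultyX",  "faulty")  returns 0  (*cmd left over)
 *
 * which is why 'echo faulty > state' works whether or not the shell
 * appends a newline.)
 */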
2024 static struct rdev_sysfs_entry rdev_state = 2025 __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store); 2026 2027 static ssize_t 2028 errors_show(mdk_rdev_t *rdev, char *page) 2029 { 2030 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); 2031 } 2032 2033 static ssize_t 2034 errors_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2035 { 2036 char *e; 2037 unsigned long n = simple_strtoul(buf, &e, 10); 2038 if (*buf && (*e == 0 || *e == '\n')) { 2039 atomic_set(&rdev->corrected_errors, n); 2040 return len; 2041 } 2042 return -EINVAL; 2043 } 2044 static struct rdev_sysfs_entry rdev_errors = 2045 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store); 2046 2047 static ssize_t 2048 slot_show(mdk_rdev_t *rdev, char *page) 2049 { 2050 if (rdev->raid_disk < 0) 2051 return sprintf(page, "none\n"); 2052 else 2053 return sprintf(page, "%d\n", rdev->raid_disk); 2054 } 2055 2056 static ssize_t 2057 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2058 { 2059 char *e; 2060 int err; 2061 char nm[20]; 2062 int slot = simple_strtoul(buf, &e, 10); 2063 if (strncmp(buf, "none", 4)==0) 2064 slot = -1; 2065 else if (e==buf || (*e && *e!= '\n')) 2066 return -EINVAL; 2067 if (rdev->mddev->pers && slot == -1) { 2068 /* Setting 'slot' on an active array requires also 2069 * updating the 'rd%d' link, and communicating 2070 * with the personality with ->hot_*_disk. 2071 * For now we only support removing 2072 * failed/spare devices. This normally happens automatically, 2073 * but not when the metadata is externally managed. 2074 */ 2075 if (rdev->raid_disk == -1) 2076 return -EEXIST; 2077 /* personality does all needed checks */ 2078 if (rdev->mddev->pers->hot_add_disk == NULL) 2079 return -EINVAL; 2080 err = rdev->mddev->pers-> 2081 hot_remove_disk(rdev->mddev, rdev->raid_disk); 2082 if (err) 2083 return err; 2084 sprintf(nm, "rd%d", rdev->raid_disk); 2085 sysfs_remove_link(&rdev->mddev->kobj, nm); 2086 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2087 md_wakeup_thread(rdev->mddev->thread); 2088 } else if (rdev->mddev->pers) { 2089 mdk_rdev_t *rdev2; 2090 /* Activating a spare .. or possibly reactivating 2091 * if we ever get bitmaps working here. 2092 */ 2093 2094 if (rdev->raid_disk != -1) 2095 return -EBUSY; 2096 2097 if (rdev->mddev->pers->hot_add_disk == NULL) 2098 return -EINVAL; 2099 2100 list_for_each_entry(rdev2, &rdev->mddev->disks, same_set) 2101 if (rdev2->raid_disk == slot) 2102 return -EEXIST; 2103 2104 rdev->raid_disk = slot; 2105 if (test_bit(In_sync, &rdev->flags)) 2106 rdev->saved_raid_disk = slot; 2107 else 2108 rdev->saved_raid_disk = -1; 2109 err = rdev->mddev->pers-> 2110 hot_add_disk(rdev->mddev, rdev); 2111 if (err) { 2112 rdev->raid_disk = -1; 2113 return err; 2114 } else 2115 sysfs_notify_dirent(rdev->sysfs_state); 2116 sprintf(nm, "rd%d", rdev->raid_disk); 2117 if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm)) 2118 printk(KERN_WARNING 2119 "md: cannot register " 2120 "%s for %s\n", 2121 nm, mdname(rdev->mddev)); 2122 2123 /* don't wake anyone up, leave that to userspace.
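 * (Editor's note: this spare-activation path exists mainly for
 * externally managed metadata, where a user-space manager writes to
 * 'slot' and is expected to kick off any recovery itself.)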
*/ 2124 } else { 2125 if (slot >= rdev->mddev->raid_disks) 2126 return -ENOSPC; 2127 rdev->raid_disk = slot; 2128 /* assume it is working */ 2129 clear_bit(Faulty, &rdev->flags); 2130 clear_bit(WriteMostly, &rdev->flags); 2131 set_bit(In_sync, &rdev->flags); 2132 sysfs_notify_dirent(rdev->sysfs_state); 2133 } 2134 return len; 2135 } 2136 2137 2138 static struct rdev_sysfs_entry rdev_slot = 2139 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store); 2140 2141 static ssize_t 2142 offset_show(mdk_rdev_t *rdev, char *page) 2143 { 2144 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); 2145 } 2146 2147 static ssize_t 2148 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2149 { 2150 char *e; 2151 unsigned long long offset = simple_strtoull(buf, &e, 10); 2152 if (e==buf || (*e && *e != '\n')) 2153 return -EINVAL; 2154 if (rdev->mddev->pers && rdev->raid_disk >= 0) 2155 return -EBUSY; 2156 if (rdev->size && rdev->mddev->external) 2157 /* Must set offset before size, so overlap checks 2158 * can be sane */ 2159 return -EBUSY; 2160 rdev->data_offset = offset; 2161 return len; 2162 } 2163 2164 static struct rdev_sysfs_entry rdev_offset = 2165 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store); 2166 2167 static ssize_t 2168 rdev_size_show(mdk_rdev_t *rdev, char *page) 2169 { 2170 return sprintf(page, "%llu\n", (unsigned long long)rdev->size); 2171 } 2172 2173 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2) 2174 { 2175 /* check if two start/length pairs overlap */ 2176 if (s1+l1 <= s2) 2177 return 0; 2178 if (s2+l2 <= s1) 2179 return 0; 2180 return 1; 2181 } 2182 2183 static ssize_t 2184 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2185 { 2186 unsigned long long size; 2187 unsigned long long oldsize = rdev->size; 2188 mddev_t *my_mddev = rdev->mddev; 2189 2190 if (strict_strtoull(buf, 10, &size) < 0) 2191 return -EINVAL; 2192 if (my_mddev->pers && rdev->raid_disk >= 0) { 2193 if (my_mddev->persistent) { 2194 size = super_types[my_mddev->major_version]. 2195 rdev_size_change(rdev, size * 2); 2196 if (!size) 2197 return -EBUSY; 2198 } else if (!size) { 2199 size = (rdev->bdev->bd_inode->i_size >> 10); 2200 size -= rdev->data_offset/2; 2201 } 2202 } 2203 if (size < my_mddev->size) 2204 return -EINVAL; /* component must fit device */ 2205 2206 rdev->size = size; 2207 if (size > oldsize && my_mddev->external) { 2208 /* need to check that all other rdevs with the same ->bdev 2209 * do not overlap. We need to unlock the mddev to avoid 2210 * a deadlock. We have already changed rdev->size, and if 2211 * we have to change it back, we will have the lock again. 2212 */ 2213 mddev_t *mddev; 2214 int overlap = 0; 2215 struct list_head *tmp; 2216 2217 mddev_unlock(my_mddev); 2218 for_each_mddev(mddev, tmp) { 2219 mdk_rdev_t *rdev2; 2220 2221 mddev_lock(mddev); 2222 list_for_each_entry(rdev2, &mddev->disks, same_set) 2223 if (test_bit(AllReserved, &rdev2->flags) || 2224 (rdev->bdev == rdev2->bdev && 2225 rdev != rdev2 && 2226 overlaps(rdev->data_offset, rdev->size * 2, 2227 rdev2->data_offset, 2228 rdev2->size * 2))) { 2229 overlap = 1; 2230 break; 2231 } 2232 mddev_unlock(mddev); 2233 if (overlap) { 2234 mddev_put(mddev); 2235 break; 2236 } 2237 } 2238 mddev_lock(my_mddev); 2239 if (overlap) { 2240 /* Someone else could have slipped in a size 2241 * change here, but doing so is just silly. 
2242 * We put oldsize back because we *know* it is 2243 * safe, and trust userspace not to race with 2244 * itself 2245 */ 2246 rdev->size = oldsize; 2247 return -EBUSY; 2248 } 2249 } 2250 return len; 2251 } 2252 2253 static struct rdev_sysfs_entry rdev_size = 2254 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store); 2255 2256 static struct attribute *rdev_default_attrs[] = { 2257 &rdev_state.attr, 2258 &rdev_errors.attr, 2259 &rdev_slot.attr, 2260 &rdev_offset.attr, 2261 &rdev_size.attr, 2262 NULL, 2263 }; 2264 static ssize_t 2265 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 2266 { 2267 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 2268 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); 2269 mddev_t *mddev = rdev->mddev; 2270 ssize_t rv; 2271 2272 if (!entry->show) 2273 return -EIO; 2274 2275 rv = mddev ? mddev_lock(mddev) : -EBUSY; 2276 if (!rv) { 2277 if (rdev->mddev == NULL) 2278 rv = -EBUSY; 2279 else 2280 rv = entry->show(rdev, page); 2281 mddev_unlock(mddev); 2282 } 2283 return rv; 2284 } 2285 2286 static ssize_t 2287 rdev_attr_store(struct kobject *kobj, struct attribute *attr, 2288 const char *page, size_t length) 2289 { 2290 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 2291 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); 2292 ssize_t rv; 2293 mddev_t *mddev = rdev->mddev; 2294 2295 if (!entry->store) 2296 return -EIO; 2297 if (!capable(CAP_SYS_ADMIN)) 2298 return -EACCES; 2299 rv = mddev ? mddev_lock(mddev): -EBUSY; 2300 if (!rv) { 2301 if (rdev->mddev == NULL) 2302 rv = -EBUSY; 2303 else 2304 rv = entry->store(rdev, page, length); 2305 mddev_unlock(mddev); 2306 } 2307 return rv; 2308 } 2309 2310 static void rdev_free(struct kobject *ko) 2311 { 2312 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj); 2313 kfree(rdev); 2314 } 2315 static struct sysfs_ops rdev_sysfs_ops = { 2316 .show = rdev_attr_show, 2317 .store = rdev_attr_store, 2318 }; 2319 static struct kobj_type rdev_ktype = { 2320 .release = rdev_free, 2321 .sysfs_ops = &rdev_sysfs_ops, 2322 .default_attrs = rdev_default_attrs, 2323 }; 2324 2325 /* 2326 * Import a device. If 'super_format' >= 0, then sanity check the superblock 2327 * 2328 * mark the device faulty if: 2329 * 2330 * - the device is nonexistent (zero size) 2331 * - the device has no valid superblock 2332 * 2333 * a faulty rdev _never_ has rdev->sb set. 
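 *
 * (Editor's note: on failure this returns an ERR_PTR()-encoded errno
 * rather than NULL, so callers must check the result with IS_ERR(),
 * as new_dev_store() below does:
 *
 *   rdev = md_import_device(dev, -1, -1);
 *   if (IS_ERR(rdev))
 *           return PTR_ERR(rdev);
 * )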
2334 */ 2335 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor) 2336 { 2337 char b[BDEVNAME_SIZE]; 2338 int err; 2339 mdk_rdev_t *rdev; 2340 sector_t size; 2341 2342 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); 2343 if (!rdev) { 2344 printk(KERN_ERR "md: could not alloc mem for new device!\n"); 2345 return ERR_PTR(-ENOMEM); 2346 } 2347 2348 if ((err = alloc_disk_sb(rdev))) 2349 goto abort_free; 2350 2351 err = lock_rdev(rdev, newdev, super_format == -2); 2352 if (err) 2353 goto abort_free; 2354 2355 kobject_init(&rdev->kobj, &rdev_ktype); 2356 2357 rdev->desc_nr = -1; 2358 rdev->saved_raid_disk = -1; 2359 rdev->raid_disk = -1; 2360 rdev->flags = 0; 2361 rdev->data_offset = 0; 2362 rdev->sb_events = 0; 2363 atomic_set(&rdev->nr_pending, 0); 2364 atomic_set(&rdev->read_errors, 0); 2365 atomic_set(&rdev->corrected_errors, 0); 2366 2367 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 2368 if (!size) { 2369 printk(KERN_WARNING 2370 "md: %s has zero or unknown size, marking faulty!\n", 2371 bdevname(rdev->bdev,b)); 2372 err = -EINVAL; 2373 goto abort_free; 2374 } 2375 2376 if (super_format >= 0) { 2377 err = super_types[super_format]. 2378 load_super(rdev, NULL, super_minor); 2379 if (err == -EINVAL) { 2380 printk(KERN_WARNING 2381 "md: %s does not have a valid v%d.%d " 2382 "superblock, not importing!\n", 2383 bdevname(rdev->bdev,b), 2384 super_format, super_minor); 2385 goto abort_free; 2386 } 2387 if (err < 0) { 2388 printk(KERN_WARNING 2389 "md: could not read %s's sb, not importing!\n", 2390 bdevname(rdev->bdev,b)); 2391 goto abort_free; 2392 } 2393 } 2394 2395 INIT_LIST_HEAD(&rdev->same_set); 2396 init_waitqueue_head(&rdev->blocked_wait); 2397 2398 return rdev; 2399 2400 abort_free: 2401 if (rdev->sb_page) { 2402 if (rdev->bdev) 2403 unlock_rdev(rdev); 2404 free_disk_sb(rdev); 2405 } 2406 kfree(rdev); 2407 return ERR_PTR(err); 2408 } 2409 2410 /* 2411 * Check a full RAID array for plausibility 2412 */ 2413 2414 2415 static void analyze_sbs(mddev_t * mddev) 2416 { 2417 int i; 2418 mdk_rdev_t *rdev, *freshest, *tmp; 2419 char b[BDEVNAME_SIZE]; 2420 2421 freshest = NULL; 2422 rdev_for_each(rdev, tmp, mddev) 2423 switch (super_types[mddev->major_version]. 2424 load_super(rdev, freshest, mddev->minor_version)) { 2425 case 1: 2426 freshest = rdev; 2427 break; 2428 case 0: 2429 break; 2430 default: 2431 printk( KERN_ERR \ 2432 "md: fatal superblock inconsistency in %s" 2433 " -- removing from array\n", 2434 bdevname(rdev->bdev,b)); 2435 kick_rdev_from_array(rdev); 2436 } 2437 2438 2439 super_types[mddev->major_version]. 2440 validate_super(mddev, freshest); 2441 2442 i = 0; 2443 rdev_for_each(rdev, tmp, mddev) { 2444 if (rdev != freshest) 2445 if (super_types[mddev->major_version]. 
2446 validate_super(mddev, rdev)) { 2447 printk(KERN_WARNING "md: kicking non-fresh %s" 2448 " from array!\n", 2449 bdevname(rdev->bdev,b)); 2450 kick_rdev_from_array(rdev); 2451 continue; 2452 } 2453 if (mddev->level == LEVEL_MULTIPATH) { 2454 rdev->desc_nr = i++; 2455 rdev->raid_disk = rdev->desc_nr; 2456 set_bit(In_sync, &rdev->flags); 2457 } else if (rdev->raid_disk >= mddev->raid_disks) { 2458 rdev->raid_disk = -1; 2459 clear_bit(In_sync, &rdev->flags); 2460 } 2461 } 2462 2463 2464 2465 if (mddev->recovery_cp != MaxSector && 2466 mddev->level >= 1) 2467 printk(KERN_ERR "md: %s: raid array is not clean" 2468 " -- starting background reconstruction\n", 2469 mdname(mddev)); 2470 2471 } 2472 2473 static void md_safemode_timeout(unsigned long data); 2474 2475 static ssize_t 2476 safe_delay_show(mddev_t *mddev, char *page) 2477 { 2478 int msec = (mddev->safemode_delay*1000)/HZ; 2479 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000); 2480 } 2481 static ssize_t 2482 safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len) 2483 { 2484 int scale=1; 2485 int dot=0; 2486 int i; 2487 unsigned long msec; 2488 char buf[30]; 2489 2490 /* remove a period, and count digits after it */ 2491 if (len >= sizeof(buf)) 2492 return -EINVAL; 2493 strlcpy(buf, cbuf, sizeof(buf)); 2494 for (i=0; i<len; i++) { 2495 if (dot) { 2496 if (isdigit(buf[i])) { 2497 buf[i-1] = buf[i]; 2498 scale *= 10; 2499 } 2500 buf[i] = 0; 2501 } else if (buf[i] == '.') { 2502 dot=1; 2503 buf[i] = 0; 2504 } 2505 } 2506 if (strict_strtoul(buf, 10, &msec) < 0) 2507 return -EINVAL; 2508 msec = (msec * 1000) / scale; 2509 if (msec == 0) 2510 mddev->safemode_delay = 0; 2511 else { 2512 unsigned long old_delay = mddev->safemode_delay; 2513 mddev->safemode_delay = (msec*HZ)/1000; 2514 if (mddev->safemode_delay == 0) 2515 mddev->safemode_delay = 1; 2516 if (mddev->safemode_delay < old_delay) 2517 md_safemode_timeout((unsigned long)mddev); 2518 } 2519 return len; 2520 } 2521 static struct md_sysfs_entry md_safe_delay = 2522 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store); 2523 2524 static ssize_t 2525 level_show(mddev_t *mddev, char *page) 2526 { 2527 struct mdk_personality *p = mddev->pers; 2528 if (p) 2529 return sprintf(page, "%s\n", p->name); 2530 else if (mddev->clevel[0]) 2531 return sprintf(page, "%s\n", mddev->clevel); 2532 else if (mddev->level != LEVEL_NONE) 2533 return sprintf(page, "%d\n", mddev->level); 2534 else 2535 return 0; 2536 } 2537 2538 static ssize_t 2539 level_store(mddev_t *mddev, const char *buf, size_t len) 2540 { 2541 ssize_t rv = len; 2542 if (mddev->pers) 2543 return -EBUSY; 2544 if (len == 0) 2545 return 0; 2546 if (len >= sizeof(mddev->clevel)) 2547 return -ENOSPC; 2548 strncpy(mddev->clevel, buf, len); 2549 if (mddev->clevel[len-1] == '\n') 2550 len--; 2551 mddev->clevel[len] = 0; 2552 mddev->level = LEVEL_NONE; 2553 return rv; 2554 } 2555 2556 static struct md_sysfs_entry md_level = 2557 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store); 2558 2559 2560 static ssize_t 2561 layout_show(mddev_t *mddev, char *page) 2562 { 2563 /* just a number, not meaningful for all levels */ 2564 if (mddev->reshape_position != MaxSector && 2565 mddev->layout != mddev->new_layout) 2566 return sprintf(page, "%d (%d)\n", 2567 mddev->new_layout, mddev->layout); 2568 return sprintf(page, "%d\n", mddev->layout); 2569 } 2570 2571 static ssize_t 2572 layout_store(mddev_t *mddev, const char *buf, size_t len) 2573 { 2574 char *e; 2575 unsigned long n = simple_strtoul(buf, &e, 10); 2576 2577 if 
(!*buf || (*e && *e != '\n')) 2578 return -EINVAL; 2579 2580 if (mddev->pers) 2581 return -EBUSY; 2582 if (mddev->reshape_position != MaxSector) 2583 mddev->new_layout = n; 2584 else 2585 mddev->layout = n; 2586 return len; 2587 } 2588 static struct md_sysfs_entry md_layout = 2589 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store); 2590 2591 2592 static ssize_t 2593 raid_disks_show(mddev_t *mddev, char *page) 2594 { 2595 if (mddev->raid_disks == 0) 2596 return 0; 2597 if (mddev->reshape_position != MaxSector && 2598 mddev->delta_disks != 0) 2599 return sprintf(page, "%d (%d)\n", mddev->raid_disks, 2600 mddev->raid_disks - mddev->delta_disks); 2601 return sprintf(page, "%d\n", mddev->raid_disks); 2602 } 2603 2604 static int update_raid_disks(mddev_t *mddev, int raid_disks); 2605 2606 static ssize_t 2607 raid_disks_store(mddev_t *mddev, const char *buf, size_t len) 2608 { 2609 char *e; 2610 int rv = 0; 2611 unsigned long n = simple_strtoul(buf, &e, 10); 2612 2613 if (!*buf || (*e && *e != '\n')) 2614 return -EINVAL; 2615 2616 if (mddev->pers) 2617 rv = update_raid_disks(mddev, n); 2618 else if (mddev->reshape_position != MaxSector) { 2619 int olddisks = mddev->raid_disks - mddev->delta_disks; 2620 mddev->delta_disks = n - olddisks; 2621 mddev->raid_disks = n; 2622 } else 2623 mddev->raid_disks = n; 2624 return rv ? rv : len; 2625 } 2626 static struct md_sysfs_entry md_raid_disks = 2627 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store); 2628 2629 static ssize_t 2630 chunk_size_show(mddev_t *mddev, char *page) 2631 { 2632 if (mddev->reshape_position != MaxSector && 2633 mddev->chunk_size != mddev->new_chunk) 2634 return sprintf(page, "%d (%d)\n", mddev->new_chunk, 2635 mddev->chunk_size); 2636 return sprintf(page, "%d\n", mddev->chunk_size); 2637 } 2638 2639 static ssize_t 2640 chunk_size_store(mddev_t *mddev, const char *buf, size_t len) 2641 { 2642 /* can only set chunk_size if array is not yet active */ 2643 char *e; 2644 unsigned long n = simple_strtoul(buf, &e, 10); 2645 2646 if (!*buf || (*e && *e != '\n')) 2647 return -EINVAL; 2648 2649 if (mddev->pers) 2650 return -EBUSY; 2651 else if (mddev->reshape_position != MaxSector) 2652 mddev->new_chunk = n; 2653 else 2654 mddev->chunk_size = n; 2655 return len; 2656 } 2657 static struct md_sysfs_entry md_chunk_size = 2658 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store); 2659 2660 static ssize_t 2661 resync_start_show(mddev_t *mddev, char *page) 2662 { 2663 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp); 2664 } 2665 2666 static ssize_t 2667 resync_start_store(mddev_t *mddev, const char *buf, size_t len) 2668 { 2669 char *e; 2670 unsigned long long n = simple_strtoull(buf, &e, 10); 2671 2672 if (mddev->pers) 2673 return -EBUSY; 2674 if (!*buf || (*e && *e != '\n')) 2675 return -EINVAL; 2676 2677 mddev->recovery_cp = n; 2678 return len; 2679 } 2680 static struct md_sysfs_entry md_resync_start = 2681 __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store); 2682 2683 /* 2684 * The array state can be: 2685 * 2686 * clear 2687 * No devices, no size, no level 2688 * Equivalent to STOP_ARRAY ioctl 2689 * inactive 2690 * May have some settings, but array is not active 2691 * all IO results in error 2692 * When written, doesn't tear down array, but just stops it 2693 * suspended (not supported yet) 2694 * All IO requests will block. The array can be reconfigured. 
2695 * Writing this, if accepted, will block until array is quiescent 2696 * readonly 2697 * no resync can happen. no superblocks get written. 2698 * write requests fail 2699 * read-auto 2700 * like readonly, but behaves like 'clean' on a write request. 2701 * 2702 * clean - no pending writes, but otherwise active. 2703 * When written to inactive array, starts without resync 2704 * If a write request arrives then 2705 * if metadata is known, mark 'dirty' and switch to 'active'. 2706 * if not known, block and switch to write-pending 2707 * If written to an active array that has pending writes, then fails. 2708 * active 2709 * fully active: IO and resync can be happening. 2710 * When written to inactive array, starts with resync 2711 * 2712 * write-pending 2713 * clean, but writes are blocked waiting for 'active' to be written. 2714 * 2715 * active-idle 2716 * like active, but no writes have been seen for a while (100msec). 2717 * 2718 */ 2719 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active, 2720 write_pending, active_idle, bad_word}; 2721 static char *array_states[] = { 2722 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active", 2723 "write-pending", "active-idle", NULL }; 2724 2725 static int match_word(const char *word, char **list) 2726 { 2727 int n; 2728 for (n=0; list[n]; n++) 2729 if (cmd_match(word, list[n])) 2730 break; 2731 return n; 2732 } 2733 2734 static ssize_t 2735 array_state_show(mddev_t *mddev, char *page) 2736 { 2737 enum array_state st = inactive; 2738 2739 if (mddev->pers) 2740 switch(mddev->ro) { 2741 case 1: 2742 st = readonly; 2743 break; 2744 case 2: 2745 st = read_auto; 2746 break; 2747 case 0: 2748 if (mddev->in_sync) 2749 st = clean; 2750 else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags)) 2751 st = write_pending; 2752 else if (mddev->safemode) 2753 st = active_idle; 2754 else 2755 st = active; 2756 } 2757 else { 2758 if (list_empty(&mddev->disks) && 2759 mddev->raid_disks == 0 && 2760 mddev->size == 0) 2761 st = clear; 2762 else 2763 st = inactive; 2764 } 2765 return sprintf(page, "%s\n", array_states[st]); 2766 } 2767 2768 static int do_md_stop(mddev_t * mddev, int ro, int is_open); 2769 static int do_md_run(mddev_t * mddev); 2770 static int restart_array(mddev_t *mddev); 2771 2772 static ssize_t 2773 array_state_store(mddev_t *mddev, const char *buf, size_t len) 2774 { 2775 int err = -EINVAL; 2776 enum array_state st = match_word(buf, array_states); 2777 switch(st) { 2778 case bad_word: 2779 break; 2780 case clear: 2781 /* stopping an active array */ 2782 if (atomic_read(&mddev->openers) > 0) 2783 return -EBUSY; 2784 err = do_md_stop(mddev, 0, 0); 2785 break; 2786 case inactive: 2787 /* stopping an active array */ 2788 if (mddev->pers) { 2789 if (atomic_read(&mddev->openers) > 0) 2790 return -EBUSY; 2791 err = do_md_stop(mddev, 2, 0); 2792 } else 2793 err = 0; /* already inactive */ 2794 break; 2795 case suspended: 2796 break; /* not supported yet */ 2797 case readonly: 2798 if (mddev->pers) 2799 err = do_md_stop(mddev, 1, 0); 2800 else { 2801 mddev->ro = 1; 2802 set_disk_ro(mddev->gendisk, 1); 2803 err = do_md_run(mddev); 2804 } 2805 break; 2806 case read_auto: 2807 if (mddev->pers) { 2808 if (mddev->ro == 0) 2809 err = do_md_stop(mddev, 1, 0); 2810 else if (mddev->ro == 1) 2811 err = restart_array(mddev); 2812 if (err == 0) { 2813 mddev->ro = 2; 2814 set_disk_ro(mddev->gendisk, 0); 2815 } 2816 } else { 2817 mddev->ro = 2; 2818 err = do_md_run(mddev); 2819 } 2820 break; 2821 case clean: 2822 if 
(mddev->pers) { 2823 restart_array(mddev); 2824 spin_lock_irq(&mddev->write_lock); 2825 if (atomic_read(&mddev->writes_pending) == 0) { 2826 if (mddev->in_sync == 0) { 2827 mddev->in_sync = 1; 2828 if (mddev->safemode == 1) 2829 mddev->safemode = 0; 2830 if (mddev->persistent) 2831 set_bit(MD_CHANGE_CLEAN, 2832 &mddev->flags); 2833 } 2834 err = 0; 2835 } else 2836 err = -EBUSY; 2837 spin_unlock_irq(&mddev->write_lock); 2838 } else { 2839 mddev->ro = 0; 2840 mddev->recovery_cp = MaxSector; 2841 err = do_md_run(mddev); 2842 } 2843 break; 2844 case active: 2845 if (mddev->pers) { 2846 restart_array(mddev); 2847 if (mddev->external) 2848 clear_bit(MD_CHANGE_CLEAN, &mddev->flags); 2849 wake_up(&mddev->sb_wait); 2850 err = 0; 2851 } else { 2852 mddev->ro = 0; 2853 set_disk_ro(mddev->gendisk, 0); 2854 err = do_md_run(mddev); 2855 } 2856 break; 2857 case write_pending: 2858 case active_idle: 2859 /* these cannot be set */ 2860 break; 2861 } 2862 if (err) 2863 return err; 2864 else { 2865 sysfs_notify_dirent(mddev->sysfs_state); 2866 return len; 2867 } 2868 } 2869 static struct md_sysfs_entry md_array_state = 2870 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); 2871 2872 static ssize_t 2873 null_show(mddev_t *mddev, char *page) 2874 { 2875 return -EINVAL; 2876 } 2877 2878 static ssize_t 2879 new_dev_store(mddev_t *mddev, const char *buf, size_t len) 2880 { 2881 /* buf must be %d:%d\n? giving major and minor numbers */ 2882 /* The new device is added to the array. 2883 * If the array has a persistent superblock, we read the 2884 * superblock to initialise info and check validity. 2885 * Otherwise, only checking done is that in bind_rdev_to_array, 2886 * which mainly checks size. 2887 */ 2888 char *e; 2889 int major = simple_strtoul(buf, &e, 10); 2890 int minor; 2891 dev_t dev; 2892 mdk_rdev_t *rdev; 2893 int err; 2894 2895 if (!*buf || *e != ':' || !e[1] || e[1] == '\n') 2896 return -EINVAL; 2897 minor = simple_strtoul(e+1, &e, 10); 2898 if (*e && *e != '\n') 2899 return -EINVAL; 2900 dev = MKDEV(major, minor); 2901 if (major != MAJOR(dev) || 2902 minor != MINOR(dev)) 2903 return -EOVERFLOW; 2904 2905 2906 if (mddev->persistent) { 2907 rdev = md_import_device(dev, mddev->major_version, 2908 mddev->minor_version); 2909 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { 2910 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, 2911 mdk_rdev_t, same_set); 2912 err = super_types[mddev->major_version] 2913 .load_super(rdev, rdev0, mddev->minor_version); 2914 if (err < 0) 2915 goto out; 2916 } 2917 } else if (mddev->external) 2918 rdev = md_import_device(dev, -2, -1); 2919 else 2920 rdev = md_import_device(dev, -1, -1); 2921 2922 if (IS_ERR(rdev)) 2923 return PTR_ERR(rdev); 2924 err = bind_rdev_to_array(rdev, mddev); 2925 out: 2926 if (err) 2927 export_rdev(rdev); 2928 return err ? err : len; 2929 } 2930 2931 static struct md_sysfs_entry md_new_device = 2932 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store); 2933 2934 static ssize_t 2935 bitmap_store(mddev_t *mddev, const char *buf, size_t len) 2936 { 2937 char *end; 2938 unsigned long chunk, end_chunk; 2939 2940 if (!mddev->bitmap) 2941 goto out; 2942 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... 
(range) */ 2943 while (*buf) { 2944 chunk = end_chunk = simple_strtoul(buf, &end, 0); 2945 if (buf == end) break; 2946 if (*end == '-') { /* range */ 2947 buf = end + 1; 2948 end_chunk = simple_strtoul(buf, &end, 0); 2949 if (buf == end) break; 2950 } 2951 if (*end && !isspace(*end)) break; 2952 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk); 2953 buf = end; 2954 while (isspace(*buf)) buf++; 2955 } 2956 bitmap_unplug(mddev->bitmap); /* flush the bits to disk */ 2957 out: 2958 return len; 2959 } 2960 2961 static struct md_sysfs_entry md_bitmap = 2962 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store); 2963 2964 static ssize_t 2965 size_show(mddev_t *mddev, char *page) 2966 { 2967 return sprintf(page, "%llu\n", (unsigned long long)mddev->size); 2968 } 2969 2970 static int update_size(mddev_t *mddev, sector_t num_sectors); 2971 2972 static ssize_t 2973 size_store(mddev_t *mddev, const char *buf, size_t len) 2974 { 2975 /* If array is inactive, we can reduce the component size, but 2976 * not increase it (except from 0). 2977 * If array is active, we can try an on-line resize 2978 */ 2979 char *e; 2980 int err = 0; 2981 unsigned long long size = simple_strtoull(buf, &e, 10); 2982 if (!*buf || *buf == '\n' || 2983 (*e && *e != '\n')) 2984 return -EINVAL; 2985 2986 if (mddev->pers) { 2987 err = update_size(mddev, size * 2); 2988 md_update_sb(mddev, 1); 2989 } else { 2990 if (mddev->size == 0 || 2991 mddev->size > size) 2992 mddev->size = size; 2993 else 2994 err = -ENOSPC; 2995 } 2996 return err ? err : len; 2997 } 2998 2999 static struct md_sysfs_entry md_size = 3000 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store); 3001 3002 3003 /* Metadata version. 3004 * This is one of 3005 * 'none' for arrays with no metadata (good luck...) 3006 * 'external' for arrays with externally managed metadata, 3007 * or N.M for internally known formats 3008 */ 3009 static ssize_t 3010 metadata_show(mddev_t *mddev, char *page) 3011 { 3012 if (mddev->persistent) 3013 return sprintf(page, "%d.%d\n", 3014 mddev->major_version, mddev->minor_version); 3015 else if (mddev->external) 3016 return sprintf(page, "external:%s\n", mddev->metadata_type); 3017 else 3018 return sprintf(page, "none\n"); 3019 } 3020 3021 static ssize_t 3022 metadata_store(mddev_t *mddev, const char *buf, size_t len) 3023 { 3024 int major, minor; 3025 char *e; 3026 /* Changing the details of 'external' metadata is 3027 * always permitted. Otherwise there must be 3028 * no devices attached to the array.
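 * (Editor's examples: writing "0.90" or "1.2" selects an internal
 * superblock format; "external:imsm" would record externally managed
 * metadata of that name; "none" marks the array as having no
 * persistent metadata at all.)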
3029 */ 3030 if (mddev->external && strncmp(buf, "external:", 9) == 0) 3031 ; 3032 else if (!list_empty(&mddev->disks)) 3033 return -EBUSY; 3034 3035 if (cmd_match(buf, "none")) { 3036 mddev->persistent = 0; 3037 mddev->external = 0; 3038 mddev->major_version = 0; 3039 mddev->minor_version = 90; 3040 return len; 3041 } 3042 if (strncmp(buf, "external:", 9) == 0) { 3043 size_t namelen = len-9; 3044 if (namelen >= sizeof(mddev->metadata_type)) 3045 namelen = sizeof(mddev->metadata_type)-1; 3046 strncpy(mddev->metadata_type, buf+9, namelen); 3047 mddev->metadata_type[namelen] = 0; 3048 if (namelen && mddev->metadata_type[namelen-1] == '\n') 3049 mddev->metadata_type[--namelen] = 0; 3050 mddev->persistent = 0; 3051 mddev->external = 1; 3052 mddev->major_version = 0; 3053 mddev->minor_version = 90; 3054 return len; 3055 } 3056 major = simple_strtoul(buf, &e, 10); 3057 if (e==buf || *e != '.') 3058 return -EINVAL; 3059 buf = e+1; 3060 minor = simple_strtoul(buf, &e, 10); 3061 if (e==buf || (*e && *e != '\n') ) 3062 return -EINVAL; 3063 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL) 3064 return -ENOENT; 3065 mddev->major_version = major; 3066 mddev->minor_version = minor; 3067 mddev->persistent = 1; 3068 mddev->external = 0; 3069 return len; 3070 } 3071 3072 static struct md_sysfs_entry md_metadata = 3073 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); 3074 3075 static ssize_t 3076 action_show(mddev_t *mddev, char *page) 3077 { 3078 char *type = "idle"; 3079 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 3080 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) { 3081 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 3082 type = "reshape"; 3083 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 3084 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 3085 type = "resync"; 3086 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) 3087 type = "check"; 3088 else 3089 type = "repair"; 3090 } else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) 3091 type = "recover"; 3092 } 3093 return sprintf(page, "%s\n", type); 3094 } 3095 3096 static ssize_t 3097 action_store(mddev_t *mddev, const char *page, size_t len) 3098 { 3099 if (!mddev->pers || !mddev->pers->sync_request) 3100 return -EINVAL; 3101 3102 if (cmd_match(page, "idle")) { 3103 if (mddev->sync_thread) { 3104 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 3105 md_unregister_thread(mddev->sync_thread); 3106 mddev->sync_thread = NULL; 3107 mddev->recovery = 0; 3108 } 3109 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 3110 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 3111 return -EBUSY; 3112 else if (cmd_match(page, "resync")) 3113 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3114 else if (cmd_match(page, "recover")) { 3115 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 3116 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3117 } else if (cmd_match(page, "reshape")) { 3118 int err; 3119 if (mddev->pers->start_reshape == NULL) 3120 return -EINVAL; 3121 err = mddev->pers->start_reshape(mddev); 3122 if (err) 3123 return err; 3124 sysfs_notify(&mddev->kobj, NULL, "degraded"); 3125 } else { 3126 if (cmd_match(page, "check")) 3127 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); 3128 else if (!cmd_match(page, "repair")) 3129 return -EINVAL; 3130 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 3131 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 3132 } 3133 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3134 md_wakeup_thread(mddev->thread); 3135 
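/* (Editor's note: nothing runs synchronously here -- the woken array
 * thread sees the MD_RECOVERY_* bits set above and starts the requested
 * action, so e.g. 'echo check > /sys/block/mdX/md/sync_action' returns
 * as soon as the request has been recorded.) */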
sysfs_notify_dirent(mddev->sysfs_action); 3136 return len; 3137 } 3138 3139 static ssize_t 3140 mismatch_cnt_show(mddev_t *mddev, char *page) 3141 { 3142 return sprintf(page, "%llu\n", 3143 (unsigned long long) mddev->resync_mismatches); 3144 } 3145 3146 static struct md_sysfs_entry md_scan_mode = 3147 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); 3148 3149 3150 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt); 3151 3152 static ssize_t 3153 sync_min_show(mddev_t *mddev, char *page) 3154 { 3155 return sprintf(page, "%d (%s)\n", speed_min(mddev), 3156 mddev->sync_speed_min ? "local": "system"); 3157 } 3158 3159 static ssize_t 3160 sync_min_store(mddev_t *mddev, const char *buf, size_t len) 3161 { 3162 int min; 3163 char *e; 3164 if (strncmp(buf, "system", 6)==0) { 3165 mddev->sync_speed_min = 0; 3166 return len; 3167 } 3168 min = simple_strtoul(buf, &e, 10); 3169 if (buf == e || (*e && *e != '\n') || min <= 0) 3170 return -EINVAL; 3171 mddev->sync_speed_min = min; 3172 return len; 3173 } 3174 3175 static struct md_sysfs_entry md_sync_min = 3176 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); 3177 3178 static ssize_t 3179 sync_max_show(mddev_t *mddev, char *page) 3180 { 3181 return sprintf(page, "%d (%s)\n", speed_max(mddev), 3182 mddev->sync_speed_max ? "local": "system"); 3183 } 3184 3185 static ssize_t 3186 sync_max_store(mddev_t *mddev, const char *buf, size_t len) 3187 { 3188 int max; 3189 char *e; 3190 if (strncmp(buf, "system", 6)==0) { 3191 mddev->sync_speed_max = 0; 3192 return len; 3193 } 3194 max = simple_strtoul(buf, &e, 10); 3195 if (buf == e || (*e && *e != '\n') || max <= 0) 3196 return -EINVAL; 3197 mddev->sync_speed_max = max; 3198 return len; 3199 } 3200 3201 static struct md_sysfs_entry md_sync_max = 3202 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); 3203 3204 static ssize_t 3205 degraded_show(mddev_t *mddev, char *page) 3206 { 3207 return sprintf(page, "%d\n", mddev->degraded); 3208 } 3209 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded); 3210 3211 static ssize_t 3212 sync_force_parallel_show(mddev_t *mddev, char *page) 3213 { 3214 return sprintf(page, "%d\n", mddev->parallel_resync); 3215 } 3216 3217 static ssize_t 3218 sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len) 3219 { 3220 long n; 3221 3222 if (strict_strtol(buf, 10, &n)) 3223 return -EINVAL; 3224 3225 if (n != 0 && n != 1) 3226 return -EINVAL; 3227 3228 mddev->parallel_resync = n; 3229 3230 if (mddev->sync_thread) 3231 wake_up(&resync_wait); 3232 3233 return len; 3234 } 3235 3236 /* force parallel resync, even with shared block devices */ 3237 static struct md_sysfs_entry md_sync_force_parallel = 3238 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR, 3239 sync_force_parallel_show, sync_force_parallel_store); 3240 3241 static ssize_t 3242 sync_speed_show(mddev_t *mddev, char *page) 3243 { 3244 unsigned long resync, dt, db; 3245 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active); 3246 dt = (jiffies - mddev->resync_mark) / HZ; 3247 if (!dt) dt++; 3248 db = resync - mddev->resync_mark_cnt; 3249 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */ 3250 } 3251 3252 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed); 3253 3254 static ssize_t 3255 sync_completed_show(mddev_t *mddev, char *page) 3256 { 3257 unsigned long max_blocks, resync; 3258 3259 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 3260 max_blocks = mddev->resync_max_sectors; 3261 else 3262 max_blocks = 
mddev->size << 1; 3263 3264 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active)); 3265 return sprintf(page, "%lu / %lu\n", resync, max_blocks); 3266 } 3267 3268 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed); 3269 3270 static ssize_t 3271 min_sync_show(mddev_t *mddev, char *page) 3272 { 3273 return sprintf(page, "%llu\n", 3274 (unsigned long long)mddev->resync_min); 3275 } 3276 static ssize_t 3277 min_sync_store(mddev_t *mddev, const char *buf, size_t len) 3278 { 3279 unsigned long long min; 3280 if (strict_strtoull(buf, 10, &min)) 3281 return -EINVAL; 3282 if (min > mddev->resync_max) 3283 return -EINVAL; 3284 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 3285 return -EBUSY; 3286 3287 /* Must be a multiple of chunk_size */ 3288 if (mddev->chunk_size) { 3289 if (min & (sector_t)((mddev->chunk_size>>9)-1)) 3290 return -EINVAL; 3291 } 3292 mddev->resync_min = min; 3293 3294 return len; 3295 } 3296 3297 static struct md_sysfs_entry md_min_sync = 3298 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store); 3299 3300 static ssize_t 3301 max_sync_show(mddev_t *mddev, char *page) 3302 { 3303 if (mddev->resync_max == MaxSector) 3304 return sprintf(page, "max\n"); 3305 else 3306 return sprintf(page, "%llu\n", 3307 (unsigned long long)mddev->resync_max); 3308 } 3309 static ssize_t 3310 max_sync_store(mddev_t *mddev, const char *buf, size_t len) 3311 { 3312 if (strncmp(buf, "max", 3) == 0) 3313 mddev->resync_max = MaxSector; 3314 else { 3315 unsigned long long max; 3316 if (strict_strtoull(buf, 10, &max)) 3317 return -EINVAL; 3318 if (max < mddev->resync_min) 3319 return -EINVAL; 3320 if (max < mddev->resync_max && 3321 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 3322 return -EBUSY; 3323 3324 /* Must be a multiple of chunk_size */ 3325 if (mddev->chunk_size) { 3326 if (max & (sector_t)((mddev->chunk_size>>9)-1)) 3327 return -EINVAL; 3328 } 3329 mddev->resync_max = max; 3330 } 3331 wake_up(&mddev->recovery_wait); 3332 return len; 3333 } 3334 3335 static struct md_sysfs_entry md_max_sync = 3336 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store); 3337 3338 static ssize_t 3339 suspend_lo_show(mddev_t *mddev, char *page) 3340 { 3341 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); 3342 } 3343 3344 static ssize_t 3345 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len) 3346 { 3347 char *e; 3348 unsigned long long new = simple_strtoull(buf, &e, 10); 3349 3350 if (mddev->pers->quiesce == NULL) 3351 return -EINVAL; 3352 if (buf == e || (*e && *e != '\n')) 3353 return -EINVAL; 3354 if (new >= mddev->suspend_hi || 3355 (new > mddev->suspend_lo && new < mddev->suspend_hi)) { 3356 mddev->suspend_lo = new; 3357 mddev->pers->quiesce(mddev, 2); 3358 return len; 3359 } else 3360 return -EINVAL; 3361 } 3362 static struct md_sysfs_entry md_suspend_lo = 3363 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store); 3364 3365 3366 static ssize_t 3367 suspend_hi_show(mddev_t *mddev, char *page) 3368 { 3369 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi); 3370 } 3371 3372 static ssize_t 3373 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len) 3374 { 3375 char *e; 3376 unsigned long long new = simple_strtoull(buf, &e, 10); 3377 3378 if (mddev->pers->quiesce == NULL) 3379 return -EINVAL; 3380 if (buf == e || (*e && *e != '\n')) 3381 return -EINVAL; 3382 if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) || 3383 (new > mddev->suspend_lo && new > 
mddev->suspend_hi)) { 3384 mddev->suspend_hi = new; 3385 mddev->pers->quiesce(mddev, 1); 3386 mddev->pers->quiesce(mddev, 0); 3387 return len; 3388 } else 3389 return -EINVAL; 3390 } 3391 static struct md_sysfs_entry md_suspend_hi = 3392 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); 3393 3394 static ssize_t 3395 reshape_position_show(mddev_t *mddev, char *page) 3396 { 3397 if (mddev->reshape_position != MaxSector) 3398 return sprintf(page, "%llu\n", 3399 (unsigned long long)mddev->reshape_position); 3400 strcpy(page, "none\n"); 3401 return 5; 3402 } 3403 3404 static ssize_t 3405 reshape_position_store(mddev_t *mddev, const char *buf, size_t len) 3406 { 3407 char *e; 3408 unsigned long long new = simple_strtoull(buf, &e, 10); 3409 if (mddev->pers) 3410 return -EBUSY; 3411 if (buf == e || (*e && *e != '\n')) 3412 return -EINVAL; 3413 mddev->reshape_position = new; 3414 mddev->delta_disks = 0; 3415 mddev->new_level = mddev->level; 3416 mddev->new_layout = mddev->layout; 3417 mddev->new_chunk = mddev->chunk_size; 3418 return len; 3419 } 3420 3421 static struct md_sysfs_entry md_reshape_position = 3422 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show, 3423 reshape_position_store); 3424 3425 3426 static struct attribute *md_default_attrs[] = { 3427 &md_level.attr, 3428 &md_layout.attr, 3429 &md_raid_disks.attr, 3430 &md_chunk_size.attr, 3431 &md_size.attr, 3432 &md_resync_start.attr, 3433 &md_metadata.attr, 3434 &md_new_device.attr, 3435 &md_safe_delay.attr, 3436 &md_array_state.attr, 3437 &md_reshape_position.attr, 3438 NULL, 3439 }; 3440 3441 static struct attribute *md_redundancy_attrs[] = { 3442 &md_scan_mode.attr, 3443 &md_mismatches.attr, 3444 &md_sync_min.attr, 3445 &md_sync_max.attr, 3446 &md_sync_speed.attr, 3447 &md_sync_force_parallel.attr, 3448 &md_sync_completed.attr, 3449 &md_min_sync.attr, 3450 &md_max_sync.attr, 3451 &md_suspend_lo.attr, 3452 &md_suspend_hi.attr, 3453 &md_bitmap.attr, 3454 &md_degraded.attr, 3455 NULL, 3456 }; 3457 static struct attribute_group md_redundancy_group = { 3458 .name = NULL, 3459 .attrs = md_redundancy_attrs, 3460 }; 3461 3462 3463 static ssize_t 3464 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 3465 { 3466 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 3467 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); 3468 ssize_t rv; 3469 3470 if (!entry->show) 3471 return -EIO; 3472 rv = mddev_lock(mddev); 3473 if (!rv) { 3474 rv = entry->show(mddev, page); 3475 mddev_unlock(mddev); 3476 } 3477 return rv; 3478 } 3479 3480 static ssize_t 3481 md_attr_store(struct kobject *kobj, struct attribute *attr, 3482 const char *page, size_t length) 3483 { 3484 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 3485 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); 3486 ssize_t rv; 3487 3488 if (!entry->store) 3489 return -EIO; 3490 if (!capable(CAP_SYS_ADMIN)) 3491 return -EACCES; 3492 rv = mddev_lock(mddev); 3493 if (mddev->hold_active == UNTIL_IOCTL) 3494 mddev->hold_active = 0; 3495 if (!rv) { 3496 rv = entry->store(mddev, page, length); 3497 mddev_unlock(mddev); 3498 } 3499 return rv; 3500 } 3501 3502 static void md_free(struct kobject *ko) 3503 { 3504 mddev_t *mddev = container_of(ko, mddev_t, kobj); 3505 3506 if (mddev->sysfs_state) 3507 sysfs_put(mddev->sysfs_state); 3508 3509 if (mddev->gendisk) { 3510 del_gendisk(mddev->gendisk); 3511 put_disk(mddev->gendisk); 3512 } 3513 if (mddev->queue) 3514 
blk_cleanup_queue(mddev->queue); 3515 3516 kfree(mddev); 3517 } 3518 3519 static struct sysfs_ops md_sysfs_ops = { 3520 .show = md_attr_show, 3521 .store = md_attr_store, 3522 }; 3523 static struct kobj_type md_ktype = { 3524 .release = md_free, 3525 .sysfs_ops = &md_sysfs_ops, 3526 .default_attrs = md_default_attrs, 3527 }; 3528 3529 int mdp_major = 0; 3530 3531 static int md_alloc(dev_t dev, char *name) 3532 { 3533 static DEFINE_MUTEX(disks_mutex); 3534 mddev_t *mddev = mddev_find(dev); 3535 struct gendisk *disk; 3536 int partitioned; 3537 int shift; 3538 int unit; 3539 int error; 3540 3541 if (!mddev) 3542 return -ENODEV; 3543 3544 partitioned = (MAJOR(mddev->unit) != MD_MAJOR); 3545 shift = partitioned ? MdpMinorShift : 0; 3546 unit = MINOR(mddev->unit) >> shift; 3547 3548 /* wait for any previous instance of this device 3549 * to be completely removed (mddev_delayed_delete). 3550 */ 3551 flush_scheduled_work(); 3552 3553 mutex_lock(&disks_mutex); 3554 if (mddev->gendisk) { 3555 mutex_unlock(&disks_mutex); 3556 mddev_put(mddev); 3557 return -EEXIST; 3558 } 3559 3560 if (name) { 3561 /* Need to ensure that 'name' is not a duplicate. 3562 */ 3563 mddev_t *mddev2; 3564 spin_lock(&all_mddevs_lock); 3565 3566 list_for_each_entry(mddev2, &all_mddevs, all_mddevs) 3567 if (mddev2->gendisk && 3568 strcmp(mddev2->gendisk->disk_name, name) == 0) { 3569 spin_unlock(&all_mddevs_lock); 3570 return -EEXIST; 3571 } 3572 spin_unlock(&all_mddevs_lock); 3573 } 3574 3575 mddev->queue = blk_alloc_queue(GFP_KERNEL); 3576 if (!mddev->queue) { 3577 mutex_unlock(&disks_mutex); 3578 mddev_put(mddev); 3579 return -ENOMEM; 3580 } 3581 /* Can be unlocked because the queue is new: no concurrency */ 3582 queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue); 3583 3584 blk_queue_make_request(mddev->queue, md_fail_request); 3585 3586 disk = alloc_disk(1 << shift); 3587 if (!disk) { 3588 mutex_unlock(&disks_mutex); 3589 blk_cleanup_queue(mddev->queue); 3590 mddev->queue = NULL; 3591 mddev_put(mddev); 3592 return -ENOMEM; 3593 } 3594 disk->major = MAJOR(mddev->unit); 3595 disk->first_minor = unit << shift; 3596 if (name) 3597 strcpy(disk->disk_name, name); 3598 else if (partitioned) 3599 sprintf(disk->disk_name, "md_d%d", unit); 3600 else 3601 sprintf(disk->disk_name, "md%d", unit); 3602 disk->fops = &md_fops; 3603 disk->private_data = mddev; 3604 disk->queue = mddev->queue; 3605 /* Allow extended partitions. This makes the 3606 * 'mdp' device redundant, but we can't really 3607 * remove it now. 3608 */ 3609 disk->flags |= GENHD_FL_EXT_DEVT; 3610 add_disk(disk); 3611 mddev->gendisk = disk; 3612 error = kobject_init_and_add(&mddev->kobj, &md_ktype, 3613 &disk_to_dev(disk)->kobj, "%s", "md"); 3614 mutex_unlock(&disks_mutex); 3615 if (error) 3616 printk(KERN_WARNING "md: cannot register %s/md - name in use\n", 3617 disk->disk_name); 3618 else { 3619 kobject_uevent(&mddev->kobj, KOBJ_ADD); 3620 mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state"); 3621 } 3622 mddev_put(mddev); 3623 return 0; 3624 } 3625 3626 static struct kobject *md_probe(dev_t dev, int *part, void *data) 3627 { 3628 md_alloc(dev, NULL); 3629 return NULL; 3630 } 3631 3632 static int add_named_array(const char *val, struct kernel_param *kp) 3633 { 3634 /* val must be "md_*" where * is not all digits. 3635 * We allocate an array with a large free minor number, and 3636 * set the name to val. val must not already be an active name.
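 * (Editor's examples: "md_home" or "md_scratch\n" would be accepted;
 * a name such as "md_42", being all digits after the "md_" prefix, is
 * ruled out by the rule stated above.)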
3637 */ 3638 int len = strlen(val); 3639 char buf[DISK_NAME_LEN]; 3640 3641 while (len && val[len-1] == '\n') 3642 len--; 3643 if (len >= DISK_NAME_LEN) 3644 return -E2BIG; 3645 strlcpy(buf, val, len+1); 3646 if (strncmp(buf, "md_", 3) != 0) 3647 return -EINVAL; 3648 return md_alloc(0, buf); 3649 } 3650 3651 static void md_safemode_timeout(unsigned long data) 3652 { 3653 mddev_t *mddev = (mddev_t *) data; 3654 3655 if (!atomic_read(&mddev->writes_pending)) { 3656 mddev->safemode = 1; 3657 if (mddev->external) 3658 sysfs_notify_dirent(mddev->sysfs_state); 3659 } 3660 md_wakeup_thread(mddev->thread); 3661 } 3662 3663 static int start_dirty_degraded; 3664 3665 static int do_md_run(mddev_t * mddev) 3666 { 3667 int err; 3668 int chunk_size; 3669 mdk_rdev_t *rdev; 3670 struct gendisk *disk; 3671 struct mdk_personality *pers; 3672 char b[BDEVNAME_SIZE]; 3673 3674 if (list_empty(&mddev->disks)) 3675 /* cannot run an array with no devices.. */ 3676 return -EINVAL; 3677 3678 if (mddev->pers) 3679 return -EBUSY; 3680 3681 /* 3682 * Analyze all RAID superblock(s) 3683 */ 3684 if (!mddev->raid_disks) { 3685 if (!mddev->persistent) 3686 return -EINVAL; 3687 analyze_sbs(mddev); 3688 } 3689 3690 chunk_size = mddev->chunk_size; 3691 3692 if (chunk_size) { 3693 if (chunk_size > MAX_CHUNK_SIZE) { 3694 printk(KERN_ERR "too big chunk_size: %d > %d\n", 3695 chunk_size, MAX_CHUNK_SIZE); 3696 return -EINVAL; 3697 } 3698 /* 3699 * chunk-size has to be a power of 2 3700 */ 3701 if ( (1 << ffz(~chunk_size)) != chunk_size) { 3702 printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size); 3703 return -EINVAL; 3704 } 3705 3706 /* devices must have minimum size of one chunk */ 3707 list_for_each_entry(rdev, &mddev->disks, same_set) { 3708 if (test_bit(Faulty, &rdev->flags)) 3709 continue; 3710 if (rdev->size < chunk_size / 1024) { 3711 printk(KERN_WARNING 3712 "md: Dev %s smaller than chunk_size:" 3713 " %lluk < %dk\n", 3714 bdevname(rdev->bdev,b), 3715 (unsigned long long)rdev->size, 3716 chunk_size / 1024); 3717 return -EINVAL; 3718 } 3719 } 3720 } 3721 3722 if (mddev->level != LEVEL_NONE) 3723 request_module("md-level-%d", mddev->level); 3724 else if (mddev->clevel[0]) 3725 request_module("md-%s", mddev->clevel); 3726 3727 /* 3728 * Drop all container device buffers, from now on 3729 * the only valid external interface is through the md 3730 * device. 3731 */ 3732 list_for_each_entry(rdev, &mddev->disks, same_set) { 3733 if (test_bit(Faulty, &rdev->flags)) 3734 continue; 3735 sync_blockdev(rdev->bdev); 3736 invalidate_bdev(rdev->bdev); 3737 3738 /* perform some consistency tests on the device. 3739 * We don't want the data to overlap the metadata; 3740 * internal bitmap issues are handled elsewhere.
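 * (Editor's sketch of the two cases checked below: a 0.90-style
 * superblock lives at the end of the device, so the data, which starts
 * first, must satisfy data_offset + array size <= sb_start; a
 * 1.1/1.2-style superblock lives at or near the start of the device,
 * so it must satisfy sb_start + superblock size <= data_offset.)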
3741 */ 3742 if (rdev->data_offset < rdev->sb_start) { 3743 if (mddev->size && 3744 rdev->data_offset + mddev->size*2 3745 > rdev->sb_start) { 3746 printk("md: %s: data overlaps metadata\n", 3747 mdname(mddev)); 3748 return -EINVAL; 3749 } 3750 } else { 3751 if (rdev->sb_start + rdev->sb_size/512 3752 > rdev->data_offset) { 3753 printk("md: %s: metadata overlaps data\n", 3754 mdname(mddev)); 3755 return -EINVAL; 3756 } 3757 } 3758 sysfs_notify_dirent(rdev->sysfs_state); 3759 } 3760 3761 md_probe(mddev->unit, NULL, NULL); 3762 disk = mddev->gendisk; 3763 if (!disk) 3764 return -ENOMEM; 3765 3766 spin_lock(&pers_lock); 3767 pers = find_pers(mddev->level, mddev->clevel); 3768 if (!pers || !try_module_get(pers->owner)) { 3769 spin_unlock(&pers_lock); 3770 if (mddev->level != LEVEL_NONE) 3771 printk(KERN_WARNING "md: personality for level %d is not loaded!\n", 3772 mddev->level); 3773 else 3774 printk(KERN_WARNING "md: personality for level %s is not loaded!\n", 3775 mddev->clevel); 3776 return -EINVAL; 3777 } 3778 mddev->pers = pers; 3779 spin_unlock(&pers_lock); 3780 mddev->level = pers->level; 3781 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 3782 3783 if (mddev->reshape_position != MaxSector && 3784 pers->start_reshape == NULL) { 3785 /* This personality cannot handle reshaping... */ 3786 mddev->pers = NULL; 3787 module_put(pers->owner); 3788 return -EINVAL; 3789 } 3790 3791 if (pers->sync_request) { 3792 /* Warn if this is a potentially silly 3793 * configuration. 3794 */ 3795 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 3796 mdk_rdev_t *rdev2; 3797 int warned = 0; 3798 3799 list_for_each_entry(rdev, &mddev->disks, same_set) 3800 list_for_each_entry(rdev2, &mddev->disks, same_set) { 3801 if (rdev < rdev2 && 3802 rdev->bdev->bd_contains == 3803 rdev2->bdev->bd_contains) { 3804 printk(KERN_WARNING 3805 "%s: WARNING: %s appears to be" 3806 " on the same physical disk as" 3807 " %s.\n", 3808 mdname(mddev), 3809 bdevname(rdev->bdev,b), 3810 bdevname(rdev2->bdev,b2)); 3811 warned = 1; 3812 } 3813 } 3814 3815 if (warned) 3816 printk(KERN_WARNING 3817 "True protection against single-disk" 3818 " failure might be compromised.\n"); 3819 } 3820 3821 mddev->recovery = 0; 3822 mddev->resync_max_sectors = mddev->size << 1; /* may be over-ridden by personality */ 3823 mddev->barriers_work = 1; 3824 mddev->ok_start_degraded = start_dirty_degraded; 3825 3826 if (start_readonly) 3827 mddev->ro = 2; /* read-only, but switch on first write */ 3828 3829 err = mddev->pers->run(mddev); 3830 if (err) 3831 printk(KERN_ERR "md: pers->run() failed ...\n"); 3832 else if (mddev->pers->sync_request) { 3833 err = bitmap_create(mddev); 3834 if (err) { 3835 printk(KERN_ERR "%s: failed to create bitmap (%d)\n", 3836 mdname(mddev), err); 3837 mddev->pers->stop(mddev); 3838 } 3839 } 3840 if (err) { 3841 module_put(mddev->pers->owner); 3842 mddev->pers = NULL; 3843 bitmap_destroy(mddev); 3844 return err; 3845 } 3846 if (mddev->pers->sync_request) { 3847 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 3848 printk(KERN_WARNING 3849 "md: cannot register extra attributes for %s\n", 3850 mdname(mddev)); 3851 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action"); 3852 } else if (mddev->ro == 2) /* auto-readonly not meaningful */ 3853 mddev->ro = 0; 3854 3855 atomic_set(&mddev->writes_pending,0); 3856 mddev->safemode = 0; 3857 mddev->safemode_timer.function = md_safemode_timeout; 3858 mddev->safemode_timer.data = (unsigned long) mddev; 3859 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 
msec delay */ 3860 mddev->in_sync = 1; 3861 3862 list_for_each_entry(rdev, &mddev->disks, same_set) 3863 if (rdev->raid_disk >= 0) { 3864 char nm[20]; 3865 sprintf(nm, "rd%d", rdev->raid_disk); 3866 if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm)) 3867 printk("md: cannot register %s for %s\n", 3868 nm, mdname(mddev)); 3869 } 3870 3871 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3872 3873 if (mddev->flags) 3874 md_update_sb(mddev, 0); 3875 3876 set_capacity(disk, mddev->array_sectors); 3877 3878 /* If we call blk_queue_make_request here, it will 3879 * re-initialise max_sectors etc which may have been 3880 * refined inside -> run. So just set the bits we need to set. 3881 * Most initialisation happened when we called 3882 * blk_queue_make_request(..., md_fail_request) 3883 * earlier. 3884 */ 3885 mddev->queue->queuedata = mddev; 3886 mddev->queue->make_request_fn = mddev->pers->make_request; 3887 3888 /* If there is a partially-recovered drive we need to 3889 * start recovery here. If we leave it to md_check_recovery, 3890 * it will remove the drives and not do the right thing 3891 */ 3892 if (mddev->degraded && !mddev->sync_thread) { 3893 int spares = 0; 3894 list_for_each_entry(rdev, &mddev->disks, same_set) 3895 if (rdev->raid_disk >= 0 && 3896 !test_bit(In_sync, &rdev->flags) && 3897 !test_bit(Faulty, &rdev->flags)) 3898 /* complete an interrupted recovery */ 3899 spares++; 3900 if (spares && mddev->pers->sync_request) { 3901 mddev->recovery = 0; 3902 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 3903 mddev->sync_thread = md_register_thread(md_do_sync, 3904 mddev, 3905 "%s_resync"); 3906 if (!mddev->sync_thread) { 3907 printk(KERN_ERR "%s: could not start resync" 3908 " thread...\n", 3909 mdname(mddev)); 3910 /* leave the spares where they are, it shouldn't hurt */ 3911 mddev->recovery = 0; 3912 } 3913 } 3914 } 3915 md_wakeup_thread(mddev->thread); 3916 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 3917 3918 mddev->changed = 1; 3919 md_new_event(mddev); 3920 sysfs_notify_dirent(mddev->sysfs_state); 3921 if (mddev->sysfs_action) 3922 sysfs_notify_dirent(mddev->sysfs_action); 3923 sysfs_notify(&mddev->kobj, NULL, "degraded"); 3924 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 3925 return 0; 3926 } 3927 3928 static int restart_array(mddev_t *mddev) 3929 { 3930 struct gendisk *disk = mddev->gendisk; 3931 3932 /* Complain if it has no devices */ 3933 if (list_empty(&mddev->disks)) 3934 return -ENXIO; 3935 if (!mddev->pers) 3936 return -EINVAL; 3937 if (!mddev->ro) 3938 return -EBUSY; 3939 mddev->safemode = 0; 3940 mddev->ro = 0; 3941 set_disk_ro(disk, 0); 3942 printk(KERN_INFO "md: %s switched to read-write mode.\n", 3943 mdname(mddev)); 3944 /* Kick recovery or resync if necessary */ 3945 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3946 md_wakeup_thread(mddev->thread); 3947 md_wakeup_thread(mddev->sync_thread); 3948 sysfs_notify_dirent(mddev->sysfs_state); 3949 return 0; 3950 } 3951 3952 /* similar to deny_write_access, but accounts for our holding a reference 3953 * to the file ourselves */ 3954 static int deny_bitmap_write_access(struct file * file) 3955 { 3956 struct inode *inode = file->f_mapping->host; 3957 3958 spin_lock(&inode->i_lock); 3959 if (atomic_read(&inode->i_writecount) > 1) { 3960 spin_unlock(&inode->i_lock); 3961 return -ETXTBSY; 3962 } 3963 atomic_set(&inode->i_writecount, -1); 3964 spin_unlock(&inode->i_lock); 3965 3966 return 0; 3967 } 3968 3969 static void restore_bitmap_write_access(struct file *file) 3970 {
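/* (Editor's note: deny_bitmap_write_access() above parks i_writecount
 * at -1, the same convention deny_write_access() uses for files being
 * executed (hence the -ETXTBSY), so any further attempt to open the
 * bitmap file for writing fails; setting it back to 1 here accounts
 * for our own reference and makes the file writable again.) */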
3971 struct inode *inode = file->f_mapping->host; 3972 3973 spin_lock(&inode->i_lock); 3974 atomic_set(&inode->i_writecount, 1); 3975 spin_unlock(&inode->i_lock); 3976 } 3977 3978 /* mode: 3979 * 0 - completely stop and dis-assemble array 3980 * 1 - switch to readonly 3981 * 2 - stop but do not disassemble array 3982 */ 3983 static int do_md_stop(mddev_t * mddev, int mode, int is_open) 3984 { 3985 int err = 0; 3986 struct gendisk *disk = mddev->gendisk; 3987 3988 if (atomic_read(&mddev->openers) > is_open) { 3989 printk("md: %s still in use.\n",mdname(mddev)); 3990 return -EBUSY; 3991 } 3992 3993 if (mddev->pers) { 3994 3995 if (mddev->sync_thread) { 3996 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 3997 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 3998 md_unregister_thread(mddev->sync_thread); 3999 mddev->sync_thread = NULL; 4000 } 4001 4002 del_timer_sync(&mddev->safemode_timer); 4003 4004 switch(mode) { 4005 case 1: /* readonly */ 4006 err = -ENXIO; 4007 if (mddev->ro==1) 4008 goto out; 4009 mddev->ro = 1; 4010 break; 4011 case 0: /* disassemble */ 4012 case 2: /* stop */ 4013 bitmap_flush(mddev); 4014 md_super_wait(mddev); 4015 if (mddev->ro) 4016 set_disk_ro(disk, 0); 4017 blk_queue_make_request(mddev->queue, md_fail_request); 4018 mddev->pers->stop(mddev); 4019 mddev->queue->merge_bvec_fn = NULL; 4020 mddev->queue->unplug_fn = NULL; 4021 mddev->queue->backing_dev_info.congested_fn = NULL; 4022 if (mddev->pers->sync_request) { 4023 sysfs_remove_group(&mddev->kobj, &md_redundancy_group); 4024 if (mddev->sysfs_action) 4025 sysfs_put(mddev->sysfs_action); 4026 mddev->sysfs_action = NULL; 4027 } 4028 module_put(mddev->pers->owner); 4029 mddev->pers = NULL; 4030 /* tell userspace to handle 'inactive' */ 4031 sysfs_notify_dirent(mddev->sysfs_state); 4032 4033 set_capacity(disk, 0); 4034 mddev->changed = 1; 4035 4036 if (mddev->ro) 4037 mddev->ro = 0; 4038 } 4039 if (!mddev->in_sync || mddev->flags) { 4040 /* mark array as shutdown cleanly */ 4041 mddev->in_sync = 1; 4042 md_update_sb(mddev, 1); 4043 } 4044 if (mode == 1) 4045 set_disk_ro(disk, 1); 4046 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4047 } 4048 4049 /* 4050 * Free resources if final stop 4051 */ 4052 if (mode == 0) { 4053 mdk_rdev_t *rdev; 4054 4055 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); 4056 4057 bitmap_destroy(mddev); 4058 if (mddev->bitmap_file) { 4059 restore_bitmap_write_access(mddev->bitmap_file); 4060 fput(mddev->bitmap_file); 4061 mddev->bitmap_file = NULL; 4062 } 4063 mddev->bitmap_offset = 0; 4064 4065 list_for_each_entry(rdev, &mddev->disks, same_set) 4066 if (rdev->raid_disk >= 0) { 4067 char nm[20]; 4068 sprintf(nm, "rd%d", rdev->raid_disk); 4069 sysfs_remove_link(&mddev->kobj, nm); 4070 } 4071 4072 /* make sure all md_delayed_delete calls have finished */ 4073 flush_scheduled_work(); 4074 4075 export_array(mddev); 4076 4077 mddev->array_sectors = 0; 4078 mddev->size = 0; 4079 mddev->raid_disks = 0; 4080 mddev->recovery_cp = 0; 4081 mddev->resync_min = 0; 4082 mddev->resync_max = MaxSector; 4083 mddev->reshape_position = MaxSector; 4084 mddev->external = 0; 4085 mddev->persistent = 0; 4086 mddev->level = LEVEL_NONE; 4087 mddev->clevel[0] = 0; 4088 mddev->flags = 0; 4089 mddev->ro = 0; 4090 mddev->metadata_type[0] = 0; 4091 mddev->chunk_size = 0; 4092 mddev->ctime = mddev->utime = 0; 4093 mddev->layout = 0; 4094 mddev->max_disks = 0; 4095 mddev->events = 0; 4096 mddev->delta_disks = 0; 4097 mddev->new_level = LEVEL_NONE; 4098 mddev->new_layout = 0; 4099 mddev->new_chunk = 0; 4100 
mddev->curr_resync = 0; 4101 mddev->resync_mismatches = 0; 4102 mddev->suspend_lo = mddev->suspend_hi = 0; 4103 mddev->sync_speed_min = mddev->sync_speed_max = 0; 4104 mddev->recovery = 0; 4105 mddev->in_sync = 0; 4106 mddev->changed = 0; 4107 mddev->degraded = 0; 4108 mddev->barriers_work = 0; 4109 mddev->safemode = 0; 4110 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 4111 if (mddev->hold_active == UNTIL_STOP) 4112 mddev->hold_active = 0; 4113 4114 } else if (mddev->pers) 4115 printk(KERN_INFO "md: %s switched to read-only mode.\n", 4116 mdname(mddev)); 4117 err = 0; 4118 md_new_event(mddev); 4119 sysfs_notify_dirent(mddev->sysfs_state); 4120 out: 4121 return err; 4122 } 4123 4124 #ifndef MODULE 4125 static void autorun_array(mddev_t *mddev) 4126 { 4127 mdk_rdev_t *rdev; 4128 int err; 4129 4130 if (list_empty(&mddev->disks)) 4131 return; 4132 4133 printk(KERN_INFO "md: running: "); 4134 4135 list_for_each_entry(rdev, &mddev->disks, same_set) { 4136 char b[BDEVNAME_SIZE]; 4137 printk("<%s>", bdevname(rdev->bdev,b)); 4138 } 4139 printk("\n"); 4140 4141 err = do_md_run(mddev); 4142 if (err) { 4143 printk(KERN_WARNING "md: do_md_run() returned %d\n", err); 4144 do_md_stop(mddev, 0, 0); 4145 } 4146 } 4147 4148 /* 4149 * let's try to run arrays based on all disks that have arrived 4150 * until now. (those are in pending_raid_disks) 4151 * 4152 * the method: pick the first pending disk, collect all disks with 4153 * the same UUID, remove all from the pending list and put them into 4154 * the 'same_array' list. Then order this list based on superblock 4155 * update time (freshest comes first), kick out 'old' disks and 4156 * compare superblocks. If everything's fine then run it. 4157 * 4158 * If "unit" is allocated, then bump its reference count 4159 */ 4160 static void autorun_devices(int part) 4161 { 4162 mdk_rdev_t *rdev0, *rdev, *tmp; 4163 mddev_t *mddev; 4164 char b[BDEVNAME_SIZE]; 4165 4166 printk(KERN_INFO "md: autorun ...\n"); 4167 while (!list_empty(&pending_raid_disks)) { 4168 int unit; 4169 dev_t dev; 4170 LIST_HEAD(candidates); 4171 rdev0 = list_entry(pending_raid_disks.next, 4172 mdk_rdev_t, same_set); 4173 4174 printk(KERN_INFO "md: considering %s ...\n", 4175 bdevname(rdev0->bdev,b)); 4176 INIT_LIST_HEAD(&candidates); 4177 rdev_for_each_list(rdev, tmp, &pending_raid_disks) 4178 if (super_90_load(rdev, rdev0, 0) >= 0) { 4179 printk(KERN_INFO "md: adding %s ...\n", 4180 bdevname(rdev->bdev,b)); 4181 list_move(&rdev->same_set, &candidates); 4182 } 4183 /* 4184 * now we have a set of devices, with all of them having 4185 * mostly sane superblocks. It's time to allocate the 4186 * mddev.
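 *
 * (Device numbering, for illustration: with MdpMinorShift == 6, a
 * partitioned "mdp" array whose superblock records preferred_minor == 2
 * is probed at MKDEV(mdp_major, 2 << 6), i.e. minor 128, leaving the 63
 * minors above it free for partitions; a non-partitioned array maps
 * straight to MKDEV(MD_MAJOR, 2).)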
4187 */ 4188 if (part) { 4189 dev = MKDEV(mdp_major, 4190 rdev0->preferred_minor << MdpMinorShift); 4191 unit = MINOR(dev) >> MdpMinorShift; 4192 } else { 4193 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor); 4194 unit = MINOR(dev); 4195 } 4196 if (rdev0->preferred_minor != unit) { 4197 printk(KERN_INFO "md: unit number in %s is bad: %d\n", 4198 bdevname(rdev0->bdev, b), rdev0->preferred_minor); 4199 break; 4200 } 4201 4202 md_probe(dev, NULL, NULL); 4203 mddev = mddev_find(dev); 4204 if (!mddev || !mddev->gendisk) { 4205 if (mddev) 4206 mddev_put(mddev); 4207 printk(KERN_ERR 4208 "md: cannot allocate memory for md drive.\n"); 4209 break; 4210 } 4211 if (mddev_lock(mddev)) 4212 printk(KERN_WARNING "md: %s locked, cannot run\n", 4213 mdname(mddev)); 4214 else if (mddev->raid_disks || mddev->major_version 4215 || !list_empty(&mddev->disks)) { 4216 printk(KERN_WARNING 4217 "md: %s already running, cannot run %s\n", 4218 mdname(mddev), bdevname(rdev0->bdev,b)); 4219 mddev_unlock(mddev); 4220 } else { 4221 printk(KERN_INFO "md: created %s\n", mdname(mddev)); 4222 mddev->persistent = 1; 4223 rdev_for_each_list(rdev, tmp, &candidates) { 4224 list_del_init(&rdev->same_set); 4225 if (bind_rdev_to_array(rdev, mddev)) 4226 export_rdev(rdev); 4227 } 4228 autorun_array(mddev); 4229 mddev_unlock(mddev); 4230 } 4231 /* on success, candidates will be empty, on error 4232 * it won't... 4233 */ 4234 rdev_for_each_list(rdev, tmp, &candidates) { 4235 list_del_init(&rdev->same_set); 4236 export_rdev(rdev); 4237 } 4238 mddev_put(mddev); 4239 } 4240 printk(KERN_INFO "md: ... autorun DONE.\n"); 4241 } 4242 #endif /* !MODULE */ 4243 4244 static int get_version(void __user * arg) 4245 { 4246 mdu_version_t ver; 4247 4248 ver.major = MD_MAJOR_VERSION; 4249 ver.minor = MD_MINOR_VERSION; 4250 ver.patchlevel = MD_PATCHLEVEL_VERSION; 4251 4252 if (copy_to_user(arg, &ver, sizeof(ver))) 4253 return -EFAULT; 4254 4255 return 0; 4256 } 4257 4258 static int get_array_info(mddev_t * mddev, void __user * arg) 4259 { 4260 mdu_array_info_t info; 4261 int nr,working,active,failed,spare; 4262 mdk_rdev_t *rdev; 4263 4264 nr=working=active=failed=spare=0; 4265 list_for_each_entry(rdev, &mddev->disks, same_set) { 4266 nr++; 4267 if (test_bit(Faulty, &rdev->flags)) 4268 failed++; 4269 else { 4270 working++; 4271 if (test_bit(In_sync, &rdev->flags)) 4272 active++; 4273 else 4274 spare++; 4275 } 4276 } 4277 4278 info.major_version = mddev->major_version; 4279 info.minor_version = mddev->minor_version; 4280 info.patch_version = MD_PATCHLEVEL_VERSION; 4281 info.ctime = mddev->ctime; 4282 info.level = mddev->level; 4283 info.size = mddev->size; 4284 if (info.size != mddev->size) /* overflow */ 4285 info.size = -1; 4286 info.nr_disks = nr; 4287 info.raid_disks = mddev->raid_disks; 4288 info.md_minor = mddev->md_minor; 4289 info.not_persistent= !mddev->persistent; 4290 4291 info.utime = mddev->utime; 4292 info.state = 0; 4293 if (mddev->in_sync) 4294 info.state = (1<<MD_SB_CLEAN); 4295 if (mddev->bitmap && mddev->bitmap_offset) 4296 info.state = (1<<MD_SB_BITMAP_PRESENT); 4297 info.active_disks = active; 4298 info.working_disks = working; 4299 info.failed_disks = failed; 4300 info.spare_disks = spare; 4301 4302 info.layout = mddev->layout; 4303 info.chunk_size = mddev->chunk_size; 4304 4305 if (copy_to_user(arg, &info, sizeof(info))) 4306 return -EFAULT; 4307 4308 return 0; 4309 } 4310 4311 static int get_bitmap_file(mddev_t * mddev, void __user * arg) 4312 { 4313 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ 4314 char 
*ptr, *buf = NULL; 4315 int err = -ENOMEM; 4316 4317 if (md_allow_write(mddev)) 4318 file = kmalloc(sizeof(*file), GFP_NOIO); 4319 else 4320 file = kmalloc(sizeof(*file), GFP_KERNEL); 4321 4322 if (!file) 4323 goto out; 4324 4325 /* bitmap disabled, zero the first byte and copy out */ 4326 if (!mddev->bitmap || !mddev->bitmap->file) { 4327 file->pathname[0] = '\0'; 4328 goto copy_out; 4329 } 4330 4331 buf = kmalloc(sizeof(file->pathname), GFP_KERNEL); 4332 if (!buf) 4333 goto out; 4334 4335 ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname)); 4336 if (IS_ERR(ptr)) 4337 goto out; 4338 4339 strcpy(file->pathname, ptr); 4340 4341 copy_out: 4342 err = 0; 4343 if (copy_to_user(arg, file, sizeof(*file))) 4344 err = -EFAULT; 4345 out: 4346 kfree(buf); 4347 kfree(file); 4348 return err; 4349 } 4350 4351 static int get_disk_info(mddev_t * mddev, void __user * arg) 4352 { 4353 mdu_disk_info_t info; 4354 mdk_rdev_t *rdev; 4355 4356 if (copy_from_user(&info, arg, sizeof(info))) 4357 return -EFAULT; 4358 4359 rdev = find_rdev_nr(mddev, info.number); 4360 if (rdev) { 4361 info.major = MAJOR(rdev->bdev->bd_dev); 4362 info.minor = MINOR(rdev->bdev->bd_dev); 4363 info.raid_disk = rdev->raid_disk; 4364 info.state = 0; 4365 if (test_bit(Faulty, &rdev->flags)) 4366 info.state |= (1<<MD_DISK_FAULTY); 4367 else if (test_bit(In_sync, &rdev->flags)) { 4368 info.state |= (1<<MD_DISK_ACTIVE); 4369 info.state |= (1<<MD_DISK_SYNC); 4370 } 4371 if (test_bit(WriteMostly, &rdev->flags)) 4372 info.state |= (1<<MD_DISK_WRITEMOSTLY); 4373 } else { 4374 info.major = info.minor = 0; 4375 info.raid_disk = -1; 4376 info.state = (1<<MD_DISK_REMOVED); 4377 } 4378 4379 if (copy_to_user(arg, &info, sizeof(info))) 4380 return -EFAULT; 4381 4382 return 0; 4383 } 4384 4385 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) 4386 { 4387 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 4388 mdk_rdev_t *rdev; 4389 dev_t dev = MKDEV(info->major,info->minor); 4390 4391 if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) 4392 return -EOVERFLOW; 4393 4394 if (!mddev->raid_disks) { 4395 int err; 4396 /* expecting a device which has a superblock */ 4397 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); 4398 if (IS_ERR(rdev)) { 4399 printk(KERN_WARNING 4400 "md: md_import_device returned %ld\n", 4401 PTR_ERR(rdev)); 4402 return PTR_ERR(rdev); 4403 } 4404 if (!list_empty(&mddev->disks)) { 4405 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, 4406 mdk_rdev_t, same_set); 4407 int err = super_types[mddev->major_version] 4408 .load_super(rdev, rdev0, mddev->minor_version); 4409 if (err < 0) { 4410 printk(KERN_WARNING 4411 "md: %s has different UUID to %s\n", 4412 bdevname(rdev->bdev,b), 4413 bdevname(rdev0->bdev,b2)); 4414 export_rdev(rdev); 4415 return -EINVAL; 4416 } 4417 } 4418 err = bind_rdev_to_array(rdev, mddev); 4419 if (err) 4420 export_rdev(rdev); 4421 return err; 4422 } 4423 4424 /* 4425 * add_new_disk can be used once the array is assembled 4426 * to add "hot spares". 
They must already have a superblock 4427 * written 4428 */ 4429 if (mddev->pers) { 4430 int err; 4431 if (!mddev->pers->hot_add_disk) { 4432 printk(KERN_WARNING 4433 "%s: personality does not support diskops!\n", 4434 mdname(mddev)); 4435 return -EINVAL; 4436 } 4437 if (mddev->persistent) 4438 rdev = md_import_device(dev, mddev->major_version, 4439 mddev->minor_version); 4440 else 4441 rdev = md_import_device(dev, -1, -1); 4442 if (IS_ERR(rdev)) { 4443 printk(KERN_WARNING 4444 "md: md_import_device returned %ld\n", 4445 PTR_ERR(rdev)); 4446 return PTR_ERR(rdev); 4447 } 4448 /* set save_raid_disk if appropriate */ 4449 if (!mddev->persistent) { 4450 if (info->state & (1<<MD_DISK_SYNC) && 4451 info->raid_disk < mddev->raid_disks) 4452 rdev->raid_disk = info->raid_disk; 4453 else 4454 rdev->raid_disk = -1; 4455 } else 4456 super_types[mddev->major_version]. 4457 validate_super(mddev, rdev); 4458 rdev->saved_raid_disk = rdev->raid_disk; 4459 4460 clear_bit(In_sync, &rdev->flags); /* just to be sure */ 4461 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 4462 set_bit(WriteMostly, &rdev->flags); 4463 4464 rdev->raid_disk = -1; 4465 err = bind_rdev_to_array(rdev, mddev); 4466 if (!err && !mddev->pers->hot_remove_disk) { 4467 /* If there is hot_add_disk but no hot_remove_disk 4468 * then added disks are for geometry changes, 4469 * and should be added immediately. 4470 */ 4471 super_types[mddev->major_version]. 4472 validate_super(mddev, rdev); 4473 err = mddev->pers->hot_add_disk(mddev, rdev); 4474 if (err) 4475 unbind_rdev_from_array(rdev); 4476 } 4477 if (err) 4478 export_rdev(rdev); 4479 else 4480 sysfs_notify_dirent(rdev->sysfs_state); 4481 4482 md_update_sb(mddev, 1); 4483 if (mddev->degraded) 4484 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 4485 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4486 md_wakeup_thread(mddev->thread); 4487 return err; 4488 } 4489 4490 /* otherwise, add_new_disk is only allowed 4491 * for major_version==0 superblocks 4492 */ 4493 if (mddev->major_version != 0) { 4494 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n", 4495 mdname(mddev)); 4496 return -EINVAL; 4497 } 4498 4499 if (!(info->state & (1<<MD_DISK_FAULTY))) { 4500 int err; 4501 rdev = md_import_device(dev, -1, 0); 4502 if (IS_ERR(rdev)) { 4503 printk(KERN_WARNING 4504 "md: error, md_import_device() returned %ld\n", 4505 PTR_ERR(rdev)); 4506 return PTR_ERR(rdev); 4507 } 4508 rdev->desc_nr = info->number; 4509 if (info->raid_disk < mddev->raid_disks) 4510 rdev->raid_disk = info->raid_disk; 4511 else 4512 rdev->raid_disk = -1; 4513 4514 if (rdev->raid_disk < mddev->raid_disks) 4515 if (info->state & (1<<MD_DISK_SYNC)) 4516 set_bit(In_sync, &rdev->flags); 4517 4518 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 4519 set_bit(WriteMostly, &rdev->flags); 4520 4521 if (!mddev->persistent) { 4522 printk(KERN_INFO "md: nonpersistent superblock ...\n"); 4523 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512; 4524 } else 4525 rdev->sb_start = calc_dev_sboffset(rdev->bdev); 4526 rdev->size = calc_num_sectors(rdev, mddev->chunk_size) / 2; 4527 4528 err = bind_rdev_to_array(rdev, mddev); 4529 if (err) { 4530 export_rdev(rdev); 4531 return err; 4532 } 4533 } 4534 4535 return 0; 4536 } 4537 4538 static int hot_remove_disk(mddev_t * mddev, dev_t dev) 4539 { 4540 char b[BDEVNAME_SIZE]; 4541 mdk_rdev_t *rdev; 4542 4543 rdev = find_rdev(mddev, dev); 4544 if (!rdev) 4545 return -ENXIO; 4546 4547 if (rdev->raid_disk >= 0) 4548 goto busy; 4549 4550 kick_rdev_from_array(rdev); 4551 md_update_sb(mddev, 1); 4552
md_new_event(mddev); 4553 4554 return 0; 4555 busy: 4556 printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n", 4557 bdevname(rdev->bdev,b), mdname(mddev)); 4558 return -EBUSY; 4559 } 4560 4561 static int hot_add_disk(mddev_t * mddev, dev_t dev) 4562 { 4563 char b[BDEVNAME_SIZE]; 4564 int err; 4565 mdk_rdev_t *rdev; 4566 4567 if (!mddev->pers) 4568 return -ENODEV; 4569 4570 if (mddev->major_version != 0) { 4571 printk(KERN_WARNING "%s: HOT_ADD may only be used with" 4572 " version-0 superblocks.\n", 4573 mdname(mddev)); 4574 return -EINVAL; 4575 } 4576 if (!mddev->pers->hot_add_disk) { 4577 printk(KERN_WARNING 4578 "%s: personality does not support diskops!\n", 4579 mdname(mddev)); 4580 return -EINVAL; 4581 } 4582 4583 rdev = md_import_device(dev, -1, 0); 4584 if (IS_ERR(rdev)) { 4585 printk(KERN_WARNING 4586 "md: error, md_import_device() returned %ld\n", 4587 PTR_ERR(rdev)); 4588 return -EINVAL; 4589 } 4590 4591 if (mddev->persistent) 4592 rdev->sb_start = calc_dev_sboffset(rdev->bdev); 4593 else 4594 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512; 4595 4596 rdev->size = calc_num_sectors(rdev, mddev->chunk_size) / 2; 4597 4598 if (test_bit(Faulty, &rdev->flags)) { 4599 printk(KERN_WARNING 4600 "md: can not hot-add faulty %s disk to %s!\n", 4601 bdevname(rdev->bdev,b), mdname(mddev)); 4602 err = -EINVAL; 4603 goto abort_export; 4604 } 4605 clear_bit(In_sync, &rdev->flags); 4606 rdev->desc_nr = -1; 4607 rdev->saved_raid_disk = -1; 4608 err = bind_rdev_to_array(rdev, mddev); 4609 if (err) 4610 goto abort_export; 4611 4612 /* 4613 * The rest should better be atomic, we can have disk failures 4614 * noticed in interrupt contexts ... 4615 */ 4616 4617 if (rdev->desc_nr == mddev->max_disks) { 4618 printk(KERN_WARNING "%s: can not hot-add to full array!\n", 4619 mdname(mddev)); 4620 err = -EBUSY; 4621 goto abort_unbind_export; 4622 } 4623 4624 rdev->raid_disk = -1; 4625 4626 md_update_sb(mddev, 1); 4627 4628 /* 4629 * Kick recovery, maybe this spare has to be added to the 4630 * array immediately. 4631 */ 4632 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4633 md_wakeup_thread(mddev->thread); 4634 md_new_event(mddev); 4635 return 0; 4636 4637 abort_unbind_export: 4638 unbind_rdev_from_array(rdev); 4639 4640 abort_export: 4641 export_rdev(rdev); 4642 return err; 4643 } 4644 4645 static int set_bitmap_file(mddev_t *mddev, int fd) 4646 { 4647 int err; 4648 4649 if (mddev->pers) { 4650 if (!mddev->pers->quiesce) 4651 return -EBUSY; 4652 if (mddev->recovery || mddev->sync_thread) 4653 return -EBUSY; 4654 /* we should be able to change the bitmap.. 
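 * The swap is done with the personality quiesced so no I/O is in
 * flight while the bitmap is attached or torn down; the sequence
 * below is essentially:
 *
 *	mddev->pers->quiesce(mddev, 1);    (drain and block new I/O)
 *	bitmap_create(mddev) / bitmap_destroy(mddev);
 *	mddev->pers->quiesce(mddev, 0);    (resume I/O)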
*/ 4655 } 4656 4657 4658 if (fd >= 0) { 4659 if (mddev->bitmap) 4660 return -EEXIST; /* cannot add when bitmap is present */ 4661 mddev->bitmap_file = fget(fd); 4662 4663 if (mddev->bitmap_file == NULL) { 4664 printk(KERN_ERR "%s: error: failed to get bitmap file\n", 4665 mdname(mddev)); 4666 return -EBADF; 4667 } 4668 4669 err = deny_bitmap_write_access(mddev->bitmap_file); 4670 if (err) { 4671 printk(KERN_ERR "%s: error: bitmap file is already in use\n", 4672 mdname(mddev)); 4673 fput(mddev->bitmap_file); 4674 mddev->bitmap_file = NULL; 4675 return err; 4676 } 4677 mddev->bitmap_offset = 0; /* file overrides offset */ 4678 } else if (mddev->bitmap == NULL) 4679 return -ENOENT; /* cannot remove what isn't there */ 4680 err = 0; 4681 if (mddev->pers) { 4682 mddev->pers->quiesce(mddev, 1); 4683 if (fd >= 0) 4684 err = bitmap_create(mddev); 4685 if (fd < 0 || err) { 4686 bitmap_destroy(mddev); 4687 fd = -1; /* make sure to put the file */ 4688 } 4689 mddev->pers->quiesce(mddev, 0); 4690 } 4691 if (fd < 0) { 4692 if (mddev->bitmap_file) { 4693 restore_bitmap_write_access(mddev->bitmap_file); 4694 fput(mddev->bitmap_file); 4695 } 4696 mddev->bitmap_file = NULL; 4697 } 4698 4699 return err; 4700 } 4701 4702 /* 4703 * set_array_info is used in two different ways 4704 * The original usage is when creating a new array. 4705 * In this usage, raid_disks is > 0 and it together with 4706 * level, size, not_persistent, layout, chunksize determine the 4707 * shape of the array. 4708 * This will always create an array with a type-0.90.0 superblock. 4709 * The newer usage is when assembling an array. 4710 * In this case raid_disks will be 0, and the major_version field is 4711 * used to determine which style super-blocks are to be found on the devices. 4712 * The minor and patch _version numbers are also kept in case the 4713 * super_block handler wishes to interpret them. 4714 */ 4715 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) 4716 { 4717 4718 if (info->raid_disks == 0) { 4719 /* just setting version number for superblock loading */ 4720 if (info->major_version < 0 || 4721 info->major_version >= ARRAY_SIZE(super_types) || 4722 super_types[info->major_version].name == NULL) { 4723 /* maybe try to auto-load a module? */ 4724 printk(KERN_INFO 4725 "md: superblock version %d not known\n", 4726 info->major_version); 4727 return -EINVAL; 4728 } 4729 mddev->major_version = info->major_version; 4730 mddev->minor_version = info->minor_version; 4731 mddev->patch_version = info->patch_version; 4732 mddev->persistent = !info->not_persistent; 4733 return 0; 4734 } 4735 mddev->major_version = MD_MAJOR_VERSION; 4736 mddev->minor_version = MD_MINOR_VERSION; 4737 mddev->patch_version = MD_PATCHLEVEL_VERSION; 4738 mddev->ctime = get_seconds(); 4739 4740 mddev->level = info->level; 4741 mddev->clevel[0] = 0; 4742 mddev->size = info->size; 4743 mddev->raid_disks = info->raid_disks; 4744 /* don't set md_minor, it is determined by which /dev/md* was 4745 * opened 4746 */ 4747 if (info->state & (1<<MD_SB_CLEAN)) 4748 mddev->recovery_cp = MaxSector; 4749 else 4750 mddev->recovery_cp = 0; 4751 mddev->persistent = !
info->not_persistent; 4752 mddev->external = 0; 4753 4754 mddev->layout = info->layout; 4755 mddev->chunk_size = info->chunk_size; 4756 4757 mddev->max_disks = MD_SB_DISKS; 4758 4759 if (mddev->persistent) 4760 mddev->flags = 0; 4761 set_bit(MD_CHANGE_DEVS, &mddev->flags); 4762 4763 mddev->default_bitmap_offset = MD_SB_BYTES >> 9; 4764 mddev->bitmap_offset = 0; 4765 4766 mddev->reshape_position = MaxSector; 4767 4768 /* 4769 * Generate a 128 bit UUID 4770 */ 4771 get_random_bytes(mddev->uuid, 16); 4772 4773 mddev->new_level = mddev->level; 4774 mddev->new_chunk = mddev->chunk_size; 4775 mddev->new_layout = mddev->layout; 4776 mddev->delta_disks = 0; 4777 4778 return 0; 4779 } 4780 4781 static int update_size(mddev_t *mddev, sector_t num_sectors) 4782 { 4783 mdk_rdev_t *rdev; 4784 int rv; 4785 int fit = (num_sectors == 0); 4786 4787 if (mddev->pers->resize == NULL) 4788 return -EINVAL; 4789 /* The "num_sectors" is the number of sectors of each device that 4790 * is used. This can only make sense for arrays with redundancy. 4791 * linear and raid0 always use whatever space is available. We can only 4792 * consider changing this number if no resync or reconstruction is 4793 * happening, and if the new size is acceptable. It must fit before the 4794 * sb_start or, if that is <data_offset, it must fit before the size 4795 * of each device. If num_sectors is zero, we find the largest size 4796 * that fits. 4797 4798 */ 4799 if (mddev->sync_thread) 4800 return -EBUSY; 4801 if (mddev->bitmap) 4802 /* Sorry, cannot grow a bitmap yet, just remove it, 4803 * grow, and re-add. 4804 */ 4805 return -EBUSY; 4806 list_for_each_entry(rdev, &mddev->disks, same_set) { 4807 sector_t avail; 4808 avail = rdev->size * 2; 4809 4810 if (fit && (num_sectors == 0 || num_sectors > avail)) 4811 num_sectors = avail; 4812 if (avail < num_sectors) 4813 return -ENOSPC; 4814 } 4815 rv = mddev->pers->resize(mddev, num_sectors); 4816 if (!rv) { 4817 struct block_device *bdev; 4818 4819 bdev = bdget_disk(mddev->gendisk, 0); 4820 if (bdev) { 4821 mutex_lock(&bdev->bd_inode->i_mutex); 4822 i_size_write(bdev->bd_inode, 4823 (loff_t)mddev->array_sectors << 9); 4824 mutex_unlock(&bdev->bd_inode->i_mutex); 4825 bdput(bdev); 4826 } 4827 } 4828 return rv; 4829 } 4830 4831 static int update_raid_disks(mddev_t *mddev, int raid_disks) 4832 { 4833 int rv; 4834 /* change the number of raid disks */ 4835 if (mddev->pers->check_reshape == NULL) 4836 return -EINVAL; 4837 if (raid_disks <= 0 || 4838 raid_disks >= mddev->max_disks) 4839 return -EINVAL; 4840 if (mddev->sync_thread || mddev->reshape_position != MaxSector) 4841 return -EBUSY; 4842 mddev->delta_disks = raid_disks - mddev->raid_disks; 4843 4844 rv = mddev->pers->check_reshape(mddev); 4845 return rv; 4846 } 4847 4848 4849 /* 4850 * update_array_info is used to change the configuration of an 4851 * on-line array. 4852 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size 4853 * fields in the info are checked against the array. 4854 * Any differences that cannot be handled will cause an error. 4855 * Normally, only one change can be managed at a time. 
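 *
 * For example, a request that changes only 'layout' is delegated to
 * the personality's reconfig method, and one that changes only 'size'
 * goes through update_size(); a single call that changes both is
 * rejected with -EINVAL, because two changes are counted.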
4856 */ 4857 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info) 4858 { 4859 int rv = 0; 4860 int cnt = 0; 4861 int state = 0; 4862 4863 /* calculate expected state,ignoring low bits */ 4864 if (mddev->bitmap && mddev->bitmap_offset) 4865 state |= (1 << MD_SB_BITMAP_PRESENT); 4866 4867 if (mddev->major_version != info->major_version || 4868 mddev->minor_version != info->minor_version || 4869 /* mddev->patch_version != info->patch_version || */ 4870 mddev->ctime != info->ctime || 4871 mddev->level != info->level || 4872 /* mddev->layout != info->layout || */ 4873 !mddev->persistent != info->not_persistent|| 4874 mddev->chunk_size != info->chunk_size || 4875 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ 4876 ((state^info->state) & 0xfffffe00) 4877 ) 4878 return -EINVAL; 4879 /* Check there is only one change */ 4880 if (info->size >= 0 && mddev->size != info->size) cnt++; 4881 if (mddev->raid_disks != info->raid_disks) cnt++; 4882 if (mddev->layout != info->layout) cnt++; 4883 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) cnt++; 4884 if (cnt == 0) return 0; 4885 if (cnt > 1) return -EINVAL; 4886 4887 if (mddev->layout != info->layout) { 4888 /* Change layout 4889 * we don't need to do anything at the md level, the 4890 * personality will take care of it all. 4891 */ 4892 if (mddev->pers->reconfig == NULL) 4893 return -EINVAL; 4894 else 4895 return mddev->pers->reconfig(mddev, info->layout, -1); 4896 } 4897 if (info->size >= 0 && mddev->size != info->size) 4898 rv = update_size(mddev, (sector_t)info->size * 2); 4899 4900 if (mddev->raid_disks != info->raid_disks) 4901 rv = update_raid_disks(mddev, info->raid_disks); 4902 4903 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { 4904 if (mddev->pers->quiesce == NULL) 4905 return -EINVAL; 4906 if (mddev->recovery || mddev->sync_thread) 4907 return -EBUSY; 4908 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) { 4909 /* add the bitmap */ 4910 if (mddev->bitmap) 4911 return -EEXIST; 4912 if (mddev->default_bitmap_offset == 0) 4913 return -EINVAL; 4914 mddev->bitmap_offset = mddev->default_bitmap_offset; 4915 mddev->pers->quiesce(mddev, 1); 4916 rv = bitmap_create(mddev); 4917 if (rv) 4918 bitmap_destroy(mddev); 4919 mddev->pers->quiesce(mddev, 0); 4920 } else { 4921 /* remove the bitmap */ 4922 if (!mddev->bitmap) 4923 return -ENOENT; 4924 if (mddev->bitmap->file) 4925 return -EINVAL; 4926 mddev->pers->quiesce(mddev, 1); 4927 bitmap_destroy(mddev); 4928 mddev->pers->quiesce(mddev, 0); 4929 mddev->bitmap_offset = 0; 4930 } 4931 } 4932 md_update_sb(mddev, 1); 4933 return rv; 4934 } 4935 4936 static int set_disk_faulty(mddev_t *mddev, dev_t dev) 4937 { 4938 mdk_rdev_t *rdev; 4939 4940 if (mddev->pers == NULL) 4941 return -ENODEV; 4942 4943 rdev = find_rdev(mddev, dev); 4944 if (!rdev) 4945 return -ENODEV; 4946 4947 md_error(mddev, rdev); 4948 return 0; 4949 } 4950 4951 /* 4952 * We have a problem here : there is no easy way to give a CHS 4953 * virtual geometry. We currently pretend that we have a 2 heads 4954 * 4 sectors (with a BIG number of cylinders...). This drives 4955 * dosfs just mad... 
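 *
 * With 2 heads and 4 sectors per track, cylinders = capacity / 8, so
 * a 1 TiB array (2147483648 512-byte sectors) reports 268435456
 * cylinders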
;-) 4956 */ 4957 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) 4958 { 4959 mddev_t *mddev = bdev->bd_disk->private_data; 4960 4961 geo->heads = 2; 4962 geo->sectors = 4; 4963 geo->cylinders = get_capacity(mddev->gendisk) / 8; 4964 return 0; 4965 } 4966 4967 static int md_ioctl(struct block_device *bdev, fmode_t mode, 4968 unsigned int cmd, unsigned long arg) 4969 { 4970 int err = 0; 4971 void __user *argp = (void __user *)arg; 4972 mddev_t *mddev = NULL; 4973 4974 if (!capable(CAP_SYS_ADMIN)) 4975 return -EACCES; 4976 4977 /* 4978 * Commands dealing with the RAID driver but not any 4979 * particular array: 4980 */ 4981 switch (cmd) 4982 { 4983 case RAID_VERSION: 4984 err = get_version(argp); 4985 goto done; 4986 4987 case PRINT_RAID_DEBUG: 4988 err = 0; 4989 md_print_devices(); 4990 goto done; 4991 4992 #ifndef MODULE 4993 case RAID_AUTORUN: 4994 err = 0; 4995 autostart_arrays(arg); 4996 goto done; 4997 #endif 4998 default:; 4999 } 5000 5001 /* 5002 * Commands creating/starting a new array: 5003 */ 5004 5005 mddev = bdev->bd_disk->private_data; 5006 5007 if (!mddev) { 5008 BUG(); 5009 goto abort; 5010 } 5011 5012 err = mddev_lock(mddev); 5013 if (err) { 5014 printk(KERN_INFO 5015 "md: ioctl lock interrupted, reason %d, cmd %d\n", 5016 err, cmd); 5017 goto abort; 5018 } 5019 5020 switch (cmd) 5021 { 5022 case SET_ARRAY_INFO: 5023 { 5024 mdu_array_info_t info; 5025 if (!arg) 5026 memset(&info, 0, sizeof(info)); 5027 else if (copy_from_user(&info, argp, sizeof(info))) { 5028 err = -EFAULT; 5029 goto abort_unlock; 5030 } 5031 if (mddev->pers) { 5032 err = update_array_info(mddev, &info); 5033 if (err) { 5034 printk(KERN_WARNING "md: couldn't update" 5035 " array info. %d\n", err); 5036 goto abort_unlock; 5037 } 5038 goto done_unlock; 5039 } 5040 if (!list_empty(&mddev->disks)) { 5041 printk(KERN_WARNING 5042 "md: array %s already has disks!\n", 5043 mdname(mddev)); 5044 err = -EBUSY; 5045 goto abort_unlock; 5046 } 5047 if (mddev->raid_disks) { 5048 printk(KERN_WARNING 5049 "md: array %s already initialised!\n", 5050 mdname(mddev)); 5051 err = -EBUSY; 5052 goto abort_unlock; 5053 } 5054 err = set_array_info(mddev, &info); 5055 if (err) { 5056 printk(KERN_WARNING "md: couldn't set" 5057 " array info. 
%d\n", err); 5058 goto abort_unlock; 5059 } 5060 } 5061 goto done_unlock; 5062 5063 default:; 5064 } 5065 5066 /* 5067 * Commands querying/configuring an existing array: 5068 */ 5069 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY, 5070 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */ 5071 if ((!mddev->raid_disks && !mddev->external) 5072 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY 5073 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE 5074 && cmd != GET_BITMAP_FILE) { 5075 err = -ENODEV; 5076 goto abort_unlock; 5077 } 5078 5079 /* 5080 * Commands even a read-only array can execute: 5081 */ 5082 switch (cmd) 5083 { 5084 case GET_ARRAY_INFO: 5085 err = get_array_info(mddev, argp); 5086 goto done_unlock; 5087 5088 case GET_BITMAP_FILE: 5089 err = get_bitmap_file(mddev, argp); 5090 goto done_unlock; 5091 5092 case GET_DISK_INFO: 5093 err = get_disk_info(mddev, argp); 5094 goto done_unlock; 5095 5096 case RESTART_ARRAY_RW: 5097 err = restart_array(mddev); 5098 goto done_unlock; 5099 5100 case STOP_ARRAY: 5101 err = do_md_stop(mddev, 0, 1); 5102 goto done_unlock; 5103 5104 case STOP_ARRAY_RO: 5105 err = do_md_stop(mddev, 1, 1); 5106 goto done_unlock; 5107 5108 } 5109 5110 /* 5111 * The remaining ioctls are changing the state of the 5112 * superblock, so we do not allow them on read-only arrays. 5113 * However non-MD ioctls (e.g. get-size) will still come through 5114 * here and hit the 'default' below, so only disallow 5115 * 'md' ioctls, and switch to rw mode if started auto-readonly. 5116 */ 5117 if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) { 5118 if (mddev->ro == 2) { 5119 mddev->ro = 0; 5120 sysfs_notify_dirent(mddev->sysfs_state); 5121 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5122 md_wakeup_thread(mddev->thread); 5123 } else { 5124 err = -EROFS; 5125 goto abort_unlock; 5126 } 5127 } 5128 5129 switch (cmd) 5130 { 5131 case ADD_NEW_DISK: 5132 { 5133 mdu_disk_info_t info; 5134 if (copy_from_user(&info, argp, sizeof(info))) 5135 err = -EFAULT; 5136 else 5137 err = add_new_disk(mddev, &info); 5138 goto done_unlock; 5139 } 5140 5141 case HOT_REMOVE_DISK: 5142 err = hot_remove_disk(mddev, new_decode_dev(arg)); 5143 goto done_unlock; 5144 5145 case HOT_ADD_DISK: 5146 err = hot_add_disk(mddev, new_decode_dev(arg)); 5147 goto done_unlock; 5148 5149 case SET_DISK_FAULTY: 5150 err = set_disk_faulty(mddev, new_decode_dev(arg)); 5151 goto done_unlock; 5152 5153 case RUN_ARRAY: 5154 err = do_md_run(mddev); 5155 goto done_unlock; 5156 5157 case SET_BITMAP_FILE: 5158 err = set_bitmap_file(mddev, (int)arg); 5159 goto done_unlock; 5160 5161 default: 5162 err = -EINVAL; 5163 goto abort_unlock; 5164 } 5165 5166 done_unlock: 5167 abort_unlock: 5168 if (mddev->hold_active == UNTIL_IOCTL && 5169 err != -EINVAL) 5170 mddev->hold_active = 0; 5171 mddev_unlock(mddev); 5172 5173 return err; 5174 done: 5175 if (err) 5176 MD_BUG(); 5177 abort: 5178 return err; 5179 } 5180 5181 static int md_open(struct block_device *bdev, fmode_t mode) 5182 { 5183 /* 5184 * Succeed if we can lock the mddev, which confirms that 5185 * it isn't being stopped right now. 5186 */ 5187 mddev_t *mddev = mddev_find(bdev->bd_dev); 5188 int err; 5189 5190 if (mddev->gendisk != bdev->bd_disk) { 5191 /* we are racing with mddev_put which is discarding this 5192 * bd_disk. 
5193 */ 5194 mddev_put(mddev); 5195 /* Wait until bdev->bd_disk is definitely gone */ 5196 flush_scheduled_work(); 5197 /* Then retry the open from the top */ 5198 return -ERESTARTSYS; 5199 } 5200 BUG_ON(mddev != bdev->bd_disk->private_data); 5201 5202 if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1))) 5203 goto out; 5204 5205 err = 0; 5206 atomic_inc(&mddev->openers); 5207 mddev_unlock(mddev); 5208 5209 check_disk_change(bdev); 5210 out: 5211 return err; 5212 } 5213 5214 static int md_release(struct gendisk *disk, fmode_t mode) 5215 { 5216 mddev_t *mddev = disk->private_data; 5217 5218 BUG_ON(!mddev); 5219 atomic_dec(&mddev->openers); 5220 mddev_put(mddev); 5221 5222 return 0; 5223 } 5224 5225 static int md_media_changed(struct gendisk *disk) 5226 { 5227 mddev_t *mddev = disk->private_data; 5228 5229 return mddev->changed; 5230 } 5231 5232 static int md_revalidate(struct gendisk *disk) 5233 { 5234 mddev_t *mddev = disk->private_data; 5235 5236 mddev->changed = 0; 5237 return 0; 5238 } 5239 static struct block_device_operations md_fops = 5240 { 5241 .owner = THIS_MODULE, 5242 .open = md_open, 5243 .release = md_release, 5244 .locked_ioctl = md_ioctl, 5245 .getgeo = md_getgeo, 5246 .media_changed = md_media_changed, 5247 .revalidate_disk= md_revalidate, 5248 }; 5249 5250 static int md_thread(void * arg) 5251 { 5252 mdk_thread_t *thread = arg; 5253 5254 /* 5255 * md_thread is a 'system-thread'; its priority should be very 5256 * high. We avoid resource deadlocks individually in each 5257 * raid personality. (RAID5 does preallocation) We also use RR and 5258 * the very same RT priority as kswapd, thus we will never get 5259 * into a priority inversion deadlock. 5260 * 5261 * we definitely have to have equal or higher priority than 5262 * bdflush, otherwise bdflush will deadlock if there are too 5263 * many dirty RAID5 blocks. 5264 */ 5265 5266 allow_signal(SIGKILL); 5267 while (!kthread_should_stop()) { 5268 5269 /* We need to wait INTERRUPTIBLE so that 5270 * we don't add to the load-average.
5271 * That means we need to be sure no signals are 5272 * pending 5273 */ 5274 if (signal_pending(current)) 5275 flush_signals(current); 5276 5277 wait_event_interruptible_timeout 5278 (thread->wqueue, 5279 test_bit(THREAD_WAKEUP, &thread->flags) 5280 || kthread_should_stop(), 5281 thread->timeout); 5282 5283 clear_bit(THREAD_WAKEUP, &thread->flags); 5284 5285 thread->run(thread->mddev); 5286 } 5287 5288 return 0; 5289 } 5290 5291 void md_wakeup_thread(mdk_thread_t *thread) 5292 { 5293 if (thread) { 5294 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm); 5295 set_bit(THREAD_WAKEUP, &thread->flags); 5296 wake_up(&thread->wqueue); 5297 } 5298 } 5299 5300 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev, 5301 const char *name) 5302 { 5303 mdk_thread_t *thread; 5304 5305 thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL); 5306 if (!thread) 5307 return NULL; 5308 5309 init_waitqueue_head(&thread->wqueue); 5310 5311 thread->run = run; 5312 thread->mddev = mddev; 5313 thread->timeout = MAX_SCHEDULE_TIMEOUT; 5314 thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev)); 5315 if (IS_ERR(thread->tsk)) { 5316 kfree(thread); 5317 return NULL; 5318 } 5319 return thread; 5320 } 5321 5322 void md_unregister_thread(mdk_thread_t *thread) 5323 { 5324 dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); 5325 5326 kthread_stop(thread->tsk); 5327 kfree(thread); 5328 } 5329 5330 void md_error(mddev_t *mddev, mdk_rdev_t *rdev) 5331 { 5332 if (!mddev) { 5333 MD_BUG(); 5334 return; 5335 } 5336 5337 if (!rdev || test_bit(Faulty, &rdev->flags)) 5338 return; 5339 5340 if (mddev->external) 5341 set_bit(Blocked, &rdev->flags); 5342 /* 5343 dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n", 5344 mdname(mddev), 5345 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev), 5346 __builtin_return_address(0),__builtin_return_address(1), 5347 __builtin_return_address(2),__builtin_return_address(3)); 5348 */ 5349 if (!mddev->pers) 5350 return; 5351 if (!mddev->pers->error_handler) 5352 return; 5353 mddev->pers->error_handler(mddev,rdev); 5354 if (mddev->degraded) 5355 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 5356 set_bit(StateChanged, &rdev->flags); 5357 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 5358 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5359 md_wakeup_thread(mddev->thread); 5360 md_new_event_inintr(mddev); 5361 } 5362 5363 /* seq_file implementation /proc/mdstat */ 5364 5365 static void status_unused(struct seq_file *seq) 5366 { 5367 int i = 0; 5368 mdk_rdev_t *rdev; 5369 5370 seq_printf(seq, "unused devices: "); 5371 5372 list_for_each_entry(rdev, &pending_raid_disks, same_set) { 5373 char b[BDEVNAME_SIZE]; 5374 i++; 5375 seq_printf(seq, "%s ", 5376 bdevname(rdev->bdev,b)); 5377 } 5378 if (!i) 5379 seq_printf(seq, "<none>"); 5380 5381 seq_printf(seq, "\n"); 5382 } 5383 5384 5385 static void status_resync(struct seq_file *seq, mddev_t * mddev) 5386 { 5387 sector_t max_blocks, resync, res; 5388 unsigned long dt, db, rt; 5389 int scale; 5390 unsigned int per_milli; 5391 5392 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2; 5393 5394 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 5395 max_blocks = mddev->resync_max_sectors >> 1; 5396 else 5397 max_blocks = mddev->size; 5398 5399 /* 5400 * Should not happen. 
5401 */ 5402 if (!max_blocks) { 5403 MD_BUG(); 5404 return; 5405 } 5406 /* Pick 'scale' such that (resync>>scale)*1000 will fit 5407 * in a sector_t, and (max_blocks>>scale) will fit in a 5408 * u32, as those are the requirements for sector_div. 5409 * Thus 'scale' must be at least 10 5410 */ 5411 scale = 10; 5412 if (sizeof(sector_t) > sizeof(unsigned long)) { 5413 while ( max_blocks/2 > (1ULL<<(scale+32))) 5414 scale++; 5415 } 5416 res = (resync>>scale)*1000; 5417 sector_div(res, (u32)((max_blocks>>scale)+1)); 5418 5419 per_milli = res; 5420 { 5421 int i, x = per_milli/50, y = 20-x; 5422 seq_printf(seq, "["); 5423 for (i = 0; i < x; i++) 5424 seq_printf(seq, "="); 5425 seq_printf(seq, ">"); 5426 for (i = 0; i < y; i++) 5427 seq_printf(seq, "."); 5428 seq_printf(seq, "] "); 5429 } 5430 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)", 5431 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? 5432 "reshape" : 5433 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)? 5434 "check" : 5435 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? 5436 "resync" : "recovery"))), 5437 per_milli/10, per_milli % 10, 5438 (unsigned long long) resync, 5439 (unsigned long long) max_blocks); 5440 5441 /* 5442 * We do not want to overflow, so the order of operands and 5443 * the * 100 / 100 trick are important. We do a +1 to be 5444 * safe against division by zero. We only estimate anyway. 5445 * 5446 * dt: time from mark until now 5447 * db: blocks written from mark until now 5448 * rt: remaining time 5449 */ 5450 dt = ((jiffies - mddev->resync_mark) / HZ); 5451 if (!dt) dt++; 5452 db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active)) 5453 - mddev->resync_mark_cnt; 5454 rt = (dt * ((unsigned long)(max_blocks-resync) / (db/2/100+1)))/100; 5455 5456 seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6); 5457 5458 seq_printf(seq, " speed=%ldK/sec", db/2/dt); 5459 } 5460 5461 static void *md_seq_start(struct seq_file *seq, loff_t *pos) 5462 { 5463 struct list_head *tmp; 5464 loff_t l = *pos; 5465 mddev_t *mddev; 5466 5467 if (l >= 0x10000) 5468 return NULL; 5469 if (!l--) 5470 /* header */ 5471 return (void*)1; 5472 5473 spin_lock(&all_mddevs_lock); 5474 list_for_each(tmp,&all_mddevs) 5475 if (!l--) { 5476 mddev = list_entry(tmp, mddev_t, all_mddevs); 5477 mddev_get(mddev); 5478 spin_unlock(&all_mddevs_lock); 5479 return mddev; 5480 } 5481 spin_unlock(&all_mddevs_lock); 5482 if (!l--) 5483 return (void*)2;/* tail */ 5484 return NULL; 5485 } 5486 5487 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) 5488 { 5489 struct list_head *tmp; 5490 mddev_t *next_mddev, *mddev = v; 5491 5492 ++*pos; 5493 if (v == (void*)2) 5494 return NULL; 5495 5496 spin_lock(&all_mddevs_lock); 5497 if (v == (void*)1) 5498 tmp = all_mddevs.next; 5499 else 5500 tmp = mddev->all_mddevs.next; 5501 if (tmp != &all_mddevs) 5502 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs)); 5503 else { 5504 next_mddev = (void*)2; 5505 *pos = 0x10000; 5506 } 5507 spin_unlock(&all_mddevs_lock); 5508 5509 if (v != (void*)1) 5510 mddev_put(mddev); 5511 return next_mddev; 5512 5513 } 5514 5515 static void md_seq_stop(struct seq_file *seq, void *v) 5516 { 5517 mddev_t *mddev = v; 5518 5519 if (mddev && v != (void*)1 && v != (void*)2) 5520 mddev_put(mddev); 5521 } 5522 5523 struct mdstat_info { 5524 int event; 5525 }; 5526 5527 static int md_seq_show(struct seq_file *seq, void *v) 5528 { 5529 mddev_t *mddev = v; 5530 sector_t size; 5531 mdk_rdev_t *rdev; 5532 struct mdstat_info *mi = seq->private; 5533 struct bitmap *bitmap; 5534 
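	/* md_seq_start() hands us (void*)1 for the "Personalities :"
	 * header and (void*)2 for the trailing "unused devices:" line;
	 * anything else is a real mddev.  A typical array entry rendered
	 * below looks like (output for illustration only):
	 *
	 *   md0 : active raid1 sdb1[1] sda1[0]
	 *         1048512 blocks [2/2] [UU]
	 */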
5535 if (v == (void*)1) { 5536 struct mdk_personality *pers; 5537 seq_printf(seq, "Personalities : "); 5538 spin_lock(&pers_lock); 5539 list_for_each_entry(pers, &pers_list, list) 5540 seq_printf(seq, "[%s] ", pers->name); 5541 5542 spin_unlock(&pers_lock); 5543 seq_printf(seq, "\n"); 5544 mi->event = atomic_read(&md_event_count); 5545 return 0; 5546 } 5547 if (v == (void*)2) { 5548 status_unused(seq); 5549 return 0; 5550 } 5551 5552 if (mddev_lock(mddev) < 0) 5553 return -EINTR; 5554 5555 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { 5556 seq_printf(seq, "%s : %sactive", mdname(mddev), 5557 mddev->pers ? "" : "in"); 5558 if (mddev->pers) { 5559 if (mddev->ro==1) 5560 seq_printf(seq, " (read-only)"); 5561 if (mddev->ro==2) 5562 seq_printf(seq, " (auto-read-only)"); 5563 seq_printf(seq, " %s", mddev->pers->name); 5564 } 5565 5566 size = 0; 5567 list_for_each_entry(rdev, &mddev->disks, same_set) { 5568 char b[BDEVNAME_SIZE]; 5569 seq_printf(seq, " %s[%d]", 5570 bdevname(rdev->bdev,b), rdev->desc_nr); 5571 if (test_bit(WriteMostly, &rdev->flags)) 5572 seq_printf(seq, "(W)"); 5573 if (test_bit(Faulty, &rdev->flags)) { 5574 seq_printf(seq, "(F)"); 5575 continue; 5576 } else if (rdev->raid_disk < 0) 5577 seq_printf(seq, "(S)"); /* spare */ 5578 size += rdev->size; 5579 } 5580 5581 if (!list_empty(&mddev->disks)) { 5582 if (mddev->pers) 5583 seq_printf(seq, "\n %llu blocks", 5584 (unsigned long long) 5585 mddev->array_sectors / 2); 5586 else 5587 seq_printf(seq, "\n %llu blocks", 5588 (unsigned long long)size); 5589 } 5590 if (mddev->persistent) { 5591 if (mddev->major_version != 0 || 5592 mddev->minor_version != 90) { 5593 seq_printf(seq," super %d.%d", 5594 mddev->major_version, 5595 mddev->minor_version); 5596 } 5597 } else if (mddev->external) 5598 seq_printf(seq, " super external:%s", 5599 mddev->metadata_type); 5600 else 5601 seq_printf(seq, " super non-persistent"); 5602 5603 if (mddev->pers) { 5604 mddev->pers->status(seq, mddev); 5605 seq_printf(seq, "\n "); 5606 if (mddev->pers->sync_request) { 5607 if (mddev->curr_resync > 2) { 5608 status_resync(seq, mddev); 5609 seq_printf(seq, "\n "); 5610 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2) 5611 seq_printf(seq, "\tresync=DELAYED\n "); 5612 else if (mddev->recovery_cp < MaxSector) 5613 seq_printf(seq, "\tresync=PENDING\n "); 5614 } 5615 } else 5616 seq_printf(seq, "\n "); 5617 5618 if ((bitmap = mddev->bitmap)) { 5619 unsigned long chunk_kb; 5620 unsigned long flags; 5621 spin_lock_irqsave(&bitmap->lock, flags); 5622 chunk_kb = bitmap->chunksize >> 10; 5623 seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], " 5624 "%lu%s chunk", 5625 bitmap->pages - bitmap->missing_pages, 5626 bitmap->pages, 5627 (bitmap->pages - bitmap->missing_pages) 5628 << (PAGE_SHIFT - 10), 5629 chunk_kb ? chunk_kb : bitmap->chunksize, 5630 chunk_kb ? 
"KB" : "B"); 5631 if (bitmap->file) { 5632 seq_printf(seq, ", file: "); 5633 seq_path(seq, &bitmap->file->f_path, " \t\n"); 5634 } 5635 5636 seq_printf(seq, "\n"); 5637 spin_unlock_irqrestore(&bitmap->lock, flags); 5638 } 5639 5640 seq_printf(seq, "\n"); 5641 } 5642 mddev_unlock(mddev); 5643 5644 return 0; 5645 } 5646 5647 static struct seq_operations md_seq_ops = { 5648 .start = md_seq_start, 5649 .next = md_seq_next, 5650 .stop = md_seq_stop, 5651 .show = md_seq_show, 5652 }; 5653 5654 static int md_seq_open(struct inode *inode, struct file *file) 5655 { 5656 int error; 5657 struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL); 5658 if (mi == NULL) 5659 return -ENOMEM; 5660 5661 error = seq_open(file, &md_seq_ops); 5662 if (error) 5663 kfree(mi); 5664 else { 5665 struct seq_file *p = file->private_data; 5666 p->private = mi; 5667 mi->event = atomic_read(&md_event_count); 5668 } 5669 return error; 5670 } 5671 5672 static unsigned int mdstat_poll(struct file *filp, poll_table *wait) 5673 { 5674 struct seq_file *m = filp->private_data; 5675 struct mdstat_info *mi = m->private; 5676 int mask; 5677 5678 poll_wait(filp, &md_event_waiters, wait); 5679 5680 /* always allow read */ 5681 mask = POLLIN | POLLRDNORM; 5682 5683 if (mi->event != atomic_read(&md_event_count)) 5684 mask |= POLLERR | POLLPRI; 5685 return mask; 5686 } 5687 5688 static const struct file_operations md_seq_fops = { 5689 .owner = THIS_MODULE, 5690 .open = md_seq_open, 5691 .read = seq_read, 5692 .llseek = seq_lseek, 5693 .release = seq_release_private, 5694 .poll = mdstat_poll, 5695 }; 5696 5697 int register_md_personality(struct mdk_personality *p) 5698 { 5699 spin_lock(&pers_lock); 5700 list_add_tail(&p->list, &pers_list); 5701 printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level); 5702 spin_unlock(&pers_lock); 5703 return 0; 5704 } 5705 5706 int unregister_md_personality(struct mdk_personality *p) 5707 { 5708 printk(KERN_INFO "md: %s personality unregistered\n", p->name); 5709 spin_lock(&pers_lock); 5710 list_del_init(&p->list); 5711 spin_unlock(&pers_lock); 5712 return 0; 5713 } 5714 5715 static int is_mddev_idle(mddev_t *mddev) 5716 { 5717 mdk_rdev_t * rdev; 5718 int idle; 5719 long curr_events; 5720 5721 idle = 1; 5722 rcu_read_lock(); 5723 rdev_for_each_rcu(rdev, mddev) { 5724 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; 5725 curr_events = part_stat_read(&disk->part0, sectors[0]) + 5726 part_stat_read(&disk->part0, sectors[1]) - 5727 atomic_read(&disk->sync_io); 5728 /* sync IO will cause sync_io to increase before the disk_stats 5729 * as sync_io is counted when a request starts, and 5730 * disk_stats is counted when it completes. 5731 * So resync activity will cause curr_events to be smaller than 5732 * when there was no such activity. 5733 * non-sync IO will cause disk_stat to increase without 5734 * increasing sync_io so curr_events will (eventually) 5735 * be larger than it was before. Once it becomes 5736 * substantially larger, the test below will cause 5737 * the array to appear non-idle, and resync will slow 5738 * down. 5739 * If there is a lot of outstanding resync activity when 5740 * we set last_event to curr_events, then all that activity 5741 * completing might cause the array to appear non-idle 5742 * and resync will be slowed down even though there might 5743 * not have been non-resync activity. This will only 5744 * happen once though. 
'last_events' will soon reflect 5745 * the state where there are few or no outstanding 5746 * resync requests, and further resync activity will 5747 * always make curr_events less than last_events. 5748 * 5749 */ 5750 if (curr_events - rdev->last_events > 4096) { 5751 rdev->last_events = curr_events; 5752 idle = 0; 5753 } 5754 } 5755 rcu_read_unlock(); 5756 return idle; 5757 } 5758 5759 void md_done_sync(mddev_t *mddev, int blocks, int ok) 5760 { 5761 /* another "blocks" (512byte) blocks have been synced */ 5762 atomic_sub(blocks, &mddev->recovery_active); 5763 wake_up(&mddev->recovery_wait); 5764 if (!ok) { 5765 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 5766 md_wakeup_thread(mddev->thread); 5767 // stop recovery, signal do_sync .... 5768 } 5769 } 5770 5771 5772 /* md_write_start(mddev, bi) 5773 * If we need to update some array metadata (e.g. 'active' flag 5774 * in superblock) before writing, schedule a superblock update 5775 * and wait for it to complete. 5776 */ 5777 void md_write_start(mddev_t *mddev, struct bio *bi) 5778 { 5779 int did_change = 0; 5780 if (bio_data_dir(bi) != WRITE) 5781 return; 5782 5783 BUG_ON(mddev->ro == 1); 5784 if (mddev->ro == 2) { 5785 /* need to switch to read/write */ 5786 mddev->ro = 0; 5787 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5788 md_wakeup_thread(mddev->thread); 5789 md_wakeup_thread(mddev->sync_thread); 5790 did_change = 1; 5791 } 5792 atomic_inc(&mddev->writes_pending); 5793 if (mddev->safemode == 1) 5794 mddev->safemode = 0; 5795 if (mddev->in_sync) { 5796 spin_lock_irq(&mddev->write_lock); 5797 if (mddev->in_sync) { 5798 mddev->in_sync = 0; 5799 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 5800 md_wakeup_thread(mddev->thread); 5801 did_change = 1; 5802 } 5803 spin_unlock_irq(&mddev->write_lock); 5804 } 5805 if (did_change) 5806 sysfs_notify_dirent(mddev->sysfs_state); 5807 wait_event(mddev->sb_wait, 5808 !test_bit(MD_CHANGE_CLEAN, &mddev->flags) && 5809 !test_bit(MD_CHANGE_PENDING, &mddev->flags)); 5810 } 5811 5812 void md_write_end(mddev_t *mddev) 5813 { 5814 if (atomic_dec_and_test(&mddev->writes_pending)) { 5815 if (mddev->safemode == 2) 5816 md_wakeup_thread(mddev->thread); 5817 else if (mddev->safemode_delay) 5818 mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay); 5819 } 5820 } 5821 5822 /* md_allow_write(mddev) 5823 * Calling this ensures that the array is marked 'active' so that writes 5824 * may proceed without blocking. It is important to call this before 5825 * attempting a GFP_KERNEL allocation while holding the mddev lock. 5826 * Must be called with mddev_lock held. 5827 * 5828 * In the ->external case MD_CHANGE_CLEAN cannot be cleared until mddev->lock 5829 * is dropped, so return -EAGAIN after notifying userspace.
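 *
 * Callers that cannot afford to block pick their allocation mode from
 * the return value, as get_bitmap_file() above does:
 *
 *	if (md_allow_write(mddev))
 *		file = kmalloc(sizeof(*file), GFP_NOIO);
 *	else
 *		file = kmalloc(sizeof(*file), GFP_KERNEL);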
5830 */ 5831 int md_allow_write(mddev_t *mddev) 5832 { 5833 if (!mddev->pers) 5834 return 0; 5835 if (mddev->ro) 5836 return 0; 5837 if (!mddev->pers->sync_request) 5838 return 0; 5839 5840 spin_lock_irq(&mddev->write_lock); 5841 if (mddev->in_sync) { 5842 mddev->in_sync = 0; 5843 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 5844 if (mddev->safemode_delay && 5845 mddev->safemode == 0) 5846 mddev->safemode = 1; 5847 spin_unlock_irq(&mddev->write_lock); 5848 md_update_sb(mddev, 0); 5849 sysfs_notify_dirent(mddev->sysfs_state); 5850 } else 5851 spin_unlock_irq(&mddev->write_lock); 5852 5853 if (test_bit(MD_CHANGE_CLEAN, &mddev->flags)) 5854 return -EAGAIN; 5855 else 5856 return 0; 5857 } 5858 EXPORT_SYMBOL_GPL(md_allow_write); 5859 5860 #define SYNC_MARKS 10 5861 #define SYNC_MARK_STEP (3*HZ) 5862 void md_do_sync(mddev_t *mddev) 5863 { 5864 mddev_t *mddev2; 5865 unsigned int currspeed = 0, 5866 window; 5867 sector_t max_sectors,j, io_sectors; 5868 unsigned long mark[SYNC_MARKS]; 5869 sector_t mark_cnt[SYNC_MARKS]; 5870 int last_mark,m; 5871 struct list_head *tmp; 5872 sector_t last_check; 5873 int skipped = 0; 5874 mdk_rdev_t *rdev; 5875 char *desc; 5876 5877 /* just in case thread restarts... */ 5878 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) 5879 return; 5880 if (mddev->ro) /* never try to sync a read-only array */ 5881 return; 5882 5883 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 5884 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) 5885 desc = "data-check"; 5886 else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 5887 desc = "requested-resync"; 5888 else 5889 desc = "resync"; 5890 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 5891 desc = "reshape"; 5892 else 5893 desc = "recovery"; 5894 5895 /* we overload curr_resync somewhat here. 5896 * 0 == not engaged in resync at all 5897 * 2 == checking that there is no conflict with another sync 5898 * 1 == like 2, but have yielded to allow conflicting resync to 5899 * commence 5900 * other == active in resync - this many blocks 5901 * 5902 * Before starting a resync we must have set curr_resync to 5903 * 2, and then checked that every "conflicting" array has curr_resync 5904 * less than ours. When we find one that is the same or higher 5905 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync 5906 * to 1 if we choose to yield (based arbitrarily on address of mddev structure). 5907 * This will mean we have to start checking from the beginning again.
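 *
 * For example, if md0 and md1 share a physical unit and both reach
 * this check, the mddev with the lower address yields: it drops
 * curr_resync to 1 and sleeps on resync_wait, while the other keeps
 * curr_resync == 2 and proceeds.  When woken, the yielder re-runs
 * the whole conflict check from 'try_again'.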
5908 * 5909 */ 5910 5911 do { 5912 mddev->curr_resync = 2; 5913 5914 try_again: 5915 if (kthread_should_stop()) { 5916 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 5917 goto skip; 5918 } 5919 for_each_mddev(mddev2, tmp) { 5920 if (mddev2 == mddev) 5921 continue; 5922 if (!mddev->parallel_resync 5923 && mddev2->curr_resync 5924 && match_mddev_units(mddev, mddev2)) { 5925 DEFINE_WAIT(wq); 5926 if (mddev < mddev2 && mddev->curr_resync == 2) { 5927 /* arbitrarily yield */ 5928 mddev->curr_resync = 1; 5929 wake_up(&resync_wait); 5930 } 5931 if (mddev > mddev2 && mddev->curr_resync == 1) 5932 /* no need to wait here, we can wait the next 5933 * time 'round when curr_resync == 2 5934 */ 5935 continue; 5936 /* We need to wait 'interruptible' so as not to 5937 * contribute to the load average, and not to 5938 * be caught by 'softlockup' 5939 */ 5940 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE); 5941 if (!kthread_should_stop() && 5942 mddev2->curr_resync >= mddev->curr_resync) { 5943 printk(KERN_INFO "md: delaying %s of %s" 5944 " until %s has finished (they" 5945 " share one or more physical units)\n", 5946 desc, mdname(mddev), mdname(mddev2)); 5947 mddev_put(mddev2); 5948 if (signal_pending(current)) 5949 flush_signals(current); 5950 schedule(); 5951 finish_wait(&resync_wait, &wq); 5952 goto try_again; 5953 } 5954 finish_wait(&resync_wait, &wq); 5955 } 5956 } 5957 } while (mddev->curr_resync < 2); 5958 5959 j = 0; 5960 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 5961 /* resync follows the size requested by the personality, 5962 * which defaults to physical size, but can be virtual size 5963 */ 5964 max_sectors = mddev->resync_max_sectors; 5965 mddev->resync_mismatches = 0; 5966 /* we don't use the checkpoint if there's a bitmap */ 5967 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 5968 j = mddev->resync_min; 5969 else if (!mddev->bitmap) 5970 j = mddev->recovery_cp; 5971 5972 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 5973 max_sectors = mddev->size << 1; 5974 else { 5975 /* recovery follows the physical size of devices */ 5976 max_sectors = mddev->size << 1; 5977 j = MaxSector; 5978 list_for_each_entry(rdev, &mddev->disks, same_set) 5979 if (rdev->raid_disk >= 0 && 5980 !test_bit(Faulty, &rdev->flags) && 5981 !test_bit(In_sync, &rdev->flags) && 5982 rdev->recovery_offset < j) 5983 j = rdev->recovery_offset; 5984 } 5985 5986 printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev)); 5987 printk(KERN_INFO "md: minimum _guaranteed_ speed:" 5988 " %d KB/sec/disk.\n", speed_min(mddev)); 5989 printk(KERN_INFO "md: using maximum available idle IO bandwidth " 5990 "(but not more than %d KB/sec) for %s.\n", 5991 speed_max(mddev), desc); 5992 5993 is_mddev_idle(mddev); /* this also initializes IO event counters */ 5994 5995 io_sectors = 0; 5996 for (m = 0; m < SYNC_MARKS; m++) { 5997 mark[m] = jiffies; 5998 mark_cnt[m] = io_sectors; 5999 } 6000 last_mark = 0; 6001 mddev->resync_mark = mark[last_mark]; 6002 mddev->resync_mark_cnt = mark_cnt[last_mark]; 6003 6004 /* 6005 * Tune reconstruction: 6006 */ 6007 window = 32*(PAGE_SIZE/512); 6008 printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n", 6009 window/2,(unsigned long long) max_sectors/2); 6010 6011 atomic_set(&mddev->recovery_active, 0); 6012 last_check = 0; 6013 6014 if (j>2) { 6015 printk(KERN_INFO 6016 "md: resuming %s of %s from checkpoint.\n", 6017 desc, mdname(mddev)); 6018 mddev->curr_resync = j; 6019 } 6020 6021 while (j < max_sectors) { 6022 sector_t sectors; 6023 6024 
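		/* Each pass asks the personality for the next chunk:
		 * ->sync_request() returns the number of sectors handled
		 * starting at j, and sets 'skipped' when it satisfied the
		 * request without issuing real I/O; the final argument asks
		 * it to hurry whenever we are below the guaranteed minimum
		 * speed.  A return of 0 means an error, and aborts the sync.
		 */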
		skipped = 0;
		if (j >= mddev->resync_max) {
			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
			wait_event(mddev->recovery_wait,
				   mddev->resync_max > j
				   || kthread_should_stop());
		}
		if (kthread_should_stop())
			goto interrupted;
		sectors = mddev->pers->sync_request(mddev, j, &skipped,
						    currspeed < speed_min(mddev));
		if (sectors == 0) {
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			goto out;
		}

		if (!skipped) { /* actual IO requested */
			io_sectors += sectors;
			atomic_add(sectors, &mddev->recovery_active);
		}

		j += sectors;
		if (j > 1)
			mddev->curr_resync = j;
		mddev->curr_mark_cnt = io_sectors;
		if (last_check == 0)
			/* this is the earliest that the rebuild will be
			 * visible in /proc/mdstat
			 */
			md_new_event(mddev);

		if (last_check + window > io_sectors || j == max_sectors)
			continue;

		last_check = io_sectors;

		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
			break;

	repeat:
		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP)) {
			/* step marks */
			int next = (last_mark+1) % SYNC_MARKS;

			mddev->resync_mark = mark[next];
			mddev->resync_mark_cnt = mark_cnt[next];
			mark[next] = jiffies;
			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
			last_mark = next;
		}


		if (kthread_should_stop())
			goto interrupted;


		/*
		 * this loop exits only if we are either slower than
		 * the 'hard' speed limit, or the system was IO-idle for
		 * a jiffy.
		 * the system might be non-idle CPU-wise, but we only care
		 * about not overloading the IO subsystem (things like an
		 * e2fsck being done on the RAID array should execute fast).
		 */
		blk_unplug(mddev->queue);
		cond_resched();

		currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2
			/((jiffies-mddev->resync_mark)/HZ + 1) + 1;

		if (currspeed > speed_min(mddev)) {
			if ((currspeed > speed_max(mddev)) ||
			    !is_mddev_idle(mddev)) {
				msleep(500);
				goto repeat;
			}
		}
	}
	printk(KERN_INFO "md: %s: %s done.\n", mdname(mddev), desc);
	/*
	 * this also signals 'finished resyncing' to md_stop
	 */
 out:
	blk_unplug(mddev->queue);

	wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active));

	/* tell personality that we are finished */
	mddev->pers->sync_request(mddev, max_sectors, &skipped, 1);

	if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) &&
	    mddev->curr_resync > 2) {
		if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
			if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {
				if (mddev->curr_resync >= mddev->recovery_cp) {
					printk(KERN_INFO
					       "md: checkpointing %s of %s.\n",
					       desc, mdname(mddev));
					mddev->recovery_cp = mddev->curr_resync;
				}
			} else
				mddev->recovery_cp = MaxSector;
		} else {
			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery))
				mddev->curr_resync = MaxSector;
			list_for_each_entry(rdev, &mddev->disks, same_set)
				if (rdev->raid_disk >= 0 &&
				    !test_bit(Faulty, &rdev->flags) &&
				    !test_bit(In_sync, &rdev->flags) &&
				    rdev->recovery_offset < mddev->curr_resync)
					rdev->recovery_offset = mddev->curr_resync;
		}
	}
	set_bit(MD_CHANGE_DEVS, &mddev->flags);

 skip:
	mddev->curr_resync = 0;
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
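	/* The request window has been reset; tell sysfs watchers and
	 * any array waiting in resync_wait that this sync has ended.
	 */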
	sysfs_notify(&mddev->kobj, NULL, "sync_completed");
	wake_up(&resync_wait);
	set_bit(MD_RECOVERY_DONE, &mddev->recovery);
	md_wakeup_thread(mddev->thread);
	return;

 interrupted:
	/*
	 * got a signal, exit.
	 */
	printk(KERN_INFO
	       "md: md_do_sync() got signal ... exiting\n");
	set_bit(MD_RECOVERY_INTR, &mddev->recovery);
	goto out;

}
EXPORT_SYMBOL_GPL(md_do_sync);


static int remove_and_add_spares(mddev_t *mddev)
{
	mdk_rdev_t *rdev;
	int spares = 0;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Blocked, &rdev->flags) &&
		    (test_bit(Faulty, &rdev->flags) ||
		     !test_bit(In_sync, &rdev->flags)) &&
		    atomic_read(&rdev->nr_pending) == 0) {
			if (mddev->pers->hot_remove_disk(
				    mddev, rdev->raid_disk) == 0) {
				char nm[20];
				sprintf(nm, "rd%d", rdev->raid_disk);
				sysfs_remove_link(&mddev->kobj, nm);
				rdev->raid_disk = -1;
			}
		}

	if (mddev->degraded && !mddev->ro && !mddev->recovery_disabled) {
		list_for_each_entry(rdev, &mddev->disks, same_set) {
			if (rdev->raid_disk >= 0 &&
			    !test_bit(In_sync, &rdev->flags) &&
			    !test_bit(Blocked, &rdev->flags))
				spares++;
			if (rdev->raid_disk < 0
			    && !test_bit(Faulty, &rdev->flags)) {
				rdev->recovery_offset = 0;
				if (mddev->pers->
				    hot_add_disk(mddev, rdev) == 0) {
					char nm[20];
					sprintf(nm, "rd%d", rdev->raid_disk);
					if (sysfs_create_link(&mddev->kobj,
							      &rdev->kobj, nm))
						printk(KERN_WARNING
						       "md: cannot register "
						       "%s for %s\n",
						       nm, mdname(mddev));
					spares++;
					md_new_event(mddev);
				} else
					break;
			}
		}
	}
	return spares;
}
/*
 * This routine is regularly called by all per-raid-array threads to
 * deal with generic issues like resync and super-block update.
 * Raid personalities that don't have a thread (linear/raid0) do not
 * need this as they never do any recovery or update the superblock.
 *
 * It does not do any resync itself, but rather "forks" off other threads
 * to do that as needed.
 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
 * "->recovery" and create a thread at ->sync_thread.
 * When the thread finishes it sets MD_RECOVERY_DONE
 * and wakes up this thread, which will reap the thread and finish up.
 * This thread also removes any faulty devices (with nr_pending == 0).
 *
 * The overall approach is:
 *  1/ if the superblock needs updating, update it.
 *  2/ If a recovery thread is running, don't do anything else.
 *  3/ If recovery has finished, clean up, possibly marking spares active.
 *  4/ If there are any faulty devices, remove them.
 *  5/ If array is degraded, try to add spare devices.
 *  6/ If array has spares or is not in-sync, start a resync thread.
 */
void md_check_recovery(mddev_t *mddev)
{
	mdk_rdev_t *rdev;


	if (mddev->bitmap)
		bitmap_daemon_work(mddev->bitmap);

	if (mddev->ro)
		return;

	if (signal_pending(current)) {
		if (mddev->pers->sync_request && !mddev->external) {
			printk(KERN_INFO "md: %s in immediate safe mode\n",
			       mdname(mddev));
			mddev->safemode = 2;
		}
		flush_signals(current);
	}

	if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
		return;
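	/* Nothing to do unless the superblock is dirty (and managed
	 * internally), recovery has been requested or has just finished,
	 * or a safemode transition is pending.
	 */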
	if (!(
		(mddev->flags && !mddev->external) ||
		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
		(mddev->external == 0 && mddev->safemode == 1) ||
		(mddev->safemode == 2 && !atomic_read(&mddev->writes_pending)
		 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
		))
		return;

	if (mddev_trylock(mddev)) {
		int spares = 0;

		if (mddev->ro) {
			/* The only thing we do on a ro array is remove
			 * failed devices.
			 */
			remove_and_add_spares(mddev);
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}

		if (!mddev->external) {
			int did_change = 0;
			spin_lock_irq(&mddev->write_lock);
			if (mddev->safemode &&
			    !atomic_read(&mddev->writes_pending) &&
			    !mddev->in_sync &&
			    mddev->recovery_cp == MaxSector) {
				mddev->in_sync = 1;
				did_change = 1;
				if (mddev->persistent)
					set_bit(MD_CHANGE_CLEAN, &mddev->flags);
			}
			if (mddev->safemode == 1)
				mddev->safemode = 0;
			spin_unlock_irq(&mddev->write_lock);
			if (did_change)
				sysfs_notify_dirent(mddev->sysfs_state);
		}

		if (mddev->flags)
			md_update_sb(mddev, 0);

		list_for_each_entry(rdev, &mddev->disks, same_set)
			if (test_and_clear_bit(StateChanged, &rdev->flags))
				sysfs_notify_dirent(rdev->sysfs_state);


		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
			/* resync/recovery still happening */
			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			goto unlock;
		}
		if (mddev->sync_thread) {
			/* resync has finished, collect result */
			md_unregister_thread(mddev->sync_thread);
			mddev->sync_thread = NULL;
			if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
				/* success... */
				/* activate any spares */
				if (mddev->pers->spare_active(mddev))
					sysfs_notify(&mddev->kobj, NULL,
						     "degraded");
			}
			md_update_sb(mddev, 1);

			/* if array is no longer degraded, then any saved_raid_disk
			 * information must be scrapped
			 */
			if (!mddev->degraded)
				list_for_each_entry(rdev, &mddev->disks, same_set)
					rdev->saved_raid_disk = -1;

			mddev->recovery = 0;
			/* flag recovery needed just to double check */
			set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
			sysfs_notify_dirent(mddev->sysfs_action);
			md_new_event(mddev);
			goto unlock;
		}
		/* Set RUNNING before clearing NEEDED to avoid
		 * any transients in the value of "sync_action".
		 */
		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
		clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
		/* Clear some bits that don't mean anything, but
		 * might be left set
		 */
		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);

		if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
			goto unlock;
		/* no recovery is running.
		 * remove any failed drives, then
		 * add spares if possible.
		 * Spares are also removed and re-added, to allow
		 * the personality to fail the re-add.
		 */
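		/* Decide what the sync thread should do, in priority
		 * order: continue a pending reshape, recover onto any
		 * spares just (re)added, resume an incomplete resync,
		 * or nothing at all.
		 */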
6354 */ 6355 6356 if (mddev->reshape_position != MaxSector) { 6357 if (mddev->pers->check_reshape(mddev) != 0) 6358 /* Cannot proceed */ 6359 goto unlock; 6360 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 6361 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 6362 } else if ((spares = remove_and_add_spares(mddev))) { 6363 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 6364 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 6365 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 6366 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 6367 } else if (mddev->recovery_cp < MaxSector) { 6368 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 6369 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 6370 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 6371 /* nothing to be done ... */ 6372 goto unlock; 6373 6374 if (mddev->pers->sync_request) { 6375 if (spares && mddev->bitmap && ! mddev->bitmap->file) { 6376 /* We are adding a device or devices to an array 6377 * which has the bitmap stored on all devices. 6378 * So make sure all bitmap pages get written 6379 */ 6380 bitmap_write_all(mddev->bitmap); 6381 } 6382 mddev->sync_thread = md_register_thread(md_do_sync, 6383 mddev, 6384 "%s_resync"); 6385 if (!mddev->sync_thread) { 6386 printk(KERN_ERR "%s: could not start resync" 6387 " thread...\n", 6388 mdname(mddev)); 6389 /* leave the spares where they are, it shouldn't hurt */ 6390 mddev->recovery = 0; 6391 } else 6392 md_wakeup_thread(mddev->sync_thread); 6393 sysfs_notify_dirent(mddev->sysfs_action); 6394 md_new_event(mddev); 6395 } 6396 unlock: 6397 if (!mddev->sync_thread) { 6398 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 6399 if (test_and_clear_bit(MD_RECOVERY_RECOVER, 6400 &mddev->recovery)) 6401 if (mddev->sysfs_action) 6402 sysfs_notify_dirent(mddev->sysfs_action); 6403 } 6404 mddev_unlock(mddev); 6405 } 6406 } 6407 6408 void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev) 6409 { 6410 sysfs_notify_dirent(rdev->sysfs_state); 6411 wait_event_timeout(rdev->blocked_wait, 6412 !test_bit(Blocked, &rdev->flags), 6413 msecs_to_jiffies(5000)); 6414 rdev_dec_pending(rdev, mddev); 6415 } 6416 EXPORT_SYMBOL(md_wait_for_blocked_rdev); 6417 6418 static int md_notify_reboot(struct notifier_block *this, 6419 unsigned long code, void *x) 6420 { 6421 struct list_head *tmp; 6422 mddev_t *mddev; 6423 6424 if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) { 6425 6426 printk(KERN_INFO "md: stopping all md devices.\n"); 6427 6428 for_each_mddev(mddev, tmp) 6429 if (mddev_trylock(mddev)) { 6430 /* Force a switch to readonly even array 6431 * appears to still be in use. Hence 6432 * the '100'. 6433 */ 6434 do_md_stop(mddev, 1, 100); 6435 mddev_unlock(mddev); 6436 } 6437 /* 6438 * certain more exotic SCSI devices are known to be 6439 * volatile wrt too early system reboots. While the 6440 * right place to handle this issue is the given 6441 * driver, we do want to have a safe RAID driver ... 
6442 */ 6443 mdelay(1000*1); 6444 } 6445 return NOTIFY_DONE; 6446 } 6447 6448 static struct notifier_block md_notifier = { 6449 .notifier_call = md_notify_reboot, 6450 .next = NULL, 6451 .priority = INT_MAX, /* before any real devices */ 6452 }; 6453 6454 static void md_geninit(void) 6455 { 6456 dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t)); 6457 6458 proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops); 6459 } 6460 6461 static int __init md_init(void) 6462 { 6463 if (register_blkdev(MAJOR_NR, "md")) 6464 return -1; 6465 if ((mdp_major=register_blkdev(0, "mdp"))<=0) { 6466 unregister_blkdev(MAJOR_NR, "md"); 6467 return -1; 6468 } 6469 blk_register_region(MKDEV(MAJOR_NR, 0), 1UL<<MINORBITS, THIS_MODULE, 6470 md_probe, NULL, NULL); 6471 blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE, 6472 md_probe, NULL, NULL); 6473 6474 register_reboot_notifier(&md_notifier); 6475 raid_table_header = register_sysctl_table(raid_root_table); 6476 6477 md_geninit(); 6478 return 0; 6479 } 6480 6481 6482 #ifndef MODULE 6483 6484 /* 6485 * Searches all registered partitions for autorun RAID arrays 6486 * at boot time. 6487 */ 6488 6489 static LIST_HEAD(all_detected_devices); 6490 struct detected_devices_node { 6491 struct list_head list; 6492 dev_t dev; 6493 }; 6494 6495 void md_autodetect_dev(dev_t dev) 6496 { 6497 struct detected_devices_node *node_detected_dev; 6498 6499 node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL); 6500 if (node_detected_dev) { 6501 node_detected_dev->dev = dev; 6502 list_add_tail(&node_detected_dev->list, &all_detected_devices); 6503 } else { 6504 printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed" 6505 ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev)); 6506 } 6507 } 6508 6509 6510 static void autostart_arrays(int part) 6511 { 6512 mdk_rdev_t *rdev; 6513 struct detected_devices_node *node_detected_dev; 6514 dev_t dev; 6515 int i_scanned, i_passed; 6516 6517 i_scanned = 0; 6518 i_passed = 0; 6519 6520 printk(KERN_INFO "md: Autodetecting RAID arrays.\n"); 6521 6522 while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) { 6523 i_scanned++; 6524 node_detected_dev = list_entry(all_detected_devices.next, 6525 struct detected_devices_node, list); 6526 list_del(&node_detected_dev->list); 6527 dev = node_detected_dev->dev; 6528 kfree(node_detected_dev); 6529 rdev = md_import_device(dev,0, 90); 6530 if (IS_ERR(rdev)) 6531 continue; 6532 6533 if (test_bit(Faulty, &rdev->flags)) { 6534 MD_BUG(); 6535 continue; 6536 } 6537 set_bit(AutoDetected, &rdev->flags); 6538 list_add(&rdev->same_set, &pending_raid_disks); 6539 i_passed++; 6540 } 6541 6542 printk(KERN_INFO "md: Scanned %d and added %d devices.\n", 6543 i_scanned, i_passed); 6544 6545 autorun_devices(part); 6546 } 6547 6548 #endif /* !MODULE */ 6549 6550 static __exit void md_exit(void) 6551 { 6552 mddev_t *mddev; 6553 struct list_head *tmp; 6554 6555 blk_unregister_region(MKDEV(MAJOR_NR,0), 1U << MINORBITS); 6556 blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS); 6557 6558 unregister_blkdev(MAJOR_NR,"md"); 6559 unregister_blkdev(mdp_major, "mdp"); 6560 unregister_reboot_notifier(&md_notifier); 6561 unregister_sysctl_table(raid_table_header); 6562 remove_proc_entry("mdstat", NULL); 6563 for_each_mddev(mddev, tmp) { 6564 export_array(mddev); 6565 mddev->hold_active = 0; 6566 } 6567 } 6568 6569 subsys_initcall(md_init); 6570 module_exit(md_exit) 6571 6572 static int get_ro(char *buffer, struct kernel_param *kp) 6573 { 6574 return sprintf(buffer, "%d", 
static int set_ro(const char *val, struct kernel_param *kp)
{
	char *e;
	int num = simple_strtoul(val, &e, 10);
	if (*val && (*e == '\0' || *e == '\n')) {
		start_readonly = num;
		return 0;
	}
	return -EINVAL;
}

module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);

module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);

EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);