1 /* 2 md.c : Multiple Devices driver for Linux 3 Copyright (C) 1998, 1999, 2000 Ingo Molnar 4 5 completely rewritten, based on the MD driver code from Marc Zyngier 6 7 Changes: 8 9 - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar 10 - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com> 11 - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net> 12 - kerneld support by Boris Tobotras <boris@xtalk.msk.su> 13 - kmod support by: Cyrus Durgin 14 - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com> 15 - Devfs support by Richard Gooch <rgooch@atnf.csiro.au> 16 17 - lots of fixes and improvements to the RAID1/RAID5 and generic 18 RAID code (such as request based resynchronization): 19 20 Neil Brown <neilb@cse.unsw.edu.au>. 21 22 - persistent bitmap code 23 Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc. 24 25 This program is free software; you can redistribute it and/or modify 26 it under the terms of the GNU General Public License as published by 27 the Free Software Foundation; either version 2, or (at your option) 28 any later version. 29 30 You should have received a copy of the GNU General Public License 31 (for example /usr/src/linux/COPYING); if not, write to the Free 32 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 33 */ 34 35 #include <linux/kthread.h> 36 #include <linux/blkdev.h> 37 #include <linux/sysctl.h> 38 #include <linux/seq_file.h> 39 #include <linux/buffer_head.h> /* for invalidate_bdev */ 40 #include <linux/poll.h> 41 #include <linux/ctype.h> 42 #include <linux/hdreg.h> 43 #include <linux/proc_fs.h> 44 #include <linux/random.h> 45 #include <linux/reboot.h> 46 #include <linux/file.h> 47 #include <linux/delay.h> 48 #include <linux/raid/md_p.h> 49 #include <linux/raid/md_u.h> 50 #include "md.h" 51 #include "bitmap.h" 52 53 #define DEBUG 0 54 #define dprintk(x...) ((void)(DEBUG && printk(x))) 55 56 57 #ifndef MODULE 58 static void autostart_arrays(int part); 59 #endif 60 61 static LIST_HEAD(pers_list); 62 static DEFINE_SPINLOCK(pers_lock); 63 64 static void md_print_devices(void); 65 66 static DECLARE_WAIT_QUEUE_HEAD(resync_wait); 67 68 #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); } 69 70 /* 71 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit' 72 * is 1000 KB/sec, so the extra system load does not show up that much. 73 * Increase it if you want to have more _guaranteed_ speed. Note that 74 * the RAID driver will use the maximum available bandwidth if the IO 75 * subsystem is idle. There is also an 'absolute maximum' reconstruction 76 * speed limit - in case reconstruction slows down your system despite 77 * idle IO detection. 78 * 79 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max. 80 * or /sys/block/mdX/md/sync_speed_{min,max} 81 */ 82 83 static int sysctl_speed_limit_min = 1000; 84 static int sysctl_speed_limit_max = 200000; 85 static inline int speed_min(mddev_t *mddev) 86 { 87 return mddev->sync_speed_min ? 88 mddev->sync_speed_min : sysctl_speed_limit_min; 89 } 90 91 static inline int speed_max(mddev_t *mddev) 92 { 93 return mddev->sync_speed_max ? 94 mddev->sync_speed_max : sysctl_speed_limit_max; 95 } 96 97 static struct ctl_table_header *raid_table_header; 98 99 static ctl_table raid_table[] = { 100 { 101 .ctl_name = DEV_RAID_SPEED_LIMIT_MIN, 102 .procname = "speed_limit_min", 103 .data = &sysctl_speed_limit_min, 104 .maxlen = sizeof(int), 105 .mode = S_IRUGO|S_IWUSR, 106 .proc_handler = &proc_dointvec, 107 }, 108 { 109 .ctl_name = DEV_RAID_SPEED_LIMIT_MAX, 110 .procname = "speed_limit_max", 111 .data = &sysctl_speed_limit_max, 112 .maxlen = sizeof(int), 113 .mode = S_IRUGO|S_IWUSR, 114 .proc_handler = &proc_dointvec, 115 }, 116 { .ctl_name = 0 } 117 }; 118 119 static ctl_table raid_dir_table[] = { 120 { 121 .ctl_name = DEV_RAID, 122 .procname = "raid", 123 .maxlen = 0, 124 .mode = S_IRUGO|S_IXUGO, 125 .child = raid_table, 126 }, 127 { .ctl_name = 0 } 128 }; 129 130 static ctl_table raid_root_table[] = { 131 { 132 .ctl_name = CTL_DEV, 133 .procname = "dev", 134 .maxlen = 0, 135 .mode = 0555, 136 .child = raid_dir_table, 137 }, 138 { .ctl_name = 0 } 139 }; 140 141 static struct block_device_operations md_fops; 142 143 static int start_readonly; 144 145 /* 146 * We have a system wide 'event count' that is incremented 147 * on any 'interesting' event, and readers of /proc/mdstat 148 * can use 'poll' or 'select' to find out when the event 149 * count increases. 150 * 151 * Events are: 152 * start array, stop array, error, add device, remove device, 153 * start build, activate spare 154 */ 155 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters); 156 static atomic_t md_event_count; 157 void md_new_event(mddev_t *mddev) 158 { 159 atomic_inc(&md_event_count); 160 wake_up(&md_event_waiters); 161 } 162 EXPORT_SYMBOL_GPL(md_new_event); 163 164 /* Alternate version that can be called from interrupts 165 * when calling sysfs_notify isn't needed. 166 */ 167 static void md_new_event_inintr(mddev_t *mddev) 168 { 169 atomic_inc(&md_event_count); 170 wake_up(&md_event_waiters); 171 } 172 173 /* 174 * Enables to iterate over all existing md arrays 175 * all_mddevs_lock protects this list. 176 */ 177 static LIST_HEAD(all_mddevs); 178 static DEFINE_SPINLOCK(all_mddevs_lock); 179 180 181 /* 182 * iterates through all used mddevs in the system. 183 * We take care to grab the all_mddevs_lock whenever navigating 184 * the list, and to always hold a refcount when unlocked. 185 * Any code which breaks out of this loop while own 186 * a reference to the current mddev and must mddev_put it. 187 */ 188 #define for_each_mddev(mddev,tmp) \ 189 \ 190 for (({ spin_lock(&all_mddevs_lock); \ 191 tmp = all_mddevs.next; \ 192 mddev = NULL;}); \ 193 ({ if (tmp != &all_mddevs) \ 194 mddev_get(list_entry(tmp, mddev_t, all_mddevs));\ 195 spin_unlock(&all_mddevs_lock); \ 196 if (mddev) mddev_put(mddev); \ 197 mddev = list_entry(tmp, mddev_t, all_mddevs); \ 198 tmp != &all_mddevs;}); \ 199 ({ spin_lock(&all_mddevs_lock); \ 200 tmp = tmp->next;}) \ 201 ) 202 203 204 /* Rather than calling directly into the personality make_request function, 205 * IO requests come here first so that we can check if the device is 206 * being suspended pending a reconfiguration. 207 * We hold a refcount over the call to ->make_request. By the time that 208 * call has finished, the bio has been linked into some internal structure 209 * and so is visible to ->quiesce(), so we don't need the refcount any more. 210 */ 211 static int md_make_request(struct request_queue *q, struct bio *bio) 212 { 213 mddev_t *mddev = q->queuedata; 214 int rv; 215 if (mddev == NULL || mddev->pers == NULL) { 216 bio_io_error(bio); 217 return 0; 218 } 219 rcu_read_lock(); 220 if (mddev->suspended) { 221 DEFINE_WAIT(__wait); 222 for (;;) { 223 prepare_to_wait(&mddev->sb_wait, &__wait, 224 TASK_UNINTERRUPTIBLE); 225 if (!mddev->suspended) 226 break; 227 rcu_read_unlock(); 228 schedule(); 229 rcu_read_lock(); 230 } 231 finish_wait(&mddev->sb_wait, &__wait); 232 } 233 atomic_inc(&mddev->active_io); 234 rcu_read_unlock(); 235 rv = mddev->pers->make_request(q, bio); 236 if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) 237 wake_up(&mddev->sb_wait); 238 239 return rv; 240 } 241 242 static void mddev_suspend(mddev_t *mddev) 243 { 244 BUG_ON(mddev->suspended); 245 mddev->suspended = 1; 246 synchronize_rcu(); 247 wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0); 248 mddev->pers->quiesce(mddev, 1); 249 md_unregister_thread(mddev->thread); 250 mddev->thread = NULL; 251 /* we now know that no code is executing in the personality module, 252 * except possibly the tail end of a ->bi_end_io function, but that 253 * is certain to complete before the module has a chance to get 254 * unloaded 255 */ 256 } 257 258 static void mddev_resume(mddev_t *mddev) 259 { 260 mddev->suspended = 0; 261 wake_up(&mddev->sb_wait); 262 mddev->pers->quiesce(mddev, 0); 263 } 264 265 266 static inline mddev_t *mddev_get(mddev_t *mddev) 267 { 268 atomic_inc(&mddev->active); 269 return mddev; 270 } 271 272 static void mddev_delayed_delete(struct work_struct *ws); 273 274 static void mddev_put(mddev_t *mddev) 275 { 276 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) 277 return; 278 if (!mddev->raid_disks && list_empty(&mddev->disks) && 279 !mddev->hold_active) { 280 list_del(&mddev->all_mddevs); 281 if (mddev->gendisk) { 282 /* we did a probe so need to clean up. 283 * Call schedule_work inside the spinlock 284 * so that flush_scheduled_work() after 285 * mddev_find will succeed in waiting for the 286 * work to be done. 287 */ 288 INIT_WORK(&mddev->del_work, mddev_delayed_delete); 289 schedule_work(&mddev->del_work); 290 } else 291 kfree(mddev); 292 } 293 spin_unlock(&all_mddevs_lock); 294 } 295 296 static mddev_t * mddev_find(dev_t unit) 297 { 298 mddev_t *mddev, *new = NULL; 299 300 retry: 301 spin_lock(&all_mddevs_lock); 302 303 if (unit) { 304 list_for_each_entry(mddev, &all_mddevs, all_mddevs) 305 if (mddev->unit == unit) { 306 mddev_get(mddev); 307 spin_unlock(&all_mddevs_lock); 308 kfree(new); 309 return mddev; 310 } 311 312 if (new) { 313 list_add(&new->all_mddevs, &all_mddevs); 314 spin_unlock(&all_mddevs_lock); 315 new->hold_active = UNTIL_IOCTL; 316 return new; 317 } 318 } else if (new) { 319 /* find an unused unit number */ 320 static int next_minor = 512; 321 int start = next_minor; 322 int is_free = 0; 323 int dev = 0; 324 while (!is_free) { 325 dev = MKDEV(MD_MAJOR, next_minor); 326 next_minor++; 327 if (next_minor > MINORMASK) 328 next_minor = 0; 329 if (next_minor == start) { 330 /* Oh dear, all in use. */ 331 spin_unlock(&all_mddevs_lock); 332 kfree(new); 333 return NULL; 334 } 335 336 is_free = 1; 337 list_for_each_entry(mddev, &all_mddevs, all_mddevs) 338 if (mddev->unit == dev) { 339 is_free = 0; 340 break; 341 } 342 } 343 new->unit = dev; 344 new->md_minor = MINOR(dev); 345 new->hold_active = UNTIL_STOP; 346 list_add(&new->all_mddevs, &all_mddevs); 347 spin_unlock(&all_mddevs_lock); 348 return new; 349 } 350 spin_unlock(&all_mddevs_lock); 351 352 new = kzalloc(sizeof(*new), GFP_KERNEL); 353 if (!new) 354 return NULL; 355 356 new->unit = unit; 357 if (MAJOR(unit) == MD_MAJOR) 358 new->md_minor = MINOR(unit); 359 else 360 new->md_minor = MINOR(unit) >> MdpMinorShift; 361 362 mutex_init(&new->reconfig_mutex); 363 INIT_LIST_HEAD(&new->disks); 364 INIT_LIST_HEAD(&new->all_mddevs); 365 init_timer(&new->safemode_timer); 366 atomic_set(&new->active, 1); 367 atomic_set(&new->openers, 0); 368 atomic_set(&new->active_io, 0); 369 spin_lock_init(&new->write_lock); 370 init_waitqueue_head(&new->sb_wait); 371 init_waitqueue_head(&new->recovery_wait); 372 new->reshape_position = MaxSector; 373 new->resync_min = 0; 374 new->resync_max = MaxSector; 375 new->level = LEVEL_NONE; 376 377 goto retry; 378 } 379 380 static inline int mddev_lock(mddev_t * mddev) 381 { 382 return mutex_lock_interruptible(&mddev->reconfig_mutex); 383 } 384 385 static inline int mddev_is_locked(mddev_t *mddev) 386 { 387 return mutex_is_locked(&mddev->reconfig_mutex); 388 } 389 390 static inline int mddev_trylock(mddev_t * mddev) 391 { 392 return mutex_trylock(&mddev->reconfig_mutex); 393 } 394 395 static inline void mddev_unlock(mddev_t * mddev) 396 { 397 mutex_unlock(&mddev->reconfig_mutex); 398 399 md_wakeup_thread(mddev->thread); 400 } 401 402 static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr) 403 { 404 mdk_rdev_t *rdev; 405 406 list_for_each_entry(rdev, &mddev->disks, same_set) 407 if (rdev->desc_nr == nr) 408 return rdev; 409 410 return NULL; 411 } 412 413 static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev) 414 { 415 mdk_rdev_t *rdev; 416 417 list_for_each_entry(rdev, &mddev->disks, same_set) 418 if (rdev->bdev->bd_dev == dev) 419 return rdev; 420 421 return NULL; 422 } 423 424 static struct mdk_personality *find_pers(int level, char *clevel) 425 { 426 struct mdk_personality *pers; 427 list_for_each_entry(pers, &pers_list, list) { 428 if (level != LEVEL_NONE && pers->level == level) 429 return pers; 430 if (strcmp(pers->name, clevel)==0) 431 return pers; 432 } 433 return NULL; 434 } 435 436 /* return the offset of the super block in 512byte sectors */ 437 static inline sector_t calc_dev_sboffset(struct block_device *bdev) 438 { 439 sector_t num_sectors = bdev->bd_inode->i_size / 512; 440 return MD_NEW_SIZE_SECTORS(num_sectors); 441 } 442 443 static sector_t calc_num_sectors(mdk_rdev_t *rdev, unsigned chunk_size) 444 { 445 sector_t num_sectors = rdev->sb_start; 446 447 if (chunk_size) 448 num_sectors &= ~((sector_t)chunk_size/512 - 1); 449 return num_sectors; 450 } 451 452 static int alloc_disk_sb(mdk_rdev_t * rdev) 453 { 454 if (rdev->sb_page) 455 MD_BUG(); 456 457 rdev->sb_page = alloc_page(GFP_KERNEL); 458 if (!rdev->sb_page) { 459 printk(KERN_ALERT "md: out of memory.\n"); 460 return -ENOMEM; 461 } 462 463 return 0; 464 } 465 466 static void free_disk_sb(mdk_rdev_t * rdev) 467 { 468 if (rdev->sb_page) { 469 put_page(rdev->sb_page); 470 rdev->sb_loaded = 0; 471 rdev->sb_page = NULL; 472 rdev->sb_start = 0; 473 rdev->sectors = 0; 474 } 475 } 476 477 478 static void super_written(struct bio *bio, int error) 479 { 480 mdk_rdev_t *rdev = bio->bi_private; 481 mddev_t *mddev = rdev->mddev; 482 483 if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) { 484 printk("md: super_written gets error=%d, uptodate=%d\n", 485 error, test_bit(BIO_UPTODATE, &bio->bi_flags)); 486 WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags)); 487 md_error(mddev, rdev); 488 } 489 490 if (atomic_dec_and_test(&mddev->pending_writes)) 491 wake_up(&mddev->sb_wait); 492 bio_put(bio); 493 } 494 495 static void super_written_barrier(struct bio *bio, int error) 496 { 497 struct bio *bio2 = bio->bi_private; 498 mdk_rdev_t *rdev = bio2->bi_private; 499 mddev_t *mddev = rdev->mddev; 500 501 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && 502 error == -EOPNOTSUPP) { 503 unsigned long flags; 504 /* barriers don't appear to be supported :-( */ 505 set_bit(BarriersNotsupp, &rdev->flags); 506 mddev->barriers_work = 0; 507 spin_lock_irqsave(&mddev->write_lock, flags); 508 bio2->bi_next = mddev->biolist; 509 mddev->biolist = bio2; 510 spin_unlock_irqrestore(&mddev->write_lock, flags); 511 wake_up(&mddev->sb_wait); 512 bio_put(bio); 513 } else { 514 bio_put(bio2); 515 bio->bi_private = rdev; 516 super_written(bio, error); 517 } 518 } 519 520 void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev, 521 sector_t sector, int size, struct page *page) 522 { 523 /* write first size bytes of page to sector of rdev 524 * Increment mddev->pending_writes before returning 525 * and decrement it on completion, waking up sb_wait 526 * if zero is reached. 527 * If an error occurred, call md_error 528 * 529 * As we might need to resubmit the request if BIO_RW_BARRIER 530 * causes ENOTSUPP, we allocate a spare bio... 531 */ 532 struct bio *bio = bio_alloc(GFP_NOIO, 1); 533 int rw = (1<<BIO_RW) | (1<<BIO_RW_SYNCIO) | (1<<BIO_RW_UNPLUG); 534 535 bio->bi_bdev = rdev->bdev; 536 bio->bi_sector = sector; 537 bio_add_page(bio, page, size, 0); 538 bio->bi_private = rdev; 539 bio->bi_end_io = super_written; 540 bio->bi_rw = rw; 541 542 atomic_inc(&mddev->pending_writes); 543 if (!test_bit(BarriersNotsupp, &rdev->flags)) { 544 struct bio *rbio; 545 rw |= (1<<BIO_RW_BARRIER); 546 rbio = bio_clone(bio, GFP_NOIO); 547 rbio->bi_private = bio; 548 rbio->bi_end_io = super_written_barrier; 549 submit_bio(rw, rbio); 550 } else 551 submit_bio(rw, bio); 552 } 553 554 void md_super_wait(mddev_t *mddev) 555 { 556 /* wait for all superblock writes that were scheduled to complete. 557 * if any had to be retried (due to BARRIER problems), retry them 558 */ 559 DEFINE_WAIT(wq); 560 for(;;) { 561 prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE); 562 if (atomic_read(&mddev->pending_writes)==0) 563 break; 564 while (mddev->biolist) { 565 struct bio *bio; 566 spin_lock_irq(&mddev->write_lock); 567 bio = mddev->biolist; 568 mddev->biolist = bio->bi_next ; 569 bio->bi_next = NULL; 570 spin_unlock_irq(&mddev->write_lock); 571 submit_bio(bio->bi_rw, bio); 572 } 573 schedule(); 574 } 575 finish_wait(&mddev->sb_wait, &wq); 576 } 577 578 static void bi_complete(struct bio *bio, int error) 579 { 580 complete((struct completion*)bio->bi_private); 581 } 582 583 int sync_page_io(struct block_device *bdev, sector_t sector, int size, 584 struct page *page, int rw) 585 { 586 struct bio *bio = bio_alloc(GFP_NOIO, 1); 587 struct completion event; 588 int ret; 589 590 rw |= (1 << BIO_RW_SYNCIO) | (1 << BIO_RW_UNPLUG); 591 592 bio->bi_bdev = bdev; 593 bio->bi_sector = sector; 594 bio_add_page(bio, page, size, 0); 595 init_completion(&event); 596 bio->bi_private = &event; 597 bio->bi_end_io = bi_complete; 598 submit_bio(rw, bio); 599 wait_for_completion(&event); 600 601 ret = test_bit(BIO_UPTODATE, &bio->bi_flags); 602 bio_put(bio); 603 return ret; 604 } 605 EXPORT_SYMBOL_GPL(sync_page_io); 606 607 static int read_disk_sb(mdk_rdev_t * rdev, int size) 608 { 609 char b[BDEVNAME_SIZE]; 610 if (!rdev->sb_page) { 611 MD_BUG(); 612 return -EINVAL; 613 } 614 if (rdev->sb_loaded) 615 return 0; 616 617 618 if (!sync_page_io(rdev->bdev, rdev->sb_start, size, rdev->sb_page, READ)) 619 goto fail; 620 rdev->sb_loaded = 1; 621 return 0; 622 623 fail: 624 printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n", 625 bdevname(rdev->bdev,b)); 626 return -EINVAL; 627 } 628 629 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2) 630 { 631 return sb1->set_uuid0 == sb2->set_uuid0 && 632 sb1->set_uuid1 == sb2->set_uuid1 && 633 sb1->set_uuid2 == sb2->set_uuid2 && 634 sb1->set_uuid3 == sb2->set_uuid3; 635 } 636 637 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2) 638 { 639 int ret; 640 mdp_super_t *tmp1, *tmp2; 641 642 tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL); 643 tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL); 644 645 if (!tmp1 || !tmp2) { 646 ret = 0; 647 printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n"); 648 goto abort; 649 } 650 651 *tmp1 = *sb1; 652 *tmp2 = *sb2; 653 654 /* 655 * nr_disks is not constant 656 */ 657 tmp1->nr_disks = 0; 658 tmp2->nr_disks = 0; 659 660 ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0); 661 abort: 662 kfree(tmp1); 663 kfree(tmp2); 664 return ret; 665 } 666 667 668 static u32 md_csum_fold(u32 csum) 669 { 670 csum = (csum & 0xffff) + (csum >> 16); 671 return (csum & 0xffff) + (csum >> 16); 672 } 673 674 static unsigned int calc_sb_csum(mdp_super_t * sb) 675 { 676 u64 newcsum = 0; 677 u32 *sb32 = (u32*)sb; 678 int i; 679 unsigned int disk_csum, csum; 680 681 disk_csum = sb->sb_csum; 682 sb->sb_csum = 0; 683 684 for (i = 0; i < MD_SB_BYTES/4 ; i++) 685 newcsum += sb32[i]; 686 csum = (newcsum & 0xffffffff) + (newcsum>>32); 687 688 689 #ifdef CONFIG_ALPHA 690 /* This used to use csum_partial, which was wrong for several 691 * reasons including that different results are returned on 692 * different architectures. It isn't critical that we get exactly 693 * the same return value as before (we always csum_fold before 694 * testing, and that removes any differences). However as we 695 * know that csum_partial always returned a 16bit value on 696 * alphas, do a fold to maximise conformity to previous behaviour. 697 */ 698 sb->sb_csum = md_csum_fold(disk_csum); 699 #else 700 sb->sb_csum = disk_csum; 701 #endif 702 return csum; 703 } 704 705 706 /* 707 * Handle superblock details. 708 * We want to be able to handle multiple superblock formats 709 * so we have a common interface to them all, and an array of 710 * different handlers. 711 * We rely on user-space to write the initial superblock, and support 712 * reading and updating of superblocks. 713 * Interface methods are: 714 * int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version) 715 * loads and validates a superblock on dev. 716 * if refdev != NULL, compare superblocks on both devices 717 * Return: 718 * 0 - dev has a superblock that is compatible with refdev 719 * 1 - dev has a superblock that is compatible and newer than refdev 720 * so dev should be used as the refdev in future 721 * -EINVAL superblock incompatible or invalid 722 * -othererror e.g. -EIO 723 * 724 * int validate_super(mddev_t *mddev, mdk_rdev_t *dev) 725 * Verify that dev is acceptable into mddev. 726 * The first time, mddev->raid_disks will be 0, and data from 727 * dev should be merged in. Subsequent calls check that dev 728 * is new enough. Return 0 or -EINVAL 729 * 730 * void sync_super(mddev_t *mddev, mdk_rdev_t *dev) 731 * Update the superblock for rdev with data in mddev 732 * This does not write to disc. 733 * 734 */ 735 736 struct super_type { 737 char *name; 738 struct module *owner; 739 int (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev, 740 int minor_version); 741 int (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev); 742 void (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev); 743 unsigned long long (*rdev_size_change)(mdk_rdev_t *rdev, 744 sector_t num_sectors); 745 }; 746 747 /* 748 * load_super for 0.90.0 749 */ 750 static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) 751 { 752 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 753 mdp_super_t *sb; 754 int ret; 755 756 /* 757 * Calculate the position of the superblock (512byte sectors), 758 * it's at the end of the disk. 759 * 760 * It also happens to be a multiple of 4Kb. 761 */ 762 rdev->sb_start = calc_dev_sboffset(rdev->bdev); 763 764 ret = read_disk_sb(rdev, MD_SB_BYTES); 765 if (ret) return ret; 766 767 ret = -EINVAL; 768 769 bdevname(rdev->bdev, b); 770 sb = (mdp_super_t*)page_address(rdev->sb_page); 771 772 if (sb->md_magic != MD_SB_MAGIC) { 773 printk(KERN_ERR "md: invalid raid superblock magic on %s\n", 774 b); 775 goto abort; 776 } 777 778 if (sb->major_version != 0 || 779 sb->minor_version < 90 || 780 sb->minor_version > 91) { 781 printk(KERN_WARNING "Bad version number %d.%d on %s\n", 782 sb->major_version, sb->minor_version, 783 b); 784 goto abort; 785 } 786 787 if (sb->raid_disks <= 0) 788 goto abort; 789 790 if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) { 791 printk(KERN_WARNING "md: invalid superblock checksum on %s\n", 792 b); 793 goto abort; 794 } 795 796 rdev->preferred_minor = sb->md_minor; 797 rdev->data_offset = 0; 798 rdev->sb_size = MD_SB_BYTES; 799 800 if (sb->state & (1<<MD_SB_BITMAP_PRESENT)) { 801 if (sb->level != 1 && sb->level != 4 802 && sb->level != 5 && sb->level != 6 803 && sb->level != 10) { 804 /* FIXME use a better test */ 805 printk(KERN_WARNING 806 "md: bitmaps not supported for this level.\n"); 807 goto abort; 808 } 809 } 810 811 if (sb->level == LEVEL_MULTIPATH) 812 rdev->desc_nr = -1; 813 else 814 rdev->desc_nr = sb->this_disk.number; 815 816 if (!refdev) { 817 ret = 1; 818 } else { 819 __u64 ev1, ev2; 820 mdp_super_t *refsb = (mdp_super_t*)page_address(refdev->sb_page); 821 if (!uuid_equal(refsb, sb)) { 822 printk(KERN_WARNING "md: %s has different UUID to %s\n", 823 b, bdevname(refdev->bdev,b2)); 824 goto abort; 825 } 826 if (!sb_equal(refsb, sb)) { 827 printk(KERN_WARNING "md: %s has same UUID" 828 " but different superblock to %s\n", 829 b, bdevname(refdev->bdev, b2)); 830 goto abort; 831 } 832 ev1 = md_event(sb); 833 ev2 = md_event(refsb); 834 if (ev1 > ev2) 835 ret = 1; 836 else 837 ret = 0; 838 } 839 rdev->sectors = calc_num_sectors(rdev, sb->chunk_size); 840 841 if (rdev->sectors < sb->size * 2 && sb->level > 1) 842 /* "this cannot possibly happen" ... */ 843 ret = -EINVAL; 844 845 abort: 846 return ret; 847 } 848 849 /* 850 * validate_super for 0.90.0 851 */ 852 static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev) 853 { 854 mdp_disk_t *desc; 855 mdp_super_t *sb = (mdp_super_t *)page_address(rdev->sb_page); 856 __u64 ev1 = md_event(sb); 857 858 rdev->raid_disk = -1; 859 clear_bit(Faulty, &rdev->flags); 860 clear_bit(In_sync, &rdev->flags); 861 clear_bit(WriteMostly, &rdev->flags); 862 clear_bit(BarriersNotsupp, &rdev->flags); 863 864 if (mddev->raid_disks == 0) { 865 mddev->major_version = 0; 866 mddev->minor_version = sb->minor_version; 867 mddev->patch_version = sb->patch_version; 868 mddev->external = 0; 869 mddev->chunk_size = sb->chunk_size; 870 mddev->ctime = sb->ctime; 871 mddev->utime = sb->utime; 872 mddev->level = sb->level; 873 mddev->clevel[0] = 0; 874 mddev->layout = sb->layout; 875 mddev->raid_disks = sb->raid_disks; 876 mddev->dev_sectors = sb->size * 2; 877 mddev->events = ev1; 878 mddev->bitmap_offset = 0; 879 mddev->default_bitmap_offset = MD_SB_BYTES >> 9; 880 881 if (mddev->minor_version >= 91) { 882 mddev->reshape_position = sb->reshape_position; 883 mddev->delta_disks = sb->delta_disks; 884 mddev->new_level = sb->new_level; 885 mddev->new_layout = sb->new_layout; 886 mddev->new_chunk = sb->new_chunk; 887 } else { 888 mddev->reshape_position = MaxSector; 889 mddev->delta_disks = 0; 890 mddev->new_level = mddev->level; 891 mddev->new_layout = mddev->layout; 892 mddev->new_chunk = mddev->chunk_size; 893 } 894 895 if (sb->state & (1<<MD_SB_CLEAN)) 896 mddev->recovery_cp = MaxSector; 897 else { 898 if (sb->events_hi == sb->cp_events_hi && 899 sb->events_lo == sb->cp_events_lo) { 900 mddev->recovery_cp = sb->recovery_cp; 901 } else 902 mddev->recovery_cp = 0; 903 } 904 905 memcpy(mddev->uuid+0, &sb->set_uuid0, 4); 906 memcpy(mddev->uuid+4, &sb->set_uuid1, 4); 907 memcpy(mddev->uuid+8, &sb->set_uuid2, 4); 908 memcpy(mddev->uuid+12,&sb->set_uuid3, 4); 909 910 mddev->max_disks = MD_SB_DISKS; 911 912 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) && 913 mddev->bitmap_file == NULL) 914 mddev->bitmap_offset = mddev->default_bitmap_offset; 915 916 } else if (mddev->pers == NULL) { 917 /* Insist on good event counter while assembling */ 918 ++ev1; 919 if (ev1 < mddev->events) 920 return -EINVAL; 921 } else if (mddev->bitmap) { 922 /* if adding to array with a bitmap, then we can accept an 923 * older device ... but not too old. 924 */ 925 if (ev1 < mddev->bitmap->events_cleared) 926 return 0; 927 } else { 928 if (ev1 < mddev->events) 929 /* just a hot-add of a new device, leave raid_disk at -1 */ 930 return 0; 931 } 932 933 if (mddev->level != LEVEL_MULTIPATH) { 934 desc = sb->disks + rdev->desc_nr; 935 936 if (desc->state & (1<<MD_DISK_FAULTY)) 937 set_bit(Faulty, &rdev->flags); 938 else if (desc->state & (1<<MD_DISK_SYNC) /* && 939 desc->raid_disk < mddev->raid_disks */) { 940 set_bit(In_sync, &rdev->flags); 941 rdev->raid_disk = desc->raid_disk; 942 } 943 if (desc->state & (1<<MD_DISK_WRITEMOSTLY)) 944 set_bit(WriteMostly, &rdev->flags); 945 } else /* MULTIPATH are always insync */ 946 set_bit(In_sync, &rdev->flags); 947 return 0; 948 } 949 950 /* 951 * sync_super for 0.90.0 952 */ 953 static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev) 954 { 955 mdp_super_t *sb; 956 mdk_rdev_t *rdev2; 957 int next_spare = mddev->raid_disks; 958 959 960 /* make rdev->sb match mddev data.. 961 * 962 * 1/ zero out disks 963 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare); 964 * 3/ any empty disks < next_spare become removed 965 * 966 * disks[0] gets initialised to REMOVED because 967 * we cannot be sure from other fields if it has 968 * been initialised or not. 969 */ 970 int i; 971 int active=0, working=0,failed=0,spare=0,nr_disks=0; 972 973 rdev->sb_size = MD_SB_BYTES; 974 975 sb = (mdp_super_t*)page_address(rdev->sb_page); 976 977 memset(sb, 0, sizeof(*sb)); 978 979 sb->md_magic = MD_SB_MAGIC; 980 sb->major_version = mddev->major_version; 981 sb->patch_version = mddev->patch_version; 982 sb->gvalid_words = 0; /* ignored */ 983 memcpy(&sb->set_uuid0, mddev->uuid+0, 4); 984 memcpy(&sb->set_uuid1, mddev->uuid+4, 4); 985 memcpy(&sb->set_uuid2, mddev->uuid+8, 4); 986 memcpy(&sb->set_uuid3, mddev->uuid+12,4); 987 988 sb->ctime = mddev->ctime; 989 sb->level = mddev->level; 990 sb->size = mddev->dev_sectors / 2; 991 sb->raid_disks = mddev->raid_disks; 992 sb->md_minor = mddev->md_minor; 993 sb->not_persistent = 0; 994 sb->utime = mddev->utime; 995 sb->state = 0; 996 sb->events_hi = (mddev->events>>32); 997 sb->events_lo = (u32)mddev->events; 998 999 if (mddev->reshape_position == MaxSector) 1000 sb->minor_version = 90; 1001 else { 1002 sb->minor_version = 91; 1003 sb->reshape_position = mddev->reshape_position; 1004 sb->new_level = mddev->new_level; 1005 sb->delta_disks = mddev->delta_disks; 1006 sb->new_layout = mddev->new_layout; 1007 sb->new_chunk = mddev->new_chunk; 1008 } 1009 mddev->minor_version = sb->minor_version; 1010 if (mddev->in_sync) 1011 { 1012 sb->recovery_cp = mddev->recovery_cp; 1013 sb->cp_events_hi = (mddev->events>>32); 1014 sb->cp_events_lo = (u32)mddev->events; 1015 if (mddev->recovery_cp == MaxSector) 1016 sb->state = (1<< MD_SB_CLEAN); 1017 } else 1018 sb->recovery_cp = 0; 1019 1020 sb->layout = mddev->layout; 1021 sb->chunk_size = mddev->chunk_size; 1022 1023 if (mddev->bitmap && mddev->bitmap_file == NULL) 1024 sb->state |= (1<<MD_SB_BITMAP_PRESENT); 1025 1026 sb->disks[0].state = (1<<MD_DISK_REMOVED); 1027 list_for_each_entry(rdev2, &mddev->disks, same_set) { 1028 mdp_disk_t *d; 1029 int desc_nr; 1030 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags) 1031 && !test_bit(Faulty, &rdev2->flags)) 1032 desc_nr = rdev2->raid_disk; 1033 else 1034 desc_nr = next_spare++; 1035 rdev2->desc_nr = desc_nr; 1036 d = &sb->disks[rdev2->desc_nr]; 1037 nr_disks++; 1038 d->number = rdev2->desc_nr; 1039 d->major = MAJOR(rdev2->bdev->bd_dev); 1040 d->minor = MINOR(rdev2->bdev->bd_dev); 1041 if (rdev2->raid_disk >= 0 && test_bit(In_sync, &rdev2->flags) 1042 && !test_bit(Faulty, &rdev2->flags)) 1043 d->raid_disk = rdev2->raid_disk; 1044 else 1045 d->raid_disk = rdev2->desc_nr; /* compatibility */ 1046 if (test_bit(Faulty, &rdev2->flags)) 1047 d->state = (1<<MD_DISK_FAULTY); 1048 else if (test_bit(In_sync, &rdev2->flags)) { 1049 d->state = (1<<MD_DISK_ACTIVE); 1050 d->state |= (1<<MD_DISK_SYNC); 1051 active++; 1052 working++; 1053 } else { 1054 d->state = 0; 1055 spare++; 1056 working++; 1057 } 1058 if (test_bit(WriteMostly, &rdev2->flags)) 1059 d->state |= (1<<MD_DISK_WRITEMOSTLY); 1060 } 1061 /* now set the "removed" and "faulty" bits on any missing devices */ 1062 for (i=0 ; i < mddev->raid_disks ; i++) { 1063 mdp_disk_t *d = &sb->disks[i]; 1064 if (d->state == 0 && d->number == 0) { 1065 d->number = i; 1066 d->raid_disk = i; 1067 d->state = (1<<MD_DISK_REMOVED); 1068 d->state |= (1<<MD_DISK_FAULTY); 1069 failed++; 1070 } 1071 } 1072 sb->nr_disks = nr_disks; 1073 sb->active_disks = active; 1074 sb->working_disks = working; 1075 sb->failed_disks = failed; 1076 sb->spare_disks = spare; 1077 1078 sb->this_disk = sb->disks[rdev->desc_nr]; 1079 sb->sb_csum = calc_sb_csum(sb); 1080 } 1081 1082 /* 1083 * rdev_size_change for 0.90.0 1084 */ 1085 static unsigned long long 1086 super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors) 1087 { 1088 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) 1089 return 0; /* component must fit device */ 1090 if (rdev->mddev->bitmap_offset) 1091 return 0; /* can't move bitmap */ 1092 rdev->sb_start = calc_dev_sboffset(rdev->bdev); 1093 if (!num_sectors || num_sectors > rdev->sb_start) 1094 num_sectors = rdev->sb_start; 1095 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, 1096 rdev->sb_page); 1097 md_super_wait(rdev->mddev); 1098 return num_sectors / 2; /* kB for sysfs */ 1099 } 1100 1101 1102 /* 1103 * version 1 superblock 1104 */ 1105 1106 static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb) 1107 { 1108 __le32 disk_csum; 1109 u32 csum; 1110 unsigned long long newcsum; 1111 int size = 256 + le32_to_cpu(sb->max_dev)*2; 1112 __le32 *isuper = (__le32*)sb; 1113 int i; 1114 1115 disk_csum = sb->sb_csum; 1116 sb->sb_csum = 0; 1117 newcsum = 0; 1118 for (i=0; size>=4; size -= 4 ) 1119 newcsum += le32_to_cpu(*isuper++); 1120 1121 if (size == 2) 1122 newcsum += le16_to_cpu(*(__le16*) isuper); 1123 1124 csum = (newcsum & 0xffffffff) + (newcsum >> 32); 1125 sb->sb_csum = disk_csum; 1126 return cpu_to_le32(csum); 1127 } 1128 1129 static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version) 1130 { 1131 struct mdp_superblock_1 *sb; 1132 int ret; 1133 sector_t sb_start; 1134 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 1135 int bmask; 1136 1137 /* 1138 * Calculate the position of the superblock in 512byte sectors. 1139 * It is always aligned to a 4K boundary and 1140 * depeding on minor_version, it can be: 1141 * 0: At least 8K, but less than 12K, from end of device 1142 * 1: At start of device 1143 * 2: 4K from start of device. 1144 */ 1145 switch(minor_version) { 1146 case 0: 1147 sb_start = rdev->bdev->bd_inode->i_size >> 9; 1148 sb_start -= 8*2; 1149 sb_start &= ~(sector_t)(4*2-1); 1150 break; 1151 case 1: 1152 sb_start = 0; 1153 break; 1154 case 2: 1155 sb_start = 8; 1156 break; 1157 default: 1158 return -EINVAL; 1159 } 1160 rdev->sb_start = sb_start; 1161 1162 /* superblock is rarely larger than 1K, but it can be larger, 1163 * and it is safe to read 4k, so we do that 1164 */ 1165 ret = read_disk_sb(rdev, 4096); 1166 if (ret) return ret; 1167 1168 1169 sb = (struct mdp_superblock_1*)page_address(rdev->sb_page); 1170 1171 if (sb->magic != cpu_to_le32(MD_SB_MAGIC) || 1172 sb->major_version != cpu_to_le32(1) || 1173 le32_to_cpu(sb->max_dev) > (4096-256)/2 || 1174 le64_to_cpu(sb->super_offset) != rdev->sb_start || 1175 (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0) 1176 return -EINVAL; 1177 1178 if (calc_sb_1_csum(sb) != sb->sb_csum) { 1179 printk("md: invalid superblock checksum on %s\n", 1180 bdevname(rdev->bdev,b)); 1181 return -EINVAL; 1182 } 1183 if (le64_to_cpu(sb->data_size) < 10) { 1184 printk("md: data_size too small on %s\n", 1185 bdevname(rdev->bdev,b)); 1186 return -EINVAL; 1187 } 1188 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET)) { 1189 if (sb->level != cpu_to_le32(1) && 1190 sb->level != cpu_to_le32(4) && 1191 sb->level != cpu_to_le32(5) && 1192 sb->level != cpu_to_le32(6) && 1193 sb->level != cpu_to_le32(10)) { 1194 printk(KERN_WARNING 1195 "md: bitmaps not supported for this level.\n"); 1196 return -EINVAL; 1197 } 1198 } 1199 1200 rdev->preferred_minor = 0xffff; 1201 rdev->data_offset = le64_to_cpu(sb->data_offset); 1202 atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read)); 1203 1204 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256; 1205 bmask = queue_hardsect_size(rdev->bdev->bd_disk->queue)-1; 1206 if (rdev->sb_size & bmask) 1207 rdev->sb_size = (rdev->sb_size | bmask) + 1; 1208 1209 if (minor_version 1210 && rdev->data_offset < sb_start + (rdev->sb_size/512)) 1211 return -EINVAL; 1212 1213 if (sb->level == cpu_to_le32(LEVEL_MULTIPATH)) 1214 rdev->desc_nr = -1; 1215 else 1216 rdev->desc_nr = le32_to_cpu(sb->dev_number); 1217 1218 if (!refdev) { 1219 ret = 1; 1220 } else { 1221 __u64 ev1, ev2; 1222 struct mdp_superblock_1 *refsb = 1223 (struct mdp_superblock_1*)page_address(refdev->sb_page); 1224 1225 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 || 1226 sb->level != refsb->level || 1227 sb->layout != refsb->layout || 1228 sb->chunksize != refsb->chunksize) { 1229 printk(KERN_WARNING "md: %s has strangely different" 1230 " superblock to %s\n", 1231 bdevname(rdev->bdev,b), 1232 bdevname(refdev->bdev,b2)); 1233 return -EINVAL; 1234 } 1235 ev1 = le64_to_cpu(sb->events); 1236 ev2 = le64_to_cpu(refsb->events); 1237 1238 if (ev1 > ev2) 1239 ret = 1; 1240 else 1241 ret = 0; 1242 } 1243 if (minor_version) 1244 rdev->sectors = (rdev->bdev->bd_inode->i_size >> 9) - 1245 le64_to_cpu(sb->data_offset); 1246 else 1247 rdev->sectors = rdev->sb_start; 1248 if (rdev->sectors < le64_to_cpu(sb->data_size)) 1249 return -EINVAL; 1250 rdev->sectors = le64_to_cpu(sb->data_size); 1251 if (le32_to_cpu(sb->chunksize)) 1252 rdev->sectors &= ~((sector_t)le32_to_cpu(sb->chunksize) - 1); 1253 1254 if (le64_to_cpu(sb->size) > rdev->sectors) 1255 return -EINVAL; 1256 return ret; 1257 } 1258 1259 static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev) 1260 { 1261 struct mdp_superblock_1 *sb = (struct mdp_superblock_1*)page_address(rdev->sb_page); 1262 __u64 ev1 = le64_to_cpu(sb->events); 1263 1264 rdev->raid_disk = -1; 1265 clear_bit(Faulty, &rdev->flags); 1266 clear_bit(In_sync, &rdev->flags); 1267 clear_bit(WriteMostly, &rdev->flags); 1268 clear_bit(BarriersNotsupp, &rdev->flags); 1269 1270 if (mddev->raid_disks == 0) { 1271 mddev->major_version = 1; 1272 mddev->patch_version = 0; 1273 mddev->external = 0; 1274 mddev->chunk_size = le32_to_cpu(sb->chunksize) << 9; 1275 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1); 1276 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1); 1277 mddev->level = le32_to_cpu(sb->level); 1278 mddev->clevel[0] = 0; 1279 mddev->layout = le32_to_cpu(sb->layout); 1280 mddev->raid_disks = le32_to_cpu(sb->raid_disks); 1281 mddev->dev_sectors = le64_to_cpu(sb->size); 1282 mddev->events = ev1; 1283 mddev->bitmap_offset = 0; 1284 mddev->default_bitmap_offset = 1024 >> 9; 1285 1286 mddev->recovery_cp = le64_to_cpu(sb->resync_offset); 1287 memcpy(mddev->uuid, sb->set_uuid, 16); 1288 1289 mddev->max_disks = (4096-256)/2; 1290 1291 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) && 1292 mddev->bitmap_file == NULL ) 1293 mddev->bitmap_offset = (__s32)le32_to_cpu(sb->bitmap_offset); 1294 1295 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) { 1296 mddev->reshape_position = le64_to_cpu(sb->reshape_position); 1297 mddev->delta_disks = le32_to_cpu(sb->delta_disks); 1298 mddev->new_level = le32_to_cpu(sb->new_level); 1299 mddev->new_layout = le32_to_cpu(sb->new_layout); 1300 mddev->new_chunk = le32_to_cpu(sb->new_chunk)<<9; 1301 } else { 1302 mddev->reshape_position = MaxSector; 1303 mddev->delta_disks = 0; 1304 mddev->new_level = mddev->level; 1305 mddev->new_layout = mddev->layout; 1306 mddev->new_chunk = mddev->chunk_size; 1307 } 1308 1309 } else if (mddev->pers == NULL) { 1310 /* Insist of good event counter while assembling */ 1311 ++ev1; 1312 if (ev1 < mddev->events) 1313 return -EINVAL; 1314 } else if (mddev->bitmap) { 1315 /* If adding to array with a bitmap, then we can accept an 1316 * older device, but not too old. 1317 */ 1318 if (ev1 < mddev->bitmap->events_cleared) 1319 return 0; 1320 } else { 1321 if (ev1 < mddev->events) 1322 /* just a hot-add of a new device, leave raid_disk at -1 */ 1323 return 0; 1324 } 1325 if (mddev->level != LEVEL_MULTIPATH) { 1326 int role; 1327 role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]); 1328 switch(role) { 1329 case 0xffff: /* spare */ 1330 break; 1331 case 0xfffe: /* faulty */ 1332 set_bit(Faulty, &rdev->flags); 1333 break; 1334 default: 1335 if ((le32_to_cpu(sb->feature_map) & 1336 MD_FEATURE_RECOVERY_OFFSET)) 1337 rdev->recovery_offset = le64_to_cpu(sb->recovery_offset); 1338 else 1339 set_bit(In_sync, &rdev->flags); 1340 rdev->raid_disk = role; 1341 break; 1342 } 1343 if (sb->devflags & WriteMostly1) 1344 set_bit(WriteMostly, &rdev->flags); 1345 } else /* MULTIPATH are always insync */ 1346 set_bit(In_sync, &rdev->flags); 1347 1348 return 0; 1349 } 1350 1351 static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev) 1352 { 1353 struct mdp_superblock_1 *sb; 1354 mdk_rdev_t *rdev2; 1355 int max_dev, i; 1356 /* make rdev->sb match mddev and rdev data. */ 1357 1358 sb = (struct mdp_superblock_1*)page_address(rdev->sb_page); 1359 1360 sb->feature_map = 0; 1361 sb->pad0 = 0; 1362 sb->recovery_offset = cpu_to_le64(0); 1363 memset(sb->pad1, 0, sizeof(sb->pad1)); 1364 memset(sb->pad2, 0, sizeof(sb->pad2)); 1365 memset(sb->pad3, 0, sizeof(sb->pad3)); 1366 1367 sb->utime = cpu_to_le64((__u64)mddev->utime); 1368 sb->events = cpu_to_le64(mddev->events); 1369 if (mddev->in_sync) 1370 sb->resync_offset = cpu_to_le64(mddev->recovery_cp); 1371 else 1372 sb->resync_offset = cpu_to_le64(0); 1373 1374 sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors)); 1375 1376 sb->raid_disks = cpu_to_le32(mddev->raid_disks); 1377 sb->size = cpu_to_le64(mddev->dev_sectors); 1378 1379 if (mddev->bitmap && mddev->bitmap_file == NULL) { 1380 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_offset); 1381 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); 1382 } 1383 1384 if (rdev->raid_disk >= 0 && 1385 !test_bit(In_sync, &rdev->flags)) { 1386 if (mddev->curr_resync_completed > rdev->recovery_offset) 1387 rdev->recovery_offset = mddev->curr_resync_completed; 1388 if (rdev->recovery_offset > 0) { 1389 sb->feature_map |= 1390 cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET); 1391 sb->recovery_offset = 1392 cpu_to_le64(rdev->recovery_offset); 1393 } 1394 } 1395 1396 if (mddev->reshape_position != MaxSector) { 1397 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE); 1398 sb->reshape_position = cpu_to_le64(mddev->reshape_position); 1399 sb->new_layout = cpu_to_le32(mddev->new_layout); 1400 sb->delta_disks = cpu_to_le32(mddev->delta_disks); 1401 sb->new_level = cpu_to_le32(mddev->new_level); 1402 sb->new_chunk = cpu_to_le32(mddev->new_chunk>>9); 1403 } 1404 1405 max_dev = 0; 1406 list_for_each_entry(rdev2, &mddev->disks, same_set) 1407 if (rdev2->desc_nr+1 > max_dev) 1408 max_dev = rdev2->desc_nr+1; 1409 1410 if (max_dev > le32_to_cpu(sb->max_dev)) 1411 sb->max_dev = cpu_to_le32(max_dev); 1412 for (i=0; i<max_dev;i++) 1413 sb->dev_roles[i] = cpu_to_le16(0xfffe); 1414 1415 list_for_each_entry(rdev2, &mddev->disks, same_set) { 1416 i = rdev2->desc_nr; 1417 if (test_bit(Faulty, &rdev2->flags)) 1418 sb->dev_roles[i] = cpu_to_le16(0xfffe); 1419 else if (test_bit(In_sync, &rdev2->flags)) 1420 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); 1421 else if (rdev2->raid_disk >= 0 && rdev2->recovery_offset > 0) 1422 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); 1423 else 1424 sb->dev_roles[i] = cpu_to_le16(0xffff); 1425 } 1426 1427 sb->sb_csum = calc_sb_1_csum(sb); 1428 } 1429 1430 static unsigned long long 1431 super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors) 1432 { 1433 struct mdp_superblock_1 *sb; 1434 sector_t max_sectors; 1435 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) 1436 return 0; /* component must fit device */ 1437 if (rdev->sb_start < rdev->data_offset) { 1438 /* minor versions 1 and 2; superblock before data */ 1439 max_sectors = rdev->bdev->bd_inode->i_size >> 9; 1440 max_sectors -= rdev->data_offset; 1441 if (!num_sectors || num_sectors > max_sectors) 1442 num_sectors = max_sectors; 1443 } else if (rdev->mddev->bitmap_offset) { 1444 /* minor version 0 with bitmap we can't move */ 1445 return 0; 1446 } else { 1447 /* minor version 0; superblock after data */ 1448 sector_t sb_start; 1449 sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2; 1450 sb_start &= ~(sector_t)(4*2 - 1); 1451 max_sectors = rdev->sectors + sb_start - rdev->sb_start; 1452 if (!num_sectors || num_sectors > max_sectors) 1453 num_sectors = max_sectors; 1454 rdev->sb_start = sb_start; 1455 } 1456 sb = (struct mdp_superblock_1 *) page_address(rdev->sb_page); 1457 sb->data_size = cpu_to_le64(num_sectors); 1458 sb->super_offset = rdev->sb_start; 1459 sb->sb_csum = calc_sb_1_csum(sb); 1460 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, 1461 rdev->sb_page); 1462 md_super_wait(rdev->mddev); 1463 return num_sectors / 2; /* kB for sysfs */ 1464 } 1465 1466 static struct super_type super_types[] = { 1467 [0] = { 1468 .name = "0.90.0", 1469 .owner = THIS_MODULE, 1470 .load_super = super_90_load, 1471 .validate_super = super_90_validate, 1472 .sync_super = super_90_sync, 1473 .rdev_size_change = super_90_rdev_size_change, 1474 }, 1475 [1] = { 1476 .name = "md-1", 1477 .owner = THIS_MODULE, 1478 .load_super = super_1_load, 1479 .validate_super = super_1_validate, 1480 .sync_super = super_1_sync, 1481 .rdev_size_change = super_1_rdev_size_change, 1482 }, 1483 }; 1484 1485 static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2) 1486 { 1487 mdk_rdev_t *rdev, *rdev2; 1488 1489 rcu_read_lock(); 1490 rdev_for_each_rcu(rdev, mddev1) 1491 rdev_for_each_rcu(rdev2, mddev2) 1492 if (rdev->bdev->bd_contains == 1493 rdev2->bdev->bd_contains) { 1494 rcu_read_unlock(); 1495 return 1; 1496 } 1497 rcu_read_unlock(); 1498 return 0; 1499 } 1500 1501 static LIST_HEAD(pending_raid_disks); 1502 1503 static void md_integrity_check(mdk_rdev_t *rdev, mddev_t *mddev) 1504 { 1505 struct mdk_personality *pers = mddev->pers; 1506 struct gendisk *disk = mddev->gendisk; 1507 struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev); 1508 struct blk_integrity *bi_mddev = blk_get_integrity(disk); 1509 1510 /* Data integrity passthrough not supported on RAID 4, 5 and 6 */ 1511 if (pers && pers->level >= 4 && pers->level <= 6) 1512 return; 1513 1514 /* If rdev is integrity capable, register profile for mddev */ 1515 if (!bi_mddev && bi_rdev) { 1516 if (blk_integrity_register(disk, bi_rdev)) 1517 printk(KERN_ERR "%s: %s Could not register integrity!\n", 1518 __func__, disk->disk_name); 1519 else 1520 printk(KERN_NOTICE "Enabling data integrity on %s\n", 1521 disk->disk_name); 1522 return; 1523 } 1524 1525 /* Check that mddev and rdev have matching profiles */ 1526 if (blk_integrity_compare(disk, rdev->bdev->bd_disk) < 0) { 1527 printk(KERN_ERR "%s: %s/%s integrity mismatch!\n", __func__, 1528 disk->disk_name, rdev->bdev->bd_disk->disk_name); 1529 printk(KERN_NOTICE "Disabling data integrity on %s\n", 1530 disk->disk_name); 1531 blk_integrity_unregister(disk); 1532 } 1533 } 1534 1535 static int bind_rdev_to_array(mdk_rdev_t * rdev, mddev_t * mddev) 1536 { 1537 char b[BDEVNAME_SIZE]; 1538 struct kobject *ko; 1539 char *s; 1540 int err; 1541 1542 if (rdev->mddev) { 1543 MD_BUG(); 1544 return -EINVAL; 1545 } 1546 1547 /* prevent duplicates */ 1548 if (find_rdev(mddev, rdev->bdev->bd_dev)) 1549 return -EEXIST; 1550 1551 /* make sure rdev->sectors exceeds mddev->dev_sectors */ 1552 if (rdev->sectors && (mddev->dev_sectors == 0 || 1553 rdev->sectors < mddev->dev_sectors)) { 1554 if (mddev->pers) { 1555 /* Cannot change size, so fail 1556 * If mddev->level <= 0, then we don't care 1557 * about aligning sizes (e.g. linear) 1558 */ 1559 if (mddev->level > 0) 1560 return -ENOSPC; 1561 } else 1562 mddev->dev_sectors = rdev->sectors; 1563 } 1564 1565 /* Verify rdev->desc_nr is unique. 1566 * If it is -1, assign a free number, else 1567 * check number is not in use 1568 */ 1569 if (rdev->desc_nr < 0) { 1570 int choice = 0; 1571 if (mddev->pers) choice = mddev->raid_disks; 1572 while (find_rdev_nr(mddev, choice)) 1573 choice++; 1574 rdev->desc_nr = choice; 1575 } else { 1576 if (find_rdev_nr(mddev, rdev->desc_nr)) 1577 return -EBUSY; 1578 } 1579 if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) { 1580 printk(KERN_WARNING "md: %s: array is limited to %d devices\n", 1581 mdname(mddev), mddev->max_disks); 1582 return -EBUSY; 1583 } 1584 bdevname(rdev->bdev,b); 1585 while ( (s=strchr(b, '/')) != NULL) 1586 *s = '!'; 1587 1588 rdev->mddev = mddev; 1589 printk(KERN_INFO "md: bind<%s>\n", b); 1590 1591 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) 1592 goto fail; 1593 1594 ko = &part_to_dev(rdev->bdev->bd_part)->kobj; 1595 if ((err = sysfs_create_link(&rdev->kobj, ko, "block"))) { 1596 kobject_del(&rdev->kobj); 1597 goto fail; 1598 } 1599 rdev->sysfs_state = sysfs_get_dirent(rdev->kobj.sd, "state"); 1600 1601 list_add_rcu(&rdev->same_set, &mddev->disks); 1602 bd_claim_by_disk(rdev->bdev, rdev->bdev->bd_holder, mddev->gendisk); 1603 1604 /* May as well allow recovery to be retried once */ 1605 mddev->recovery_disabled = 0; 1606 1607 md_integrity_check(rdev, mddev); 1608 return 0; 1609 1610 fail: 1611 printk(KERN_WARNING "md: failed to register dev-%s for %s\n", 1612 b, mdname(mddev)); 1613 return err; 1614 } 1615 1616 static void md_delayed_delete(struct work_struct *ws) 1617 { 1618 mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work); 1619 kobject_del(&rdev->kobj); 1620 kobject_put(&rdev->kobj); 1621 } 1622 1623 static void unbind_rdev_from_array(mdk_rdev_t * rdev) 1624 { 1625 char b[BDEVNAME_SIZE]; 1626 if (!rdev->mddev) { 1627 MD_BUG(); 1628 return; 1629 } 1630 bd_release_from_disk(rdev->bdev, rdev->mddev->gendisk); 1631 list_del_rcu(&rdev->same_set); 1632 printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b)); 1633 rdev->mddev = NULL; 1634 sysfs_remove_link(&rdev->kobj, "block"); 1635 sysfs_put(rdev->sysfs_state); 1636 rdev->sysfs_state = NULL; 1637 /* We need to delay this, otherwise we can deadlock when 1638 * writing to 'remove' to "dev/state". We also need 1639 * to delay it due to rcu usage. 1640 */ 1641 synchronize_rcu(); 1642 INIT_WORK(&rdev->del_work, md_delayed_delete); 1643 kobject_get(&rdev->kobj); 1644 schedule_work(&rdev->del_work); 1645 } 1646 1647 /* 1648 * prevent the device from being mounted, repartitioned or 1649 * otherwise reused by a RAID array (or any other kernel 1650 * subsystem), by bd_claiming the device. 1651 */ 1652 static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared) 1653 { 1654 int err = 0; 1655 struct block_device *bdev; 1656 char b[BDEVNAME_SIZE]; 1657 1658 bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE); 1659 if (IS_ERR(bdev)) { 1660 printk(KERN_ERR "md: could not open %s.\n", 1661 __bdevname(dev, b)); 1662 return PTR_ERR(bdev); 1663 } 1664 err = bd_claim(bdev, shared ? (mdk_rdev_t *)lock_rdev : rdev); 1665 if (err) { 1666 printk(KERN_ERR "md: could not bd_claim %s.\n", 1667 bdevname(bdev, b)); 1668 blkdev_put(bdev, FMODE_READ|FMODE_WRITE); 1669 return err; 1670 } 1671 if (!shared) 1672 set_bit(AllReserved, &rdev->flags); 1673 rdev->bdev = bdev; 1674 return err; 1675 } 1676 1677 static void unlock_rdev(mdk_rdev_t *rdev) 1678 { 1679 struct block_device *bdev = rdev->bdev; 1680 rdev->bdev = NULL; 1681 if (!bdev) 1682 MD_BUG(); 1683 bd_release(bdev); 1684 blkdev_put(bdev, FMODE_READ|FMODE_WRITE); 1685 } 1686 1687 void md_autodetect_dev(dev_t dev); 1688 1689 static void export_rdev(mdk_rdev_t * rdev) 1690 { 1691 char b[BDEVNAME_SIZE]; 1692 printk(KERN_INFO "md: export_rdev(%s)\n", 1693 bdevname(rdev->bdev,b)); 1694 if (rdev->mddev) 1695 MD_BUG(); 1696 free_disk_sb(rdev); 1697 #ifndef MODULE 1698 if (test_bit(AutoDetected, &rdev->flags)) 1699 md_autodetect_dev(rdev->bdev->bd_dev); 1700 #endif 1701 unlock_rdev(rdev); 1702 kobject_put(&rdev->kobj); 1703 } 1704 1705 static void kick_rdev_from_array(mdk_rdev_t * rdev) 1706 { 1707 unbind_rdev_from_array(rdev); 1708 export_rdev(rdev); 1709 } 1710 1711 static void export_array(mddev_t *mddev) 1712 { 1713 mdk_rdev_t *rdev, *tmp; 1714 1715 rdev_for_each(rdev, tmp, mddev) { 1716 if (!rdev->mddev) { 1717 MD_BUG(); 1718 continue; 1719 } 1720 kick_rdev_from_array(rdev); 1721 } 1722 if (!list_empty(&mddev->disks)) 1723 MD_BUG(); 1724 mddev->raid_disks = 0; 1725 mddev->major_version = 0; 1726 } 1727 1728 static void print_desc(mdp_disk_t *desc) 1729 { 1730 printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number, 1731 desc->major,desc->minor,desc->raid_disk,desc->state); 1732 } 1733 1734 static void print_sb_90(mdp_super_t *sb) 1735 { 1736 int i; 1737 1738 printk(KERN_INFO 1739 "md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n", 1740 sb->major_version, sb->minor_version, sb->patch_version, 1741 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3, 1742 sb->ctime); 1743 printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n", 1744 sb->level, sb->size, sb->nr_disks, sb->raid_disks, 1745 sb->md_minor, sb->layout, sb->chunk_size); 1746 printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d" 1747 " FD:%d SD:%d CSUM:%08x E:%08lx\n", 1748 sb->utime, sb->state, sb->active_disks, sb->working_disks, 1749 sb->failed_disks, sb->spare_disks, 1750 sb->sb_csum, (unsigned long)sb->events_lo); 1751 1752 printk(KERN_INFO); 1753 for (i = 0; i < MD_SB_DISKS; i++) { 1754 mdp_disk_t *desc; 1755 1756 desc = sb->disks + i; 1757 if (desc->number || desc->major || desc->minor || 1758 desc->raid_disk || (desc->state && (desc->state != 4))) { 1759 printk(" D %2d: ", i); 1760 print_desc(desc); 1761 } 1762 } 1763 printk(KERN_INFO "md: THIS: "); 1764 print_desc(&sb->this_disk); 1765 } 1766 1767 static void print_sb_1(struct mdp_superblock_1 *sb) 1768 { 1769 __u8 *uuid; 1770 1771 uuid = sb->set_uuid; 1772 printk(KERN_INFO "md: SB: (V:%u) (F:0x%08x) Array-ID:<%02x%02x%02x%02x" 1773 ":%02x%02x:%02x%02x:%02x%02x:%02x%02x%02x%02x%02x%02x>\n" 1774 KERN_INFO "md: Name: \"%s\" CT:%llu\n", 1775 le32_to_cpu(sb->major_version), 1776 le32_to_cpu(sb->feature_map), 1777 uuid[0], uuid[1], uuid[2], uuid[3], 1778 uuid[4], uuid[5], uuid[6], uuid[7], 1779 uuid[8], uuid[9], uuid[10], uuid[11], 1780 uuid[12], uuid[13], uuid[14], uuid[15], 1781 sb->set_name, 1782 (unsigned long long)le64_to_cpu(sb->ctime) 1783 & MD_SUPERBLOCK_1_TIME_SEC_MASK); 1784 1785 uuid = sb->device_uuid; 1786 printk(KERN_INFO "md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu" 1787 " RO:%llu\n" 1788 KERN_INFO "md: Dev:%08x UUID: %02x%02x%02x%02x:%02x%02x:%02x%02x:%02x%02x" 1789 ":%02x%02x%02x%02x%02x%02x\n" 1790 KERN_INFO "md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n" 1791 KERN_INFO "md: (MaxDev:%u) \n", 1792 le32_to_cpu(sb->level), 1793 (unsigned long long)le64_to_cpu(sb->size), 1794 le32_to_cpu(sb->raid_disks), 1795 le32_to_cpu(sb->layout), 1796 le32_to_cpu(sb->chunksize), 1797 (unsigned long long)le64_to_cpu(sb->data_offset), 1798 (unsigned long long)le64_to_cpu(sb->data_size), 1799 (unsigned long long)le64_to_cpu(sb->super_offset), 1800 (unsigned long long)le64_to_cpu(sb->recovery_offset), 1801 le32_to_cpu(sb->dev_number), 1802 uuid[0], uuid[1], uuid[2], uuid[3], 1803 uuid[4], uuid[5], uuid[6], uuid[7], 1804 uuid[8], uuid[9], uuid[10], uuid[11], 1805 uuid[12], uuid[13], uuid[14], uuid[15], 1806 sb->devflags, 1807 (unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK, 1808 (unsigned long long)le64_to_cpu(sb->events), 1809 (unsigned long long)le64_to_cpu(sb->resync_offset), 1810 le32_to_cpu(sb->sb_csum), 1811 le32_to_cpu(sb->max_dev) 1812 ); 1813 } 1814 1815 static void print_rdev(mdk_rdev_t *rdev, int major_version) 1816 { 1817 char b[BDEVNAME_SIZE]; 1818 printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n", 1819 bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors, 1820 test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags), 1821 rdev->desc_nr); 1822 if (rdev->sb_loaded) { 1823 printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version); 1824 switch (major_version) { 1825 case 0: 1826 print_sb_90((mdp_super_t*)page_address(rdev->sb_page)); 1827 break; 1828 case 1: 1829 print_sb_1((struct mdp_superblock_1 *)page_address(rdev->sb_page)); 1830 break; 1831 } 1832 } else 1833 printk(KERN_INFO "md: no rdev superblock!\n"); 1834 } 1835 1836 static void md_print_devices(void) 1837 { 1838 struct list_head *tmp; 1839 mdk_rdev_t *rdev; 1840 mddev_t *mddev; 1841 char b[BDEVNAME_SIZE]; 1842 1843 printk("\n"); 1844 printk("md: **********************************\n"); 1845 printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n"); 1846 printk("md: **********************************\n"); 1847 for_each_mddev(mddev, tmp) { 1848 1849 if (mddev->bitmap) 1850 bitmap_print_sb(mddev->bitmap); 1851 else 1852 printk("%s: ", mdname(mddev)); 1853 list_for_each_entry(rdev, &mddev->disks, same_set) 1854 printk("<%s>", bdevname(rdev->bdev,b)); 1855 printk("\n"); 1856 1857 list_for_each_entry(rdev, &mddev->disks, same_set) 1858 print_rdev(rdev, mddev->major_version); 1859 } 1860 printk("md: **********************************\n"); 1861 printk("\n"); 1862 } 1863 1864 1865 static void sync_sbs(mddev_t * mddev, int nospares) 1866 { 1867 /* Update each superblock (in-memory image), but 1868 * if we are allowed to, skip spares which already 1869 * have the right event counter, or have one earlier 1870 * (which would mean they aren't being marked as dirty 1871 * with the rest of the array) 1872 */ 1873 mdk_rdev_t *rdev; 1874 1875 list_for_each_entry(rdev, &mddev->disks, same_set) { 1876 if (rdev->sb_events == mddev->events || 1877 (nospares && 1878 rdev->raid_disk < 0 && 1879 (rdev->sb_events&1)==0 && 1880 rdev->sb_events+1 == mddev->events)) { 1881 /* Don't update this superblock */ 1882 rdev->sb_loaded = 2; 1883 } else { 1884 super_types[mddev->major_version]. 1885 sync_super(mddev, rdev); 1886 rdev->sb_loaded = 1; 1887 } 1888 } 1889 } 1890 1891 static void md_update_sb(mddev_t * mddev, int force_change) 1892 { 1893 mdk_rdev_t *rdev; 1894 int sync_req; 1895 int nospares = 0; 1896 1897 if (mddev->external) 1898 return; 1899 repeat: 1900 spin_lock_irq(&mddev->write_lock); 1901 1902 set_bit(MD_CHANGE_PENDING, &mddev->flags); 1903 if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags)) 1904 force_change = 1; 1905 if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags)) 1906 /* just a clean<-> dirty transition, possibly leave spares alone, 1907 * though if events isn't the right even/odd, we will have to do 1908 * spares after all 1909 */ 1910 nospares = 1; 1911 if (force_change) 1912 nospares = 0; 1913 if (mddev->degraded) 1914 /* If the array is degraded, then skipping spares is both 1915 * dangerous and fairly pointless. 1916 * Dangerous because a device that was removed from the array 1917 * might have a event_count that still looks up-to-date, 1918 * so it can be re-added without a resync. 1919 * Pointless because if there are any spares to skip, 1920 * then a recovery will happen and soon that array won't 1921 * be degraded any more and the spare can go back to sleep then. 1922 */ 1923 nospares = 0; 1924 1925 sync_req = mddev->in_sync; 1926 mddev->utime = get_seconds(); 1927 1928 /* If this is just a dirty<->clean transition, and the array is clean 1929 * and 'events' is odd, we can roll back to the previous clean state */ 1930 if (nospares 1931 && (mddev->in_sync && mddev->recovery_cp == MaxSector) 1932 && (mddev->events & 1) 1933 && mddev->events != 1) 1934 mddev->events--; 1935 else { 1936 /* otherwise we have to go forward and ... */ 1937 mddev->events ++; 1938 if (!mddev->in_sync || mddev->recovery_cp != MaxSector) { /* not clean */ 1939 /* .. if the array isn't clean, insist on an odd 'events' */ 1940 if ((mddev->events&1)==0) { 1941 mddev->events++; 1942 nospares = 0; 1943 } 1944 } else { 1945 /* otherwise insist on an even 'events' (for clean states) */ 1946 if ((mddev->events&1)) { 1947 mddev->events++; 1948 nospares = 0; 1949 } 1950 } 1951 } 1952 1953 if (!mddev->events) { 1954 /* 1955 * oops, this 64-bit counter should never wrap. 1956 * Either we are in around ~1 trillion A.C., assuming 1957 * 1 reboot per second, or we have a bug: 1958 */ 1959 MD_BUG(); 1960 mddev->events --; 1961 } 1962 1963 /* 1964 * do not write anything to disk if using 1965 * nonpersistent superblocks 1966 */ 1967 if (!mddev->persistent) { 1968 if (!mddev->external) 1969 clear_bit(MD_CHANGE_PENDING, &mddev->flags); 1970 1971 spin_unlock_irq(&mddev->write_lock); 1972 wake_up(&mddev->sb_wait); 1973 return; 1974 } 1975 sync_sbs(mddev, nospares); 1976 spin_unlock_irq(&mddev->write_lock); 1977 1978 dprintk(KERN_INFO 1979 "md: updating %s RAID superblock on device (in sync %d)\n", 1980 mdname(mddev),mddev->in_sync); 1981 1982 bitmap_update_sb(mddev->bitmap); 1983 list_for_each_entry(rdev, &mddev->disks, same_set) { 1984 char b[BDEVNAME_SIZE]; 1985 dprintk(KERN_INFO "md: "); 1986 if (rdev->sb_loaded != 1) 1987 continue; /* no noise on spare devices */ 1988 if (test_bit(Faulty, &rdev->flags)) 1989 dprintk("(skipping faulty "); 1990 1991 dprintk("%s ", bdevname(rdev->bdev,b)); 1992 if (!test_bit(Faulty, &rdev->flags)) { 1993 md_super_write(mddev,rdev, 1994 rdev->sb_start, rdev->sb_size, 1995 rdev->sb_page); 1996 dprintk(KERN_INFO "(write) %s's sb offset: %llu\n", 1997 bdevname(rdev->bdev,b), 1998 (unsigned long long)rdev->sb_start); 1999 rdev->sb_events = mddev->events; 2000 2001 } else 2002 dprintk(")\n"); 2003 if (mddev->level == LEVEL_MULTIPATH) 2004 /* only need to write one superblock... */ 2005 break; 2006 } 2007 md_super_wait(mddev); 2008 /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */ 2009 2010 spin_lock_irq(&mddev->write_lock); 2011 if (mddev->in_sync != sync_req || 2012 test_bit(MD_CHANGE_DEVS, &mddev->flags)) { 2013 /* have to write it out again */ 2014 spin_unlock_irq(&mddev->write_lock); 2015 goto repeat; 2016 } 2017 clear_bit(MD_CHANGE_PENDING, &mddev->flags); 2018 spin_unlock_irq(&mddev->write_lock); 2019 wake_up(&mddev->sb_wait); 2020 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 2021 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 2022 2023 } 2024 2025 /* words written to sysfs files may, or may not, be \n terminated. 2026 * We want to accept with case. For this we use cmd_match. 2027 */ 2028 static int cmd_match(const char *cmd, const char *str) 2029 { 2030 /* See if cmd, written into a sysfs file, matches 2031 * str. They must either be the same, or cmd can 2032 * have a trailing newline 2033 */ 2034 while (*cmd && *str && *cmd == *str) { 2035 cmd++; 2036 str++; 2037 } 2038 if (*cmd == '\n') 2039 cmd++; 2040 if (*str || *cmd) 2041 return 0; 2042 return 1; 2043 } 2044 2045 struct rdev_sysfs_entry { 2046 struct attribute attr; 2047 ssize_t (*show)(mdk_rdev_t *, char *); 2048 ssize_t (*store)(mdk_rdev_t *, const char *, size_t); 2049 }; 2050 2051 static ssize_t 2052 state_show(mdk_rdev_t *rdev, char *page) 2053 { 2054 char *sep = ""; 2055 size_t len = 0; 2056 2057 if (test_bit(Faulty, &rdev->flags)) { 2058 len+= sprintf(page+len, "%sfaulty",sep); 2059 sep = ","; 2060 } 2061 if (test_bit(In_sync, &rdev->flags)) { 2062 len += sprintf(page+len, "%sin_sync",sep); 2063 sep = ","; 2064 } 2065 if (test_bit(WriteMostly, &rdev->flags)) { 2066 len += sprintf(page+len, "%swrite_mostly",sep); 2067 sep = ","; 2068 } 2069 if (test_bit(Blocked, &rdev->flags)) { 2070 len += sprintf(page+len, "%sblocked", sep); 2071 sep = ","; 2072 } 2073 if (!test_bit(Faulty, &rdev->flags) && 2074 !test_bit(In_sync, &rdev->flags)) { 2075 len += sprintf(page+len, "%sspare", sep); 2076 sep = ","; 2077 } 2078 return len+sprintf(page+len, "\n"); 2079 } 2080 2081 static ssize_t 2082 state_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2083 { 2084 /* can write 2085 * faulty - simulates and error 2086 * remove - disconnects the device 2087 * writemostly - sets write_mostly 2088 * -writemostly - clears write_mostly 2089 * blocked - sets the Blocked flag 2090 * -blocked - clears the Blocked flag 2091 * insync - sets Insync providing device isn't active 2092 */ 2093 int err = -EINVAL; 2094 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { 2095 md_error(rdev->mddev, rdev); 2096 err = 0; 2097 } else if (cmd_match(buf, "remove")) { 2098 if (rdev->raid_disk >= 0) 2099 err = -EBUSY; 2100 else { 2101 mddev_t *mddev = rdev->mddev; 2102 kick_rdev_from_array(rdev); 2103 if (mddev->pers) 2104 md_update_sb(mddev, 1); 2105 md_new_event(mddev); 2106 err = 0; 2107 } 2108 } else if (cmd_match(buf, "writemostly")) { 2109 set_bit(WriteMostly, &rdev->flags); 2110 err = 0; 2111 } else if (cmd_match(buf, "-writemostly")) { 2112 clear_bit(WriteMostly, &rdev->flags); 2113 err = 0; 2114 } else if (cmd_match(buf, "blocked")) { 2115 set_bit(Blocked, &rdev->flags); 2116 err = 0; 2117 } else if (cmd_match(buf, "-blocked")) { 2118 clear_bit(Blocked, &rdev->flags); 2119 wake_up(&rdev->blocked_wait); 2120 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2121 md_wakeup_thread(rdev->mddev->thread); 2122 2123 err = 0; 2124 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) { 2125 set_bit(In_sync, &rdev->flags); 2126 err = 0; 2127 } 2128 if (!err && rdev->sysfs_state) 2129 sysfs_notify_dirent(rdev->sysfs_state); 2130 return err ? err : len; 2131 } 2132 static struct rdev_sysfs_entry rdev_state = 2133 __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store); 2134 2135 static ssize_t 2136 errors_show(mdk_rdev_t *rdev, char *page) 2137 { 2138 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); 2139 } 2140 2141 static ssize_t 2142 errors_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2143 { 2144 char *e; 2145 unsigned long n = simple_strtoul(buf, &e, 10); 2146 if (*buf && (*e == 0 || *e == '\n')) { 2147 atomic_set(&rdev->corrected_errors, n); 2148 return len; 2149 } 2150 return -EINVAL; 2151 } 2152 static struct rdev_sysfs_entry rdev_errors = 2153 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store); 2154 2155 static ssize_t 2156 slot_show(mdk_rdev_t *rdev, char *page) 2157 { 2158 if (rdev->raid_disk < 0) 2159 return sprintf(page, "none\n"); 2160 else 2161 return sprintf(page, "%d\n", rdev->raid_disk); 2162 } 2163 2164 static ssize_t 2165 slot_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2166 { 2167 char *e; 2168 int err; 2169 char nm[20]; 2170 int slot = simple_strtoul(buf, &e, 10); 2171 if (strncmp(buf, "none", 4)==0) 2172 slot = -1; 2173 else if (e==buf || (*e && *e!= '\n')) 2174 return -EINVAL; 2175 if (rdev->mddev->pers && slot == -1) { 2176 /* Setting 'slot' on an active array requires also 2177 * updating the 'rd%d' link, and communicating 2178 * with the personality with ->hot_*_disk. 2179 * For now we only support removing 2180 * failed/spare devices. This normally happens automatically, 2181 * but not when the metadata is externally managed. 2182 */ 2183 if (rdev->raid_disk == -1) 2184 return -EEXIST; 2185 /* personality does all needed checks */ 2186 if (rdev->mddev->pers->hot_add_disk == NULL) 2187 return -EINVAL; 2188 err = rdev->mddev->pers-> 2189 hot_remove_disk(rdev->mddev, rdev->raid_disk); 2190 if (err) 2191 return err; 2192 sprintf(nm, "rd%d", rdev->raid_disk); 2193 sysfs_remove_link(&rdev->mddev->kobj, nm); 2194 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2195 md_wakeup_thread(rdev->mddev->thread); 2196 } else if (rdev->mddev->pers) { 2197 mdk_rdev_t *rdev2; 2198 /* Activating a spare .. or possibly reactivating 2199 * if we ever get bitmaps working here. 2200 */ 2201 2202 if (rdev->raid_disk != -1) 2203 return -EBUSY; 2204 2205 if (rdev->mddev->pers->hot_add_disk == NULL) 2206 return -EINVAL; 2207 2208 list_for_each_entry(rdev2, &rdev->mddev->disks, same_set) 2209 if (rdev2->raid_disk == slot) 2210 return -EEXIST; 2211 2212 rdev->raid_disk = slot; 2213 if (test_bit(In_sync, &rdev->flags)) 2214 rdev->saved_raid_disk = slot; 2215 else 2216 rdev->saved_raid_disk = -1; 2217 err = rdev->mddev->pers-> 2218 hot_add_disk(rdev->mddev, rdev); 2219 if (err) { 2220 rdev->raid_disk = -1; 2221 return err; 2222 } else 2223 sysfs_notify_dirent(rdev->sysfs_state); 2224 sprintf(nm, "rd%d", rdev->raid_disk); 2225 if (sysfs_create_link(&rdev->mddev->kobj, &rdev->kobj, nm)) 2226 printk(KERN_WARNING 2227 "md: cannot register " 2228 "%s for %s\n", 2229 nm, mdname(rdev->mddev)); 2230 2231 /* don't wakeup anyone, leave that to userspace. */ 2232 } else { 2233 if (slot >= rdev->mddev->raid_disks) 2234 return -ENOSPC; 2235 rdev->raid_disk = slot; 2236 /* assume it is working */ 2237 clear_bit(Faulty, &rdev->flags); 2238 clear_bit(WriteMostly, &rdev->flags); 2239 set_bit(In_sync, &rdev->flags); 2240 sysfs_notify_dirent(rdev->sysfs_state); 2241 } 2242 return len; 2243 } 2244 2245 2246 static struct rdev_sysfs_entry rdev_slot = 2247 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store); 2248 2249 static ssize_t 2250 offset_show(mdk_rdev_t *rdev, char *page) 2251 { 2252 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); 2253 } 2254 2255 static ssize_t 2256 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2257 { 2258 char *e; 2259 unsigned long long offset = simple_strtoull(buf, &e, 10); 2260 if (e==buf || (*e && *e != '\n')) 2261 return -EINVAL; 2262 if (rdev->mddev->pers && rdev->raid_disk >= 0) 2263 return -EBUSY; 2264 if (rdev->sectors && rdev->mddev->external) 2265 /* Must set offset before size, so overlap checks 2266 * can be sane */ 2267 return -EBUSY; 2268 rdev->data_offset = offset; 2269 return len; 2270 } 2271 2272 static struct rdev_sysfs_entry rdev_offset = 2273 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store); 2274 2275 static ssize_t 2276 rdev_size_show(mdk_rdev_t *rdev, char *page) 2277 { 2278 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2); 2279 } 2280 2281 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2) 2282 { 2283 /* check if two start/length pairs overlap */ 2284 if (s1+l1 <= s2) 2285 return 0; 2286 if (s2+l2 <= s1) 2287 return 0; 2288 return 1; 2289 } 2290 2291 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors) 2292 { 2293 unsigned long long blocks; 2294 sector_t new; 2295 2296 if (strict_strtoull(buf, 10, &blocks) < 0) 2297 return -EINVAL; 2298 2299 if (blocks & 1ULL << (8 * sizeof(blocks) - 1)) 2300 return -EINVAL; /* sector conversion overflow */ 2301 2302 new = blocks * 2; 2303 if (new != blocks * 2) 2304 return -EINVAL; /* unsigned long long to sector_t overflow */ 2305 2306 *sectors = new; 2307 return 0; 2308 } 2309 2310 static ssize_t 2311 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2312 { 2313 mddev_t *my_mddev = rdev->mddev; 2314 sector_t oldsectors = rdev->sectors; 2315 sector_t sectors; 2316 2317 if (strict_blocks_to_sectors(buf, §ors) < 0) 2318 return -EINVAL; 2319 if (my_mddev->pers && rdev->raid_disk >= 0) { 2320 if (my_mddev->persistent) { 2321 sectors = super_types[my_mddev->major_version]. 2322 rdev_size_change(rdev, sectors); 2323 if (!sectors) 2324 return -EBUSY; 2325 } else if (!sectors) 2326 sectors = (rdev->bdev->bd_inode->i_size >> 9) - 2327 rdev->data_offset; 2328 } 2329 if (sectors < my_mddev->dev_sectors) 2330 return -EINVAL; /* component must fit device */ 2331 2332 rdev->sectors = sectors; 2333 if (sectors > oldsectors && my_mddev->external) { 2334 /* need to check that all other rdevs with the same ->bdev 2335 * do not overlap. We need to unlock the mddev to avoid 2336 * a deadlock. We have already changed rdev->sectors, and if 2337 * we have to change it back, we will have the lock again. 2338 */ 2339 mddev_t *mddev; 2340 int overlap = 0; 2341 struct list_head *tmp; 2342 2343 mddev_unlock(my_mddev); 2344 for_each_mddev(mddev, tmp) { 2345 mdk_rdev_t *rdev2; 2346 2347 mddev_lock(mddev); 2348 list_for_each_entry(rdev2, &mddev->disks, same_set) 2349 if (test_bit(AllReserved, &rdev2->flags) || 2350 (rdev->bdev == rdev2->bdev && 2351 rdev != rdev2 && 2352 overlaps(rdev->data_offset, rdev->sectors, 2353 rdev2->data_offset, 2354 rdev2->sectors))) { 2355 overlap = 1; 2356 break; 2357 } 2358 mddev_unlock(mddev); 2359 if (overlap) { 2360 mddev_put(mddev); 2361 break; 2362 } 2363 } 2364 mddev_lock(my_mddev); 2365 if (overlap) { 2366 /* Someone else could have slipped in a size 2367 * change here, but doing so is just silly. 2368 * We put oldsectors back because we *know* it is 2369 * safe, and trust userspace not to race with 2370 * itself 2371 */ 2372 rdev->sectors = oldsectors; 2373 return -EBUSY; 2374 } 2375 } 2376 return len; 2377 } 2378 2379 static struct rdev_sysfs_entry rdev_size = 2380 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store); 2381 2382 static struct attribute *rdev_default_attrs[] = { 2383 &rdev_state.attr, 2384 &rdev_errors.attr, 2385 &rdev_slot.attr, 2386 &rdev_offset.attr, 2387 &rdev_size.attr, 2388 NULL, 2389 }; 2390 static ssize_t 2391 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 2392 { 2393 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 2394 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); 2395 mddev_t *mddev = rdev->mddev; 2396 ssize_t rv; 2397 2398 if (!entry->show) 2399 return -EIO; 2400 2401 rv = mddev ? mddev_lock(mddev) : -EBUSY; 2402 if (!rv) { 2403 if (rdev->mddev == NULL) 2404 rv = -EBUSY; 2405 else 2406 rv = entry->show(rdev, page); 2407 mddev_unlock(mddev); 2408 } 2409 return rv; 2410 } 2411 2412 static ssize_t 2413 rdev_attr_store(struct kobject *kobj, struct attribute *attr, 2414 const char *page, size_t length) 2415 { 2416 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 2417 mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj); 2418 ssize_t rv; 2419 mddev_t *mddev = rdev->mddev; 2420 2421 if (!entry->store) 2422 return -EIO; 2423 if (!capable(CAP_SYS_ADMIN)) 2424 return -EACCES; 2425 rv = mddev ? mddev_lock(mddev): -EBUSY; 2426 if (!rv) { 2427 if (rdev->mddev == NULL) 2428 rv = -EBUSY; 2429 else 2430 rv = entry->store(rdev, page, length); 2431 mddev_unlock(mddev); 2432 } 2433 return rv; 2434 } 2435 2436 static void rdev_free(struct kobject *ko) 2437 { 2438 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj); 2439 kfree(rdev); 2440 } 2441 static struct sysfs_ops rdev_sysfs_ops = { 2442 .show = rdev_attr_show, 2443 .store = rdev_attr_store, 2444 }; 2445 static struct kobj_type rdev_ktype = { 2446 .release = rdev_free, 2447 .sysfs_ops = &rdev_sysfs_ops, 2448 .default_attrs = rdev_default_attrs, 2449 }; 2450 2451 /* 2452 * Import a device. If 'super_format' >= 0, then sanity check the superblock 2453 * 2454 * mark the device faulty if: 2455 * 2456 * - the device is nonexistent (zero size) 2457 * - the device has no valid superblock 2458 * 2459 * a faulty rdev _never_ has rdev->sb set. 2460 */ 2461 static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor) 2462 { 2463 char b[BDEVNAME_SIZE]; 2464 int err; 2465 mdk_rdev_t *rdev; 2466 sector_t size; 2467 2468 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); 2469 if (!rdev) { 2470 printk(KERN_ERR "md: could not alloc mem for new device!\n"); 2471 return ERR_PTR(-ENOMEM); 2472 } 2473 2474 if ((err = alloc_disk_sb(rdev))) 2475 goto abort_free; 2476 2477 err = lock_rdev(rdev, newdev, super_format == -2); 2478 if (err) 2479 goto abort_free; 2480 2481 kobject_init(&rdev->kobj, &rdev_ktype); 2482 2483 rdev->desc_nr = -1; 2484 rdev->saved_raid_disk = -1; 2485 rdev->raid_disk = -1; 2486 rdev->flags = 0; 2487 rdev->data_offset = 0; 2488 rdev->sb_events = 0; 2489 atomic_set(&rdev->nr_pending, 0); 2490 atomic_set(&rdev->read_errors, 0); 2491 atomic_set(&rdev->corrected_errors, 0); 2492 2493 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS; 2494 if (!size) { 2495 printk(KERN_WARNING 2496 "md: %s has zero or unknown size, marking faulty!\n", 2497 bdevname(rdev->bdev,b)); 2498 err = -EINVAL; 2499 goto abort_free; 2500 } 2501 2502 if (super_format >= 0) { 2503 err = super_types[super_format]. 2504 load_super(rdev, NULL, super_minor); 2505 if (err == -EINVAL) { 2506 printk(KERN_WARNING 2507 "md: %s does not have a valid v%d.%d " 2508 "superblock, not importing!\n", 2509 bdevname(rdev->bdev,b), 2510 super_format, super_minor); 2511 goto abort_free; 2512 } 2513 if (err < 0) { 2514 printk(KERN_WARNING 2515 "md: could not read %s's sb, not importing!\n", 2516 bdevname(rdev->bdev,b)); 2517 goto abort_free; 2518 } 2519 } 2520 2521 INIT_LIST_HEAD(&rdev->same_set); 2522 init_waitqueue_head(&rdev->blocked_wait); 2523 2524 return rdev; 2525 2526 abort_free: 2527 if (rdev->sb_page) { 2528 if (rdev->bdev) 2529 unlock_rdev(rdev); 2530 free_disk_sb(rdev); 2531 } 2532 kfree(rdev); 2533 return ERR_PTR(err); 2534 } 2535 2536 /* 2537 * Check a full RAID array for plausibility 2538 */ 2539 2540 2541 static void analyze_sbs(mddev_t * mddev) 2542 { 2543 int i; 2544 mdk_rdev_t *rdev, *freshest, *tmp; 2545 char b[BDEVNAME_SIZE]; 2546 2547 freshest = NULL; 2548 rdev_for_each(rdev, tmp, mddev) 2549 switch (super_types[mddev->major_version]. 2550 load_super(rdev, freshest, mddev->minor_version)) { 2551 case 1: 2552 freshest = rdev; 2553 break; 2554 case 0: 2555 break; 2556 default: 2557 printk( KERN_ERR \ 2558 "md: fatal superblock inconsistency in %s" 2559 " -- removing from array\n", 2560 bdevname(rdev->bdev,b)); 2561 kick_rdev_from_array(rdev); 2562 } 2563 2564 2565 super_types[mddev->major_version]. 2566 validate_super(mddev, freshest); 2567 2568 i = 0; 2569 rdev_for_each(rdev, tmp, mddev) { 2570 if (rdev->desc_nr >= mddev->max_disks || 2571 i > mddev->max_disks) { 2572 printk(KERN_WARNING 2573 "md: %s: %s: only %d devices permitted\n", 2574 mdname(mddev), bdevname(rdev->bdev, b), 2575 mddev->max_disks); 2576 kick_rdev_from_array(rdev); 2577 continue; 2578 } 2579 if (rdev != freshest) 2580 if (super_types[mddev->major_version]. 2581 validate_super(mddev, rdev)) { 2582 printk(KERN_WARNING "md: kicking non-fresh %s" 2583 " from array!\n", 2584 bdevname(rdev->bdev,b)); 2585 kick_rdev_from_array(rdev); 2586 continue; 2587 } 2588 if (mddev->level == LEVEL_MULTIPATH) { 2589 rdev->desc_nr = i++; 2590 rdev->raid_disk = rdev->desc_nr; 2591 set_bit(In_sync, &rdev->flags); 2592 } else if (rdev->raid_disk >= mddev->raid_disks) { 2593 rdev->raid_disk = -1; 2594 clear_bit(In_sync, &rdev->flags); 2595 } 2596 } 2597 2598 2599 2600 if (mddev->recovery_cp != MaxSector && 2601 mddev->level >= 1) 2602 printk(KERN_ERR "md: %s: raid array is not clean" 2603 " -- starting background reconstruction\n", 2604 mdname(mddev)); 2605 2606 } 2607 2608 static void md_safemode_timeout(unsigned long data); 2609 2610 static ssize_t 2611 safe_delay_show(mddev_t *mddev, char *page) 2612 { 2613 int msec = (mddev->safemode_delay*1000)/HZ; 2614 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000); 2615 } 2616 static ssize_t 2617 safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len) 2618 { 2619 int scale=1; 2620 int dot=0; 2621 int i; 2622 unsigned long msec; 2623 char buf[30]; 2624 2625 /* remove a period, and count digits after it */ 2626 if (len >= sizeof(buf)) 2627 return -EINVAL; 2628 strlcpy(buf, cbuf, sizeof(buf)); 2629 for (i=0; i<len; i++) { 2630 if (dot) { 2631 if (isdigit(buf[i])) { 2632 buf[i-1] = buf[i]; 2633 scale *= 10; 2634 } 2635 buf[i] = 0; 2636 } else if (buf[i] == '.') { 2637 dot=1; 2638 buf[i] = 0; 2639 } 2640 } 2641 if (strict_strtoul(buf, 10, &msec) < 0) 2642 return -EINVAL; 2643 msec = (msec * 1000) / scale; 2644 if (msec == 0) 2645 mddev->safemode_delay = 0; 2646 else { 2647 unsigned long old_delay = mddev->safemode_delay; 2648 mddev->safemode_delay = (msec*HZ)/1000; 2649 if (mddev->safemode_delay == 0) 2650 mddev->safemode_delay = 1; 2651 if (mddev->safemode_delay < old_delay) 2652 md_safemode_timeout((unsigned long)mddev); 2653 } 2654 return len; 2655 } 2656 static struct md_sysfs_entry md_safe_delay = 2657 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store); 2658 2659 static ssize_t 2660 level_show(mddev_t *mddev, char *page) 2661 { 2662 struct mdk_personality *p = mddev->pers; 2663 if (p) 2664 return sprintf(page, "%s\n", p->name); 2665 else if (mddev->clevel[0]) 2666 return sprintf(page, "%s\n", mddev->clevel); 2667 else if (mddev->level != LEVEL_NONE) 2668 return sprintf(page, "%d\n", mddev->level); 2669 else 2670 return 0; 2671 } 2672 2673 static ssize_t 2674 level_store(mddev_t *mddev, const char *buf, size_t len) 2675 { 2676 char level[16]; 2677 ssize_t rv = len; 2678 struct mdk_personality *pers; 2679 void *priv; 2680 2681 if (mddev->pers == NULL) { 2682 if (len == 0) 2683 return 0; 2684 if (len >= sizeof(mddev->clevel)) 2685 return -ENOSPC; 2686 strncpy(mddev->clevel, buf, len); 2687 if (mddev->clevel[len-1] == '\n') 2688 len--; 2689 mddev->clevel[len] = 0; 2690 mddev->level = LEVEL_NONE; 2691 return rv; 2692 } 2693 2694 /* request to change the personality. Need to ensure: 2695 * - array is not engaged in resync/recovery/reshape 2696 * - old personality can be suspended 2697 * - new personality will access other array. 2698 */ 2699 2700 if (mddev->sync_thread || mddev->reshape_position != MaxSector) 2701 return -EBUSY; 2702 2703 if (!mddev->pers->quiesce) { 2704 printk(KERN_WARNING "md: %s: %s does not support online personality change\n", 2705 mdname(mddev), mddev->pers->name); 2706 return -EINVAL; 2707 } 2708 2709 /* Now find the new personality */ 2710 if (len == 0 || len >= sizeof(level)) 2711 return -EINVAL; 2712 strncpy(level, buf, len); 2713 if (level[len-1] == '\n') 2714 len--; 2715 level[len] = 0; 2716 2717 request_module("md-%s", level); 2718 spin_lock(&pers_lock); 2719 pers = find_pers(LEVEL_NONE, level); 2720 if (!pers || !try_module_get(pers->owner)) { 2721 spin_unlock(&pers_lock); 2722 printk(KERN_WARNING "md: personality %s not loaded\n", level); 2723 return -EINVAL; 2724 } 2725 spin_unlock(&pers_lock); 2726 2727 if (pers == mddev->pers) { 2728 /* Nothing to do! */ 2729 module_put(pers->owner); 2730 return rv; 2731 } 2732 if (!pers->takeover) { 2733 module_put(pers->owner); 2734 printk(KERN_WARNING "md: %s: %s does not support personality takeover\n", 2735 mdname(mddev), level); 2736 return -EINVAL; 2737 } 2738 2739 /* ->takeover must set new_* and/or delta_disks 2740 * if it succeeds, and may set them when it fails. 2741 */ 2742 priv = pers->takeover(mddev); 2743 if (IS_ERR(priv)) { 2744 mddev->new_level = mddev->level; 2745 mddev->new_layout = mddev->layout; 2746 mddev->new_chunk = mddev->chunk_size; 2747 mddev->raid_disks -= mddev->delta_disks; 2748 mddev->delta_disks = 0; 2749 module_put(pers->owner); 2750 printk(KERN_WARNING "md: %s: %s would not accept array\n", 2751 mdname(mddev), level); 2752 return PTR_ERR(priv); 2753 } 2754 2755 /* Looks like we have a winner */ 2756 mddev_suspend(mddev); 2757 mddev->pers->stop(mddev); 2758 module_put(mddev->pers->owner); 2759 mddev->pers = pers; 2760 mddev->private = priv; 2761 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 2762 mddev->level = mddev->new_level; 2763 mddev->layout = mddev->new_layout; 2764 mddev->chunk_size = mddev->new_chunk; 2765 mddev->delta_disks = 0; 2766 pers->run(mddev); 2767 mddev_resume(mddev); 2768 set_bit(MD_CHANGE_DEVS, &mddev->flags); 2769 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 2770 md_wakeup_thread(mddev->thread); 2771 return rv; 2772 } 2773 2774 static struct md_sysfs_entry md_level = 2775 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store); 2776 2777 2778 static ssize_t 2779 layout_show(mddev_t *mddev, char *page) 2780 { 2781 /* just a number, not meaningful for all levels */ 2782 if (mddev->reshape_position != MaxSector && 2783 mddev->layout != mddev->new_layout) 2784 return sprintf(page, "%d (%d)\n", 2785 mddev->new_layout, mddev->layout); 2786 return sprintf(page, "%d\n", mddev->layout); 2787 } 2788 2789 static ssize_t 2790 layout_store(mddev_t *mddev, const char *buf, size_t len) 2791 { 2792 char *e; 2793 unsigned long n = simple_strtoul(buf, &e, 10); 2794 2795 if (!*buf || (*e && *e != '\n')) 2796 return -EINVAL; 2797 2798 if (mddev->pers) { 2799 int err; 2800 if (mddev->pers->reconfig == NULL) 2801 return -EBUSY; 2802 err = mddev->pers->reconfig(mddev, n, -1); 2803 if (err) 2804 return err; 2805 } else { 2806 mddev->new_layout = n; 2807 if (mddev->reshape_position == MaxSector) 2808 mddev->layout = n; 2809 } 2810 return len; 2811 } 2812 static struct md_sysfs_entry md_layout = 2813 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store); 2814 2815 2816 static ssize_t 2817 raid_disks_show(mddev_t *mddev, char *page) 2818 { 2819 if (mddev->raid_disks == 0) 2820 return 0; 2821 if (mddev->reshape_position != MaxSector && 2822 mddev->delta_disks != 0) 2823 return sprintf(page, "%d (%d)\n", mddev->raid_disks, 2824 mddev->raid_disks - mddev->delta_disks); 2825 return sprintf(page, "%d\n", mddev->raid_disks); 2826 } 2827 2828 static int update_raid_disks(mddev_t *mddev, int raid_disks); 2829 2830 static ssize_t 2831 raid_disks_store(mddev_t *mddev, const char *buf, size_t len) 2832 { 2833 char *e; 2834 int rv = 0; 2835 unsigned long n = simple_strtoul(buf, &e, 10); 2836 2837 if (!*buf || (*e && *e != '\n')) 2838 return -EINVAL; 2839 2840 if (mddev->pers) 2841 rv = update_raid_disks(mddev, n); 2842 else if (mddev->reshape_position != MaxSector) { 2843 int olddisks = mddev->raid_disks - mddev->delta_disks; 2844 mddev->delta_disks = n - olddisks; 2845 mddev->raid_disks = n; 2846 } else 2847 mddev->raid_disks = n; 2848 return rv ? rv : len; 2849 } 2850 static struct md_sysfs_entry md_raid_disks = 2851 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store); 2852 2853 static ssize_t 2854 chunk_size_show(mddev_t *mddev, char *page) 2855 { 2856 if (mddev->reshape_position != MaxSector && 2857 mddev->chunk_size != mddev->new_chunk) 2858 return sprintf(page, "%d (%d)\n", mddev->new_chunk, 2859 mddev->chunk_size); 2860 return sprintf(page, "%d\n", mddev->chunk_size); 2861 } 2862 2863 static ssize_t 2864 chunk_size_store(mddev_t *mddev, const char *buf, size_t len) 2865 { 2866 char *e; 2867 unsigned long n = simple_strtoul(buf, &e, 10); 2868 2869 if (!*buf || (*e && *e != '\n')) 2870 return -EINVAL; 2871 2872 if (mddev->pers) { 2873 int err; 2874 if (mddev->pers->reconfig == NULL) 2875 return -EBUSY; 2876 err = mddev->pers->reconfig(mddev, -1, n); 2877 if (err) 2878 return err; 2879 } else { 2880 mddev->new_chunk = n; 2881 if (mddev->reshape_position == MaxSector) 2882 mddev->chunk_size = n; 2883 } 2884 return len; 2885 } 2886 static struct md_sysfs_entry md_chunk_size = 2887 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store); 2888 2889 static ssize_t 2890 resync_start_show(mddev_t *mddev, char *page) 2891 { 2892 if (mddev->recovery_cp == MaxSector) 2893 return sprintf(page, "none\n"); 2894 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp); 2895 } 2896 2897 static ssize_t 2898 resync_start_store(mddev_t *mddev, const char *buf, size_t len) 2899 { 2900 char *e; 2901 unsigned long long n = simple_strtoull(buf, &e, 10); 2902 2903 if (mddev->pers) 2904 return -EBUSY; 2905 if (!*buf || (*e && *e != '\n')) 2906 return -EINVAL; 2907 2908 mddev->recovery_cp = n; 2909 return len; 2910 } 2911 static struct md_sysfs_entry md_resync_start = 2912 __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store); 2913 2914 /* 2915 * The array state can be: 2916 * 2917 * clear 2918 * No devices, no size, no level 2919 * Equivalent to STOP_ARRAY ioctl 2920 * inactive 2921 * May have some settings, but array is not active 2922 * all IO results in error 2923 * When written, doesn't tear down array, but just stops it 2924 * suspended (not supported yet) 2925 * All IO requests will block. The array can be reconfigured. 2926 * Writing this, if accepted, will block until array is quiescent 2927 * readonly 2928 * no resync can happen. no superblocks get written. 2929 * write requests fail 2930 * read-auto 2931 * like readonly, but behaves like 'clean' on a write request. 2932 * 2933 * clean - no pending writes, but otherwise active. 2934 * When written to inactive array, starts without resync 2935 * If a write request arrives then 2936 * if metadata is known, mark 'dirty' and switch to 'active'. 2937 * if not known, block and switch to write-pending 2938 * If written to an active array that has pending writes, then fails. 2939 * active 2940 * fully active: IO and resync can be happening. 2941 * When written to inactive array, starts with resync 2942 * 2943 * write-pending 2944 * clean, but writes are blocked waiting for 'active' to be written. 2945 * 2946 * active-idle 2947 * like active, but no writes have been seen for a while (100msec). 2948 * 2949 */ 2950 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active, 2951 write_pending, active_idle, bad_word}; 2952 static char *array_states[] = { 2953 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active", 2954 "write-pending", "active-idle", NULL }; 2955 2956 static int match_word(const char *word, char **list) 2957 { 2958 int n; 2959 for (n=0; list[n]; n++) 2960 if (cmd_match(word, list[n])) 2961 break; 2962 return n; 2963 } 2964 2965 static ssize_t 2966 array_state_show(mddev_t *mddev, char *page) 2967 { 2968 enum array_state st = inactive; 2969 2970 if (mddev->pers) 2971 switch(mddev->ro) { 2972 case 1: 2973 st = readonly; 2974 break; 2975 case 2: 2976 st = read_auto; 2977 break; 2978 case 0: 2979 if (mddev->in_sync) 2980 st = clean; 2981 else if (test_bit(MD_CHANGE_CLEAN, &mddev->flags)) 2982 st = write_pending; 2983 else if (mddev->safemode) 2984 st = active_idle; 2985 else 2986 st = active; 2987 } 2988 else { 2989 if (list_empty(&mddev->disks) && 2990 mddev->raid_disks == 0 && 2991 mddev->dev_sectors == 0) 2992 st = clear; 2993 else 2994 st = inactive; 2995 } 2996 return sprintf(page, "%s\n", array_states[st]); 2997 } 2998 2999 static int do_md_stop(mddev_t * mddev, int ro, int is_open); 3000 static int do_md_run(mddev_t * mddev); 3001 static int restart_array(mddev_t *mddev); 3002 3003 static ssize_t 3004 array_state_store(mddev_t *mddev, const char *buf, size_t len) 3005 { 3006 int err = -EINVAL; 3007 enum array_state st = match_word(buf, array_states); 3008 switch(st) { 3009 case bad_word: 3010 break; 3011 case clear: 3012 /* stopping an active array */ 3013 if (atomic_read(&mddev->openers) > 0) 3014 return -EBUSY; 3015 err = do_md_stop(mddev, 0, 0); 3016 break; 3017 case inactive: 3018 /* stopping an active array */ 3019 if (mddev->pers) { 3020 if (atomic_read(&mddev->openers) > 0) 3021 return -EBUSY; 3022 err = do_md_stop(mddev, 2, 0); 3023 } else 3024 err = 0; /* already inactive */ 3025 break; 3026 case suspended: 3027 break; /* not supported yet */ 3028 case readonly: 3029 if (mddev->pers) 3030 err = do_md_stop(mddev, 1, 0); 3031 else { 3032 mddev->ro = 1; 3033 set_disk_ro(mddev->gendisk, 1); 3034 err = do_md_run(mddev); 3035 } 3036 break; 3037 case read_auto: 3038 if (mddev->pers) { 3039 if (mddev->ro == 0) 3040 err = do_md_stop(mddev, 1, 0); 3041 else if (mddev->ro == 1) 3042 err = restart_array(mddev); 3043 if (err == 0) { 3044 mddev->ro = 2; 3045 set_disk_ro(mddev->gendisk, 0); 3046 } 3047 } else { 3048 mddev->ro = 2; 3049 err = do_md_run(mddev); 3050 } 3051 break; 3052 case clean: 3053 if (mddev->pers) { 3054 restart_array(mddev); 3055 spin_lock_irq(&mddev->write_lock); 3056 if (atomic_read(&mddev->writes_pending) == 0) { 3057 if (mddev->in_sync == 0) { 3058 mddev->in_sync = 1; 3059 if (mddev->safemode == 1) 3060 mddev->safemode = 0; 3061 if (mddev->persistent) 3062 set_bit(MD_CHANGE_CLEAN, 3063 &mddev->flags); 3064 } 3065 err = 0; 3066 } else 3067 err = -EBUSY; 3068 spin_unlock_irq(&mddev->write_lock); 3069 } else { 3070 mddev->ro = 0; 3071 mddev->recovery_cp = MaxSector; 3072 err = do_md_run(mddev); 3073 } 3074 break; 3075 case active: 3076 if (mddev->pers) { 3077 restart_array(mddev); 3078 if (mddev->external) 3079 clear_bit(MD_CHANGE_CLEAN, &mddev->flags); 3080 wake_up(&mddev->sb_wait); 3081 err = 0; 3082 } else { 3083 mddev->ro = 0; 3084 set_disk_ro(mddev->gendisk, 0); 3085 err = do_md_run(mddev); 3086 } 3087 break; 3088 case write_pending: 3089 case active_idle: 3090 /* these cannot be set */ 3091 break; 3092 } 3093 if (err) 3094 return err; 3095 else { 3096 sysfs_notify_dirent(mddev->sysfs_state); 3097 return len; 3098 } 3099 } 3100 static struct md_sysfs_entry md_array_state = 3101 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); 3102 3103 static ssize_t 3104 null_show(mddev_t *mddev, char *page) 3105 { 3106 return -EINVAL; 3107 } 3108 3109 static ssize_t 3110 new_dev_store(mddev_t *mddev, const char *buf, size_t len) 3111 { 3112 /* buf must be %d:%d\n? giving major and minor numbers */ 3113 /* The new device is added to the array. 3114 * If the array has a persistent superblock, we read the 3115 * superblock to initialise info and check validity. 3116 * Otherwise, only checking done is that in bind_rdev_to_array, 3117 * which mainly checks size. 3118 */ 3119 char *e; 3120 int major = simple_strtoul(buf, &e, 10); 3121 int minor; 3122 dev_t dev; 3123 mdk_rdev_t *rdev; 3124 int err; 3125 3126 if (!*buf || *e != ':' || !e[1] || e[1] == '\n') 3127 return -EINVAL; 3128 minor = simple_strtoul(e+1, &e, 10); 3129 if (*e && *e != '\n') 3130 return -EINVAL; 3131 dev = MKDEV(major, minor); 3132 if (major != MAJOR(dev) || 3133 minor != MINOR(dev)) 3134 return -EOVERFLOW; 3135 3136 3137 if (mddev->persistent) { 3138 rdev = md_import_device(dev, mddev->major_version, 3139 mddev->minor_version); 3140 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { 3141 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, 3142 mdk_rdev_t, same_set); 3143 err = super_types[mddev->major_version] 3144 .load_super(rdev, rdev0, mddev->minor_version); 3145 if (err < 0) 3146 goto out; 3147 } 3148 } else if (mddev->external) 3149 rdev = md_import_device(dev, -2, -1); 3150 else 3151 rdev = md_import_device(dev, -1, -1); 3152 3153 if (IS_ERR(rdev)) 3154 return PTR_ERR(rdev); 3155 err = bind_rdev_to_array(rdev, mddev); 3156 out: 3157 if (err) 3158 export_rdev(rdev); 3159 return err ? err : len; 3160 } 3161 3162 static struct md_sysfs_entry md_new_device = 3163 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store); 3164 3165 static ssize_t 3166 bitmap_store(mddev_t *mddev, const char *buf, size_t len) 3167 { 3168 char *end; 3169 unsigned long chunk, end_chunk; 3170 3171 if (!mddev->bitmap) 3172 goto out; 3173 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... (range) */ 3174 while (*buf) { 3175 chunk = end_chunk = simple_strtoul(buf, &end, 0); 3176 if (buf == end) break; 3177 if (*end == '-') { /* range */ 3178 buf = end + 1; 3179 end_chunk = simple_strtoul(buf, &end, 0); 3180 if (buf == end) break; 3181 } 3182 if (*end && !isspace(*end)) break; 3183 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk); 3184 buf = end; 3185 while (isspace(*buf)) buf++; 3186 } 3187 bitmap_unplug(mddev->bitmap); /* flush the bits to disk */ 3188 out: 3189 return len; 3190 } 3191 3192 static struct md_sysfs_entry md_bitmap = 3193 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store); 3194 3195 static ssize_t 3196 size_show(mddev_t *mddev, char *page) 3197 { 3198 return sprintf(page, "%llu\n", 3199 (unsigned long long)mddev->dev_sectors / 2); 3200 } 3201 3202 static int update_size(mddev_t *mddev, sector_t num_sectors); 3203 3204 static ssize_t 3205 size_store(mddev_t *mddev, const char *buf, size_t len) 3206 { 3207 /* If array is inactive, we can reduce the component size, but 3208 * not increase it (except from 0). 3209 * If array is active, we can try an on-line resize 3210 */ 3211 sector_t sectors; 3212 int err = strict_blocks_to_sectors(buf, §ors); 3213 3214 if (err < 0) 3215 return err; 3216 if (mddev->pers) { 3217 err = update_size(mddev, sectors); 3218 md_update_sb(mddev, 1); 3219 } else { 3220 if (mddev->dev_sectors == 0 || 3221 mddev->dev_sectors > sectors) 3222 mddev->dev_sectors = sectors; 3223 else 3224 err = -ENOSPC; 3225 } 3226 return err ? err : len; 3227 } 3228 3229 static struct md_sysfs_entry md_size = 3230 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store); 3231 3232 3233 /* Metdata version. 3234 * This is one of 3235 * 'none' for arrays with no metadata (good luck...) 3236 * 'external' for arrays with externally managed metadata, 3237 * or N.M for internally known formats 3238 */ 3239 static ssize_t 3240 metadata_show(mddev_t *mddev, char *page) 3241 { 3242 if (mddev->persistent) 3243 return sprintf(page, "%d.%d\n", 3244 mddev->major_version, mddev->minor_version); 3245 else if (mddev->external) 3246 return sprintf(page, "external:%s\n", mddev->metadata_type); 3247 else 3248 return sprintf(page, "none\n"); 3249 } 3250 3251 static ssize_t 3252 metadata_store(mddev_t *mddev, const char *buf, size_t len) 3253 { 3254 int major, minor; 3255 char *e; 3256 /* Changing the details of 'external' metadata is 3257 * always permitted. Otherwise there must be 3258 * no devices attached to the array. 3259 */ 3260 if (mddev->external && strncmp(buf, "external:", 9) == 0) 3261 ; 3262 else if (!list_empty(&mddev->disks)) 3263 return -EBUSY; 3264 3265 if (cmd_match(buf, "none")) { 3266 mddev->persistent = 0; 3267 mddev->external = 0; 3268 mddev->major_version = 0; 3269 mddev->minor_version = 90; 3270 return len; 3271 } 3272 if (strncmp(buf, "external:", 9) == 0) { 3273 size_t namelen = len-9; 3274 if (namelen >= sizeof(mddev->metadata_type)) 3275 namelen = sizeof(mddev->metadata_type)-1; 3276 strncpy(mddev->metadata_type, buf+9, namelen); 3277 mddev->metadata_type[namelen] = 0; 3278 if (namelen && mddev->metadata_type[namelen-1] == '\n') 3279 mddev->metadata_type[--namelen] = 0; 3280 mddev->persistent = 0; 3281 mddev->external = 1; 3282 mddev->major_version = 0; 3283 mddev->minor_version = 90; 3284 return len; 3285 } 3286 major = simple_strtoul(buf, &e, 10); 3287 if (e==buf || *e != '.') 3288 return -EINVAL; 3289 buf = e+1; 3290 minor = simple_strtoul(buf, &e, 10); 3291 if (e==buf || (*e && *e != '\n') ) 3292 return -EINVAL; 3293 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL) 3294 return -ENOENT; 3295 mddev->major_version = major; 3296 mddev->minor_version = minor; 3297 mddev->persistent = 1; 3298 mddev->external = 0; 3299 return len; 3300 } 3301 3302 static struct md_sysfs_entry md_metadata = 3303 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); 3304 3305 static ssize_t 3306 action_show(mddev_t *mddev, char *page) 3307 { 3308 char *type = "idle"; 3309 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 3310 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) { 3311 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 3312 type = "reshape"; 3313 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 3314 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 3315 type = "resync"; 3316 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) 3317 type = "check"; 3318 else 3319 type = "repair"; 3320 } else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) 3321 type = "recover"; 3322 } 3323 return sprintf(page, "%s\n", type); 3324 } 3325 3326 static ssize_t 3327 action_store(mddev_t *mddev, const char *page, size_t len) 3328 { 3329 if (!mddev->pers || !mddev->pers->sync_request) 3330 return -EINVAL; 3331 3332 if (cmd_match(page, "idle")) { 3333 if (mddev->sync_thread) { 3334 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 3335 md_unregister_thread(mddev->sync_thread); 3336 mddev->sync_thread = NULL; 3337 mddev->recovery = 0; 3338 } 3339 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 3340 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 3341 return -EBUSY; 3342 else if (cmd_match(page, "resync")) 3343 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3344 else if (cmd_match(page, "recover")) { 3345 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 3346 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3347 } else if (cmd_match(page, "reshape")) { 3348 int err; 3349 if (mddev->pers->start_reshape == NULL) 3350 return -EINVAL; 3351 err = mddev->pers->start_reshape(mddev); 3352 if (err) 3353 return err; 3354 sysfs_notify(&mddev->kobj, NULL, "degraded"); 3355 } else { 3356 if (cmd_match(page, "check")) 3357 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); 3358 else if (!cmd_match(page, "repair")) 3359 return -EINVAL; 3360 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 3361 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 3362 } 3363 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3364 md_wakeup_thread(mddev->thread); 3365 sysfs_notify_dirent(mddev->sysfs_action); 3366 return len; 3367 } 3368 3369 static ssize_t 3370 mismatch_cnt_show(mddev_t *mddev, char *page) 3371 { 3372 return sprintf(page, "%llu\n", 3373 (unsigned long long) mddev->resync_mismatches); 3374 } 3375 3376 static struct md_sysfs_entry md_scan_mode = 3377 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); 3378 3379 3380 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt); 3381 3382 static ssize_t 3383 sync_min_show(mddev_t *mddev, char *page) 3384 { 3385 return sprintf(page, "%d (%s)\n", speed_min(mddev), 3386 mddev->sync_speed_min ? "local": "system"); 3387 } 3388 3389 static ssize_t 3390 sync_min_store(mddev_t *mddev, const char *buf, size_t len) 3391 { 3392 int min; 3393 char *e; 3394 if (strncmp(buf, "system", 6)==0) { 3395 mddev->sync_speed_min = 0; 3396 return len; 3397 } 3398 min = simple_strtoul(buf, &e, 10); 3399 if (buf == e || (*e && *e != '\n') || min <= 0) 3400 return -EINVAL; 3401 mddev->sync_speed_min = min; 3402 return len; 3403 } 3404 3405 static struct md_sysfs_entry md_sync_min = 3406 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); 3407 3408 static ssize_t 3409 sync_max_show(mddev_t *mddev, char *page) 3410 { 3411 return sprintf(page, "%d (%s)\n", speed_max(mddev), 3412 mddev->sync_speed_max ? "local": "system"); 3413 } 3414 3415 static ssize_t 3416 sync_max_store(mddev_t *mddev, const char *buf, size_t len) 3417 { 3418 int max; 3419 char *e; 3420 if (strncmp(buf, "system", 6)==0) { 3421 mddev->sync_speed_max = 0; 3422 return len; 3423 } 3424 max = simple_strtoul(buf, &e, 10); 3425 if (buf == e || (*e && *e != '\n') || max <= 0) 3426 return -EINVAL; 3427 mddev->sync_speed_max = max; 3428 return len; 3429 } 3430 3431 static struct md_sysfs_entry md_sync_max = 3432 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); 3433 3434 static ssize_t 3435 degraded_show(mddev_t *mddev, char *page) 3436 { 3437 return sprintf(page, "%d\n", mddev->degraded); 3438 } 3439 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded); 3440 3441 static ssize_t 3442 sync_force_parallel_show(mddev_t *mddev, char *page) 3443 { 3444 return sprintf(page, "%d\n", mddev->parallel_resync); 3445 } 3446 3447 static ssize_t 3448 sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len) 3449 { 3450 long n; 3451 3452 if (strict_strtol(buf, 10, &n)) 3453 return -EINVAL; 3454 3455 if (n != 0 && n != 1) 3456 return -EINVAL; 3457 3458 mddev->parallel_resync = n; 3459 3460 if (mddev->sync_thread) 3461 wake_up(&resync_wait); 3462 3463 return len; 3464 } 3465 3466 /* force parallel resync, even with shared block devices */ 3467 static struct md_sysfs_entry md_sync_force_parallel = 3468 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR, 3469 sync_force_parallel_show, sync_force_parallel_store); 3470 3471 static ssize_t 3472 sync_speed_show(mddev_t *mddev, char *page) 3473 { 3474 unsigned long resync, dt, db; 3475 if (mddev->curr_resync == 0) 3476 return sprintf(page, "none\n"); 3477 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active); 3478 dt = (jiffies - mddev->resync_mark) / HZ; 3479 if (!dt) dt++; 3480 db = resync - mddev->resync_mark_cnt; 3481 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */ 3482 } 3483 3484 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed); 3485 3486 static ssize_t 3487 sync_completed_show(mddev_t *mddev, char *page) 3488 { 3489 unsigned long max_sectors, resync; 3490 3491 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 3492 return sprintf(page, "none\n"); 3493 3494 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 3495 max_sectors = mddev->resync_max_sectors; 3496 else 3497 max_sectors = mddev->dev_sectors; 3498 3499 resync = mddev->curr_resync_completed; 3500 return sprintf(page, "%lu / %lu\n", resync, max_sectors); 3501 } 3502 3503 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed); 3504 3505 static ssize_t 3506 min_sync_show(mddev_t *mddev, char *page) 3507 { 3508 return sprintf(page, "%llu\n", 3509 (unsigned long long)mddev->resync_min); 3510 } 3511 static ssize_t 3512 min_sync_store(mddev_t *mddev, const char *buf, size_t len) 3513 { 3514 unsigned long long min; 3515 if (strict_strtoull(buf, 10, &min)) 3516 return -EINVAL; 3517 if (min > mddev->resync_max) 3518 return -EINVAL; 3519 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 3520 return -EBUSY; 3521 3522 /* Must be a multiple of chunk_size */ 3523 if (mddev->chunk_size) { 3524 if (min & (sector_t)((mddev->chunk_size>>9)-1)) 3525 return -EINVAL; 3526 } 3527 mddev->resync_min = min; 3528 3529 return len; 3530 } 3531 3532 static struct md_sysfs_entry md_min_sync = 3533 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store); 3534 3535 static ssize_t 3536 max_sync_show(mddev_t *mddev, char *page) 3537 { 3538 if (mddev->resync_max == MaxSector) 3539 return sprintf(page, "max\n"); 3540 else 3541 return sprintf(page, "%llu\n", 3542 (unsigned long long)mddev->resync_max); 3543 } 3544 static ssize_t 3545 max_sync_store(mddev_t *mddev, const char *buf, size_t len) 3546 { 3547 if (strncmp(buf, "max", 3) == 0) 3548 mddev->resync_max = MaxSector; 3549 else { 3550 unsigned long long max; 3551 if (strict_strtoull(buf, 10, &max)) 3552 return -EINVAL; 3553 if (max < mddev->resync_min) 3554 return -EINVAL; 3555 if (max < mddev->resync_max && 3556 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 3557 return -EBUSY; 3558 3559 /* Must be a multiple of chunk_size */ 3560 if (mddev->chunk_size) { 3561 if (max & (sector_t)((mddev->chunk_size>>9)-1)) 3562 return -EINVAL; 3563 } 3564 mddev->resync_max = max; 3565 } 3566 wake_up(&mddev->recovery_wait); 3567 return len; 3568 } 3569 3570 static struct md_sysfs_entry md_max_sync = 3571 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store); 3572 3573 static ssize_t 3574 suspend_lo_show(mddev_t *mddev, char *page) 3575 { 3576 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); 3577 } 3578 3579 static ssize_t 3580 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len) 3581 { 3582 char *e; 3583 unsigned long long new = simple_strtoull(buf, &e, 10); 3584 3585 if (mddev->pers->quiesce == NULL) 3586 return -EINVAL; 3587 if (buf == e || (*e && *e != '\n')) 3588 return -EINVAL; 3589 if (new >= mddev->suspend_hi || 3590 (new > mddev->suspend_lo && new < mddev->suspend_hi)) { 3591 mddev->suspend_lo = new; 3592 mddev->pers->quiesce(mddev, 2); 3593 return len; 3594 } else 3595 return -EINVAL; 3596 } 3597 static struct md_sysfs_entry md_suspend_lo = 3598 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store); 3599 3600 3601 static ssize_t 3602 suspend_hi_show(mddev_t *mddev, char *page) 3603 { 3604 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi); 3605 } 3606 3607 static ssize_t 3608 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len) 3609 { 3610 char *e; 3611 unsigned long long new = simple_strtoull(buf, &e, 10); 3612 3613 if (mddev->pers->quiesce == NULL) 3614 return -EINVAL; 3615 if (buf == e || (*e && *e != '\n')) 3616 return -EINVAL; 3617 if ((new <= mddev->suspend_lo && mddev->suspend_lo >= mddev->suspend_hi) || 3618 (new > mddev->suspend_lo && new > mddev->suspend_hi)) { 3619 mddev->suspend_hi = new; 3620 mddev->pers->quiesce(mddev, 1); 3621 mddev->pers->quiesce(mddev, 0); 3622 return len; 3623 } else 3624 return -EINVAL; 3625 } 3626 static struct md_sysfs_entry md_suspend_hi = 3627 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); 3628 3629 static ssize_t 3630 reshape_position_show(mddev_t *mddev, char *page) 3631 { 3632 if (mddev->reshape_position != MaxSector) 3633 return sprintf(page, "%llu\n", 3634 (unsigned long long)mddev->reshape_position); 3635 strcpy(page, "none\n"); 3636 return 5; 3637 } 3638 3639 static ssize_t 3640 reshape_position_store(mddev_t *mddev, const char *buf, size_t len) 3641 { 3642 char *e; 3643 unsigned long long new = simple_strtoull(buf, &e, 10); 3644 if (mddev->pers) 3645 return -EBUSY; 3646 if (buf == e || (*e && *e != '\n')) 3647 return -EINVAL; 3648 mddev->reshape_position = new; 3649 mddev->delta_disks = 0; 3650 mddev->new_level = mddev->level; 3651 mddev->new_layout = mddev->layout; 3652 mddev->new_chunk = mddev->chunk_size; 3653 return len; 3654 } 3655 3656 static struct md_sysfs_entry md_reshape_position = 3657 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show, 3658 reshape_position_store); 3659 3660 static ssize_t 3661 array_size_show(mddev_t *mddev, char *page) 3662 { 3663 if (mddev->external_size) 3664 return sprintf(page, "%llu\n", 3665 (unsigned long long)mddev->array_sectors/2); 3666 else 3667 return sprintf(page, "default\n"); 3668 } 3669 3670 static ssize_t 3671 array_size_store(mddev_t *mddev, const char *buf, size_t len) 3672 { 3673 sector_t sectors; 3674 3675 if (strncmp(buf, "default", 7) == 0) { 3676 if (mddev->pers) 3677 sectors = mddev->pers->size(mddev, 0, 0); 3678 else 3679 sectors = mddev->array_sectors; 3680 3681 mddev->external_size = 0; 3682 } else { 3683 if (strict_blocks_to_sectors(buf, §ors) < 0) 3684 return -EINVAL; 3685 if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors) 3686 return -EINVAL; 3687 3688 mddev->external_size = 1; 3689 } 3690 3691 mddev->array_sectors = sectors; 3692 set_capacity(mddev->gendisk, mddev->array_sectors); 3693 if (mddev->pers) { 3694 struct block_device *bdev = bdget_disk(mddev->gendisk, 0); 3695 3696 if (bdev) { 3697 mutex_lock(&bdev->bd_inode->i_mutex); 3698 i_size_write(bdev->bd_inode, 3699 (loff_t)mddev->array_sectors << 9); 3700 mutex_unlock(&bdev->bd_inode->i_mutex); 3701 bdput(bdev); 3702 } 3703 } 3704 3705 return len; 3706 } 3707 3708 static struct md_sysfs_entry md_array_size = 3709 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show, 3710 array_size_store); 3711 3712 static struct attribute *md_default_attrs[] = { 3713 &md_level.attr, 3714 &md_layout.attr, 3715 &md_raid_disks.attr, 3716 &md_chunk_size.attr, 3717 &md_size.attr, 3718 &md_resync_start.attr, 3719 &md_metadata.attr, 3720 &md_new_device.attr, 3721 &md_safe_delay.attr, 3722 &md_array_state.attr, 3723 &md_reshape_position.attr, 3724 &md_array_size.attr, 3725 NULL, 3726 }; 3727 3728 static struct attribute *md_redundancy_attrs[] = { 3729 &md_scan_mode.attr, 3730 &md_mismatches.attr, 3731 &md_sync_min.attr, 3732 &md_sync_max.attr, 3733 &md_sync_speed.attr, 3734 &md_sync_force_parallel.attr, 3735 &md_sync_completed.attr, 3736 &md_min_sync.attr, 3737 &md_max_sync.attr, 3738 &md_suspend_lo.attr, 3739 &md_suspend_hi.attr, 3740 &md_bitmap.attr, 3741 &md_degraded.attr, 3742 NULL, 3743 }; 3744 static struct attribute_group md_redundancy_group = { 3745 .name = NULL, 3746 .attrs = md_redundancy_attrs, 3747 }; 3748 3749 3750 static ssize_t 3751 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 3752 { 3753 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 3754 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); 3755 ssize_t rv; 3756 3757 if (!entry->show) 3758 return -EIO; 3759 rv = mddev_lock(mddev); 3760 if (!rv) { 3761 rv = entry->show(mddev, page); 3762 mddev_unlock(mddev); 3763 } 3764 return rv; 3765 } 3766 3767 static ssize_t 3768 md_attr_store(struct kobject *kobj, struct attribute *attr, 3769 const char *page, size_t length) 3770 { 3771 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 3772 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); 3773 ssize_t rv; 3774 3775 if (!entry->store) 3776 return -EIO; 3777 if (!capable(CAP_SYS_ADMIN)) 3778 return -EACCES; 3779 rv = mddev_lock(mddev); 3780 if (mddev->hold_active == UNTIL_IOCTL) 3781 mddev->hold_active = 0; 3782 if (!rv) { 3783 rv = entry->store(mddev, page, length); 3784 mddev_unlock(mddev); 3785 } 3786 return rv; 3787 } 3788 3789 static void md_free(struct kobject *ko) 3790 { 3791 mddev_t *mddev = container_of(ko, mddev_t, kobj); 3792 3793 if (mddev->sysfs_state) 3794 sysfs_put(mddev->sysfs_state); 3795 3796 if (mddev->gendisk) { 3797 del_gendisk(mddev->gendisk); 3798 put_disk(mddev->gendisk); 3799 } 3800 if (mddev->queue) 3801 blk_cleanup_queue(mddev->queue); 3802 3803 kfree(mddev); 3804 } 3805 3806 static struct sysfs_ops md_sysfs_ops = { 3807 .show = md_attr_show, 3808 .store = md_attr_store, 3809 }; 3810 static struct kobj_type md_ktype = { 3811 .release = md_free, 3812 .sysfs_ops = &md_sysfs_ops, 3813 .default_attrs = md_default_attrs, 3814 }; 3815 3816 int mdp_major = 0; 3817 3818 static void mddev_delayed_delete(struct work_struct *ws) 3819 { 3820 mddev_t *mddev = container_of(ws, mddev_t, del_work); 3821 3822 if (mddev->private == &md_redundancy_group) { 3823 sysfs_remove_group(&mddev->kobj, &md_redundancy_group); 3824 if (mddev->sysfs_action) 3825 sysfs_put(mddev->sysfs_action); 3826 mddev->sysfs_action = NULL; 3827 mddev->private = NULL; 3828 } 3829 kobject_del(&mddev->kobj); 3830 kobject_put(&mddev->kobj); 3831 } 3832 3833 static int md_alloc(dev_t dev, char *name) 3834 { 3835 static DEFINE_MUTEX(disks_mutex); 3836 mddev_t *mddev = mddev_find(dev); 3837 struct gendisk *disk; 3838 int partitioned; 3839 int shift; 3840 int unit; 3841 int error; 3842 3843 if (!mddev) 3844 return -ENODEV; 3845 3846 partitioned = (MAJOR(mddev->unit) != MD_MAJOR); 3847 shift = partitioned ? MdpMinorShift : 0; 3848 unit = MINOR(mddev->unit) >> shift; 3849 3850 /* wait for any previous instance if this device 3851 * to be completed removed (mddev_delayed_delete). 3852 */ 3853 flush_scheduled_work(); 3854 3855 mutex_lock(&disks_mutex); 3856 if (mddev->gendisk) { 3857 mutex_unlock(&disks_mutex); 3858 mddev_put(mddev); 3859 return -EEXIST; 3860 } 3861 3862 if (name) { 3863 /* Need to ensure that 'name' is not a duplicate. 3864 */ 3865 mddev_t *mddev2; 3866 spin_lock(&all_mddevs_lock); 3867 3868 list_for_each_entry(mddev2, &all_mddevs, all_mddevs) 3869 if (mddev2->gendisk && 3870 strcmp(mddev2->gendisk->disk_name, name) == 0) { 3871 spin_unlock(&all_mddevs_lock); 3872 return -EEXIST; 3873 } 3874 spin_unlock(&all_mddevs_lock); 3875 } 3876 3877 mddev->queue = blk_alloc_queue(GFP_KERNEL); 3878 if (!mddev->queue) { 3879 mutex_unlock(&disks_mutex); 3880 mddev_put(mddev); 3881 return -ENOMEM; 3882 } 3883 mddev->queue->queuedata = mddev; 3884 3885 /* Can be unlocked because the queue is new: no concurrency */ 3886 queue_flag_set_unlocked(QUEUE_FLAG_CLUSTER, mddev->queue); 3887 3888 blk_queue_make_request(mddev->queue, md_make_request); 3889 3890 disk = alloc_disk(1 << shift); 3891 if (!disk) { 3892 mutex_unlock(&disks_mutex); 3893 blk_cleanup_queue(mddev->queue); 3894 mddev->queue = NULL; 3895 mddev_put(mddev); 3896 return -ENOMEM; 3897 } 3898 disk->major = MAJOR(mddev->unit); 3899 disk->first_minor = unit << shift; 3900 if (name) 3901 strcpy(disk->disk_name, name); 3902 else if (partitioned) 3903 sprintf(disk->disk_name, "md_d%d", unit); 3904 else 3905 sprintf(disk->disk_name, "md%d", unit); 3906 disk->fops = &md_fops; 3907 disk->private_data = mddev; 3908 disk->queue = mddev->queue; 3909 /* Allow extended partitions. This makes the 3910 * 'mdp' device redundant, but we can't really 3911 * remove it now. 3912 */ 3913 disk->flags |= GENHD_FL_EXT_DEVT; 3914 add_disk(disk); 3915 mddev->gendisk = disk; 3916 error = kobject_init_and_add(&mddev->kobj, &md_ktype, 3917 &disk_to_dev(disk)->kobj, "%s", "md"); 3918 mutex_unlock(&disks_mutex); 3919 if (error) 3920 printk(KERN_WARNING "md: cannot register %s/md - name in use\n", 3921 disk->disk_name); 3922 else { 3923 kobject_uevent(&mddev->kobj, KOBJ_ADD); 3924 mddev->sysfs_state = sysfs_get_dirent(mddev->kobj.sd, "array_state"); 3925 } 3926 mddev_put(mddev); 3927 return 0; 3928 } 3929 3930 static struct kobject *md_probe(dev_t dev, int *part, void *data) 3931 { 3932 md_alloc(dev, NULL); 3933 return NULL; 3934 } 3935 3936 static int add_named_array(const char *val, struct kernel_param *kp) 3937 { 3938 /* val must be "md_*" where * is not all digits. 3939 * We allocate an array with a large free minor number, and 3940 * set the name to val. val must not already be an active name. 3941 */ 3942 int len = strlen(val); 3943 char buf[DISK_NAME_LEN]; 3944 3945 while (len && val[len-1] == '\n') 3946 len--; 3947 if (len >= DISK_NAME_LEN) 3948 return -E2BIG; 3949 strlcpy(buf, val, len+1); 3950 if (strncmp(buf, "md_", 3) != 0) 3951 return -EINVAL; 3952 return md_alloc(0, buf); 3953 } 3954 3955 static void md_safemode_timeout(unsigned long data) 3956 { 3957 mddev_t *mddev = (mddev_t *) data; 3958 3959 if (!atomic_read(&mddev->writes_pending)) { 3960 mddev->safemode = 1; 3961 if (mddev->external) 3962 sysfs_notify_dirent(mddev->sysfs_state); 3963 } 3964 md_wakeup_thread(mddev->thread); 3965 } 3966 3967 static int start_dirty_degraded; 3968 3969 static int do_md_run(mddev_t * mddev) 3970 { 3971 int err; 3972 int chunk_size; 3973 mdk_rdev_t *rdev; 3974 struct gendisk *disk; 3975 struct mdk_personality *pers; 3976 char b[BDEVNAME_SIZE]; 3977 3978 if (list_empty(&mddev->disks)) 3979 /* cannot run an array with no devices.. */ 3980 return -EINVAL; 3981 3982 if (mddev->pers) 3983 return -EBUSY; 3984 3985 /* 3986 * Analyze all RAID superblock(s) 3987 */ 3988 if (!mddev->raid_disks) { 3989 if (!mddev->persistent) 3990 return -EINVAL; 3991 analyze_sbs(mddev); 3992 } 3993 3994 chunk_size = mddev->chunk_size; 3995 3996 if (chunk_size) { 3997 if (chunk_size > MAX_CHUNK_SIZE) { 3998 printk(KERN_ERR "too big chunk_size: %d > %d\n", 3999 chunk_size, MAX_CHUNK_SIZE); 4000 return -EINVAL; 4001 } 4002 /* 4003 * chunk-size has to be a power of 2 4004 */ 4005 if ( (1 << ffz(~chunk_size)) != chunk_size) { 4006 printk(KERN_ERR "chunk_size of %d not valid\n", chunk_size); 4007 return -EINVAL; 4008 } 4009 4010 /* devices must have minimum size of one chunk */ 4011 list_for_each_entry(rdev, &mddev->disks, same_set) { 4012 if (test_bit(Faulty, &rdev->flags)) 4013 continue; 4014 if (rdev->sectors < chunk_size / 512) { 4015 printk(KERN_WARNING 4016 "md: Dev %s smaller than chunk_size:" 4017 " %llu < %d\n", 4018 bdevname(rdev->bdev,b), 4019 (unsigned long long)rdev->sectors, 4020 chunk_size / 512); 4021 return -EINVAL; 4022 } 4023 } 4024 } 4025 4026 if (mddev->level != LEVEL_NONE) 4027 request_module("md-level-%d", mddev->level); 4028 else if (mddev->clevel[0]) 4029 request_module("md-%s", mddev->clevel); 4030 4031 /* 4032 * Drop all container device buffers, from now on 4033 * the only valid external interface is through the md 4034 * device. 4035 */ 4036 list_for_each_entry(rdev, &mddev->disks, same_set) { 4037 if (test_bit(Faulty, &rdev->flags)) 4038 continue; 4039 sync_blockdev(rdev->bdev); 4040 invalidate_bdev(rdev->bdev); 4041 4042 /* perform some consistency tests on the device. 4043 * We don't want the data to overlap the metadata, 4044 * Internal Bitmap issues have been handled elsewhere. 4045 */ 4046 if (rdev->data_offset < rdev->sb_start) { 4047 if (mddev->dev_sectors && 4048 rdev->data_offset + mddev->dev_sectors 4049 > rdev->sb_start) { 4050 printk("md: %s: data overlaps metadata\n", 4051 mdname(mddev)); 4052 return -EINVAL; 4053 } 4054 } else { 4055 if (rdev->sb_start + rdev->sb_size/512 4056 > rdev->data_offset) { 4057 printk("md: %s: metadata overlaps data\n", 4058 mdname(mddev)); 4059 return -EINVAL; 4060 } 4061 } 4062 sysfs_notify_dirent(rdev->sysfs_state); 4063 } 4064 4065 md_probe(mddev->unit, NULL, NULL); 4066 disk = mddev->gendisk; 4067 if (!disk) 4068 return -ENOMEM; 4069 4070 spin_lock(&pers_lock); 4071 pers = find_pers(mddev->level, mddev->clevel); 4072 if (!pers || !try_module_get(pers->owner)) { 4073 spin_unlock(&pers_lock); 4074 if (mddev->level != LEVEL_NONE) 4075 printk(KERN_WARNING "md: personality for level %d is not loaded!\n", 4076 mddev->level); 4077 else 4078 printk(KERN_WARNING "md: personality for level %s is not loaded!\n", 4079 mddev->clevel); 4080 return -EINVAL; 4081 } 4082 mddev->pers = pers; 4083 spin_unlock(&pers_lock); 4084 if (mddev->level != pers->level) { 4085 mddev->level = pers->level; 4086 mddev->new_level = pers->level; 4087 } 4088 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 4089 4090 if (pers->level >= 4 && pers->level <= 6) 4091 /* Cannot support integrity (yet) */ 4092 blk_integrity_unregister(mddev->gendisk); 4093 4094 if (mddev->reshape_position != MaxSector && 4095 pers->start_reshape == NULL) { 4096 /* This personality cannot handle reshaping... */ 4097 mddev->pers = NULL; 4098 module_put(pers->owner); 4099 return -EINVAL; 4100 } 4101 4102 if (pers->sync_request) { 4103 /* Warn if this is a potentially silly 4104 * configuration. 4105 */ 4106 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 4107 mdk_rdev_t *rdev2; 4108 int warned = 0; 4109 4110 list_for_each_entry(rdev, &mddev->disks, same_set) 4111 list_for_each_entry(rdev2, &mddev->disks, same_set) { 4112 if (rdev < rdev2 && 4113 rdev->bdev->bd_contains == 4114 rdev2->bdev->bd_contains) { 4115 printk(KERN_WARNING 4116 "%s: WARNING: %s appears to be" 4117 " on the same physical disk as" 4118 " %s.\n", 4119 mdname(mddev), 4120 bdevname(rdev->bdev,b), 4121 bdevname(rdev2->bdev,b2)); 4122 warned = 1; 4123 } 4124 } 4125 4126 if (warned) 4127 printk(KERN_WARNING 4128 "True protection against single-disk" 4129 " failure might be compromised.\n"); 4130 } 4131 4132 mddev->recovery = 0; 4133 /* may be over-ridden by personality */ 4134 mddev->resync_max_sectors = mddev->dev_sectors; 4135 4136 mddev->barriers_work = 1; 4137 mddev->ok_start_degraded = start_dirty_degraded; 4138 4139 if (start_readonly) 4140 mddev->ro = 2; /* read-only, but switch on first write */ 4141 4142 err = mddev->pers->run(mddev); 4143 if (err) 4144 printk(KERN_ERR "md: pers->run() failed ...\n"); 4145 else if (mddev->pers->size(mddev, 0, 0) < mddev->array_sectors) { 4146 WARN_ONCE(!mddev->external_size, "%s: default size too small," 4147 " but 'external_size' not in effect?\n", __func__); 4148 printk(KERN_ERR 4149 "md: invalid array_size %llu > default size %llu\n", 4150 (unsigned long long)mddev->array_sectors / 2, 4151 (unsigned long long)mddev->pers->size(mddev, 0, 0) / 2); 4152 err = -EINVAL; 4153 mddev->pers->stop(mddev); 4154 } 4155 if (err == 0 && mddev->pers->sync_request) { 4156 err = bitmap_create(mddev); 4157 if (err) { 4158 printk(KERN_ERR "%s: failed to create bitmap (%d)\n", 4159 mdname(mddev), err); 4160 mddev->pers->stop(mddev); 4161 } 4162 } 4163 if (err) { 4164 module_put(mddev->pers->owner); 4165 mddev->pers = NULL; 4166 bitmap_destroy(mddev); 4167 return err; 4168 } 4169 if (mddev->pers->sync_request) { 4170 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 4171 printk(KERN_WARNING 4172 "md: cannot register extra attributes for %s\n", 4173 mdname(mddev)); 4174 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, "sync_action"); 4175 } else if (mddev->ro == 2) /* auto-readonly not meaningful */ 4176 mddev->ro = 0; 4177 4178 atomic_set(&mddev->writes_pending,0); 4179 mddev->safemode = 0; 4180 mddev->safemode_timer.function = md_safemode_timeout; 4181 mddev->safemode_timer.data = (unsigned long) mddev; 4182 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */ 4183 mddev->in_sync = 1; 4184 4185 list_for_each_entry(rdev, &mddev->disks, same_set) 4186 if (rdev->raid_disk >= 0) { 4187 char nm[20]; 4188 sprintf(nm, "rd%d", rdev->raid_disk); 4189 if (sysfs_create_link(&mddev->kobj, &rdev->kobj, nm)) 4190 printk("md: cannot register %s for %s\n", 4191 nm, mdname(mddev)); 4192 } 4193 4194 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4195 4196 if (mddev->flags) 4197 md_update_sb(mddev, 0); 4198 4199 set_capacity(disk, mddev->array_sectors); 4200 4201 /* If there is a partially-recovered drive we need to 4202 * start recovery here. If we leave it to md_check_recovery, 4203 * it will remove the drives and not do the right thing 4204 */ 4205 if (mddev->degraded && !mddev->sync_thread) { 4206 int spares = 0; 4207 list_for_each_entry(rdev, &mddev->disks, same_set) 4208 if (rdev->raid_disk >= 0 && 4209 !test_bit(In_sync, &rdev->flags) && 4210 !test_bit(Faulty, &rdev->flags)) 4211 /* complete an interrupted recovery */ 4212 spares++; 4213 if (spares && mddev->pers->sync_request) { 4214 mddev->recovery = 0; 4215 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 4216 mddev->sync_thread = md_register_thread(md_do_sync, 4217 mddev, 4218 "%s_resync"); 4219 if (!mddev->sync_thread) { 4220 printk(KERN_ERR "%s: could not start resync" 4221 " thread...\n", 4222 mdname(mddev)); 4223 /* leave the spares where they are, it shouldn't hurt */ 4224 mddev->recovery = 0; 4225 } 4226 } 4227 } 4228 md_wakeup_thread(mddev->thread); 4229 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 4230 4231 mddev->changed = 1; 4232 md_new_event(mddev); 4233 sysfs_notify_dirent(mddev->sysfs_state); 4234 if (mddev->sysfs_action) 4235 sysfs_notify_dirent(mddev->sysfs_action); 4236 sysfs_notify(&mddev->kobj, NULL, "degraded"); 4237 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 4238 return 0; 4239 } 4240 4241 static int restart_array(mddev_t *mddev) 4242 { 4243 struct gendisk *disk = mddev->gendisk; 4244 4245 /* Complain if it has no devices */ 4246 if (list_empty(&mddev->disks)) 4247 return -ENXIO; 4248 if (!mddev->pers) 4249 return -EINVAL; 4250 if (!mddev->ro) 4251 return -EBUSY; 4252 mddev->safemode = 0; 4253 mddev->ro = 0; 4254 set_disk_ro(disk, 0); 4255 printk(KERN_INFO "md: %s switched to read-write mode.\n", 4256 mdname(mddev)); 4257 /* Kick recovery or resync if necessary */ 4258 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4259 md_wakeup_thread(mddev->thread); 4260 md_wakeup_thread(mddev->sync_thread); 4261 sysfs_notify_dirent(mddev->sysfs_state); 4262 return 0; 4263 } 4264 4265 /* similar to deny_write_access, but accounts for our holding a reference 4266 * to the file ourselves */ 4267 static int deny_bitmap_write_access(struct file * file) 4268 { 4269 struct inode *inode = file->f_mapping->host; 4270 4271 spin_lock(&inode->i_lock); 4272 if (atomic_read(&inode->i_writecount) > 1) { 4273 spin_unlock(&inode->i_lock); 4274 return -ETXTBSY; 4275 } 4276 atomic_set(&inode->i_writecount, -1); 4277 spin_unlock(&inode->i_lock); 4278 4279 return 0; 4280 } 4281 4282 static void restore_bitmap_write_access(struct file *file) 4283 { 4284 struct inode *inode = file->f_mapping->host; 4285 4286 spin_lock(&inode->i_lock); 4287 atomic_set(&inode->i_writecount, 1); 4288 spin_unlock(&inode->i_lock); 4289 } 4290 4291 /* mode: 4292 * 0 - completely stop and dis-assemble array 4293 * 1 - switch to readonly 4294 * 2 - stop but do not disassemble array 4295 */ 4296 static int do_md_stop(mddev_t * mddev, int mode, int is_open) 4297 { 4298 int err = 0; 4299 struct gendisk *disk = mddev->gendisk; 4300 4301 if (atomic_read(&mddev->openers) > is_open) { 4302 printk("md: %s still in use.\n",mdname(mddev)); 4303 return -EBUSY; 4304 } 4305 4306 if (mddev->pers) { 4307 4308 if (mddev->sync_thread) { 4309 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4310 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4311 md_unregister_thread(mddev->sync_thread); 4312 mddev->sync_thread = NULL; 4313 } 4314 4315 del_timer_sync(&mddev->safemode_timer); 4316 4317 switch(mode) { 4318 case 1: /* readonly */ 4319 err = -ENXIO; 4320 if (mddev->ro==1) 4321 goto out; 4322 mddev->ro = 1; 4323 break; 4324 case 0: /* disassemble */ 4325 case 2: /* stop */ 4326 bitmap_flush(mddev); 4327 md_super_wait(mddev); 4328 if (mddev->ro) 4329 set_disk_ro(disk, 0); 4330 4331 mddev->pers->stop(mddev); 4332 mddev->queue->merge_bvec_fn = NULL; 4333 mddev->queue->unplug_fn = NULL; 4334 mddev->queue->backing_dev_info.congested_fn = NULL; 4335 module_put(mddev->pers->owner); 4336 if (mddev->pers->sync_request) 4337 mddev->private = &md_redundancy_group; 4338 mddev->pers = NULL; 4339 /* tell userspace to handle 'inactive' */ 4340 sysfs_notify_dirent(mddev->sysfs_state); 4341 4342 set_capacity(disk, 0); 4343 mddev->changed = 1; 4344 4345 if (mddev->ro) 4346 mddev->ro = 0; 4347 } 4348 if (!mddev->in_sync || mddev->flags) { 4349 /* mark array as shutdown cleanly */ 4350 mddev->in_sync = 1; 4351 md_update_sb(mddev, 1); 4352 } 4353 if (mode == 1) 4354 set_disk_ro(disk, 1); 4355 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4356 } 4357 4358 /* 4359 * Free resources if final stop 4360 */ 4361 if (mode == 0) { 4362 mdk_rdev_t *rdev; 4363 4364 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); 4365 4366 bitmap_destroy(mddev); 4367 if (mddev->bitmap_file) { 4368 restore_bitmap_write_access(mddev->bitmap_file); 4369 fput(mddev->bitmap_file); 4370 mddev->bitmap_file = NULL; 4371 } 4372 mddev->bitmap_offset = 0; 4373 4374 list_for_each_entry(rdev, &mddev->disks, same_set) 4375 if (rdev->raid_disk >= 0) { 4376 char nm[20]; 4377 sprintf(nm, "rd%d", rdev->raid_disk); 4378 sysfs_remove_link(&mddev->kobj, nm); 4379 } 4380 4381 /* make sure all md_delayed_delete calls have finished */ 4382 flush_scheduled_work(); 4383 4384 export_array(mddev); 4385 4386 mddev->array_sectors = 0; 4387 mddev->external_size = 0; 4388 mddev->dev_sectors = 0; 4389 mddev->raid_disks = 0; 4390 mddev->recovery_cp = 0; 4391 mddev->resync_min = 0; 4392 mddev->resync_max = MaxSector; 4393 mddev->reshape_position = MaxSector; 4394 mddev->external = 0; 4395 mddev->persistent = 0; 4396 mddev->level = LEVEL_NONE; 4397 mddev->clevel[0] = 0; 4398 mddev->flags = 0; 4399 mddev->ro = 0; 4400 mddev->metadata_type[0] = 0; 4401 mddev->chunk_size = 0; 4402 mddev->ctime = mddev->utime = 0; 4403 mddev->layout = 0; 4404 mddev->max_disks = 0; 4405 mddev->events = 0; 4406 mddev->delta_disks = 0; 4407 mddev->new_level = LEVEL_NONE; 4408 mddev->new_layout = 0; 4409 mddev->new_chunk = 0; 4410 mddev->curr_resync = 0; 4411 mddev->resync_mismatches = 0; 4412 mddev->suspend_lo = mddev->suspend_hi = 0; 4413 mddev->sync_speed_min = mddev->sync_speed_max = 0; 4414 mddev->recovery = 0; 4415 mddev->in_sync = 0; 4416 mddev->changed = 0; 4417 mddev->degraded = 0; 4418 mddev->barriers_work = 0; 4419 mddev->safemode = 0; 4420 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 4421 if (mddev->hold_active == UNTIL_STOP) 4422 mddev->hold_active = 0; 4423 4424 } else if (mddev->pers) 4425 printk(KERN_INFO "md: %s switched to read-only mode.\n", 4426 mdname(mddev)); 4427 err = 0; 4428 blk_integrity_unregister(disk); 4429 md_new_event(mddev); 4430 sysfs_notify_dirent(mddev->sysfs_state); 4431 out: 4432 return err; 4433 } 4434 4435 #ifndef MODULE 4436 static void autorun_array(mddev_t *mddev) 4437 { 4438 mdk_rdev_t *rdev; 4439 int err; 4440 4441 if (list_empty(&mddev->disks)) 4442 return; 4443 4444 printk(KERN_INFO "md: running: "); 4445 4446 list_for_each_entry(rdev, &mddev->disks, same_set) { 4447 char b[BDEVNAME_SIZE]; 4448 printk("<%s>", bdevname(rdev->bdev,b)); 4449 } 4450 printk("\n"); 4451 4452 err = do_md_run(mddev); 4453 if (err) { 4454 printk(KERN_WARNING "md: do_md_run() returned %d\n", err); 4455 do_md_stop(mddev, 0, 0); 4456 } 4457 } 4458 4459 /* 4460 * lets try to run arrays based on all disks that have arrived 4461 * until now. (those are in pending_raid_disks) 4462 * 4463 * the method: pick the first pending disk, collect all disks with 4464 * the same UUID, remove all from the pending list and put them into 4465 * the 'same_array' list. Then order this list based on superblock 4466 * update time (freshest comes first), kick out 'old' disks and 4467 * compare superblocks. If everything's fine then run it. 4468 * 4469 * If "unit" is allocated, then bump its reference count 4470 */ 4471 static void autorun_devices(int part) 4472 { 4473 mdk_rdev_t *rdev0, *rdev, *tmp; 4474 mddev_t *mddev; 4475 char b[BDEVNAME_SIZE]; 4476 4477 printk(KERN_INFO "md: autorun ...\n"); 4478 while (!list_empty(&pending_raid_disks)) { 4479 int unit; 4480 dev_t dev; 4481 LIST_HEAD(candidates); 4482 rdev0 = list_entry(pending_raid_disks.next, 4483 mdk_rdev_t, same_set); 4484 4485 printk(KERN_INFO "md: considering %s ...\n", 4486 bdevname(rdev0->bdev,b)); 4487 INIT_LIST_HEAD(&candidates); 4488 rdev_for_each_list(rdev, tmp, &pending_raid_disks) 4489 if (super_90_load(rdev, rdev0, 0) >= 0) { 4490 printk(KERN_INFO "md: adding %s ...\n", 4491 bdevname(rdev->bdev,b)); 4492 list_move(&rdev->same_set, &candidates); 4493 } 4494 /* 4495 * now we have a set of devices, with all of them having 4496 * mostly sane superblocks. It's time to allocate the 4497 * mddev. 4498 */ 4499 if (part) { 4500 dev = MKDEV(mdp_major, 4501 rdev0->preferred_minor << MdpMinorShift); 4502 unit = MINOR(dev) >> MdpMinorShift; 4503 } else { 4504 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor); 4505 unit = MINOR(dev); 4506 } 4507 if (rdev0->preferred_minor != unit) { 4508 printk(KERN_INFO "md: unit number in %s is bad: %d\n", 4509 bdevname(rdev0->bdev, b), rdev0->preferred_minor); 4510 break; 4511 } 4512 4513 md_probe(dev, NULL, NULL); 4514 mddev = mddev_find(dev); 4515 if (!mddev || !mddev->gendisk) { 4516 if (mddev) 4517 mddev_put(mddev); 4518 printk(KERN_ERR 4519 "md: cannot allocate memory for md drive.\n"); 4520 break; 4521 } 4522 if (mddev_lock(mddev)) 4523 printk(KERN_WARNING "md: %s locked, cannot run\n", 4524 mdname(mddev)); 4525 else if (mddev->raid_disks || mddev->major_version 4526 || !list_empty(&mddev->disks)) { 4527 printk(KERN_WARNING 4528 "md: %s already running, cannot run %s\n", 4529 mdname(mddev), bdevname(rdev0->bdev,b)); 4530 mddev_unlock(mddev); 4531 } else { 4532 printk(KERN_INFO "md: created %s\n", mdname(mddev)); 4533 mddev->persistent = 1; 4534 rdev_for_each_list(rdev, tmp, &candidates) { 4535 list_del_init(&rdev->same_set); 4536 if (bind_rdev_to_array(rdev, mddev)) 4537 export_rdev(rdev); 4538 } 4539 autorun_array(mddev); 4540 mddev_unlock(mddev); 4541 } 4542 /* on success, candidates will be empty, on error 4543 * it won't... 4544 */ 4545 rdev_for_each_list(rdev, tmp, &candidates) { 4546 list_del_init(&rdev->same_set); 4547 export_rdev(rdev); 4548 } 4549 mddev_put(mddev); 4550 } 4551 printk(KERN_INFO "md: ... autorun DONE.\n"); 4552 } 4553 #endif /* !MODULE */ 4554 4555 static int get_version(void __user * arg) 4556 { 4557 mdu_version_t ver; 4558 4559 ver.major = MD_MAJOR_VERSION; 4560 ver.minor = MD_MINOR_VERSION; 4561 ver.patchlevel = MD_PATCHLEVEL_VERSION; 4562 4563 if (copy_to_user(arg, &ver, sizeof(ver))) 4564 return -EFAULT; 4565 4566 return 0; 4567 } 4568 4569 static int get_array_info(mddev_t * mddev, void __user * arg) 4570 { 4571 mdu_array_info_t info; 4572 int nr,working,active,failed,spare; 4573 mdk_rdev_t *rdev; 4574 4575 nr=working=active=failed=spare=0; 4576 list_for_each_entry(rdev, &mddev->disks, same_set) { 4577 nr++; 4578 if (test_bit(Faulty, &rdev->flags)) 4579 failed++; 4580 else { 4581 working++; 4582 if (test_bit(In_sync, &rdev->flags)) 4583 active++; 4584 else 4585 spare++; 4586 } 4587 } 4588 4589 info.major_version = mddev->major_version; 4590 info.minor_version = mddev->minor_version; 4591 info.patch_version = MD_PATCHLEVEL_VERSION; 4592 info.ctime = mddev->ctime; 4593 info.level = mddev->level; 4594 info.size = mddev->dev_sectors / 2; 4595 if (info.size != mddev->dev_sectors / 2) /* overflow */ 4596 info.size = -1; 4597 info.nr_disks = nr; 4598 info.raid_disks = mddev->raid_disks; 4599 info.md_minor = mddev->md_minor; 4600 info.not_persistent= !mddev->persistent; 4601 4602 info.utime = mddev->utime; 4603 info.state = 0; 4604 if (mddev->in_sync) 4605 info.state = (1<<MD_SB_CLEAN); 4606 if (mddev->bitmap && mddev->bitmap_offset) 4607 info.state = (1<<MD_SB_BITMAP_PRESENT); 4608 info.active_disks = active; 4609 info.working_disks = working; 4610 info.failed_disks = failed; 4611 info.spare_disks = spare; 4612 4613 info.layout = mddev->layout; 4614 info.chunk_size = mddev->chunk_size; 4615 4616 if (copy_to_user(arg, &info, sizeof(info))) 4617 return -EFAULT; 4618 4619 return 0; 4620 } 4621 4622 static int get_bitmap_file(mddev_t * mddev, void __user * arg) 4623 { 4624 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ 4625 char *ptr, *buf = NULL; 4626 int err = -ENOMEM; 4627 4628 if (md_allow_write(mddev)) 4629 file = kmalloc(sizeof(*file), GFP_NOIO); 4630 else 4631 file = kmalloc(sizeof(*file), GFP_KERNEL); 4632 4633 if (!file) 4634 goto out; 4635 4636 /* bitmap disabled, zero the first byte and copy out */ 4637 if (!mddev->bitmap || !mddev->bitmap->file) { 4638 file->pathname[0] = '\0'; 4639 goto copy_out; 4640 } 4641 4642 buf = kmalloc(sizeof(file->pathname), GFP_KERNEL); 4643 if (!buf) 4644 goto out; 4645 4646 ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname)); 4647 if (IS_ERR(ptr)) 4648 goto out; 4649 4650 strcpy(file->pathname, ptr); 4651 4652 copy_out: 4653 err = 0; 4654 if (copy_to_user(arg, file, sizeof(*file))) 4655 err = -EFAULT; 4656 out: 4657 kfree(buf); 4658 kfree(file); 4659 return err; 4660 } 4661 4662 static int get_disk_info(mddev_t * mddev, void __user * arg) 4663 { 4664 mdu_disk_info_t info; 4665 mdk_rdev_t *rdev; 4666 4667 if (copy_from_user(&info, arg, sizeof(info))) 4668 return -EFAULT; 4669 4670 rdev = find_rdev_nr(mddev, info.number); 4671 if (rdev) { 4672 info.major = MAJOR(rdev->bdev->bd_dev); 4673 info.minor = MINOR(rdev->bdev->bd_dev); 4674 info.raid_disk = rdev->raid_disk; 4675 info.state = 0; 4676 if (test_bit(Faulty, &rdev->flags)) 4677 info.state |= (1<<MD_DISK_FAULTY); 4678 else if (test_bit(In_sync, &rdev->flags)) { 4679 info.state |= (1<<MD_DISK_ACTIVE); 4680 info.state |= (1<<MD_DISK_SYNC); 4681 } 4682 if (test_bit(WriteMostly, &rdev->flags)) 4683 info.state |= (1<<MD_DISK_WRITEMOSTLY); 4684 } else { 4685 info.major = info.minor = 0; 4686 info.raid_disk = -1; 4687 info.state = (1<<MD_DISK_REMOVED); 4688 } 4689 4690 if (copy_to_user(arg, &info, sizeof(info))) 4691 return -EFAULT; 4692 4693 return 0; 4694 } 4695 4696 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) 4697 { 4698 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 4699 mdk_rdev_t *rdev; 4700 dev_t dev = MKDEV(info->major,info->minor); 4701 4702 if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) 4703 return -EOVERFLOW; 4704 4705 if (!mddev->raid_disks) { 4706 int err; 4707 /* expecting a device which has a superblock */ 4708 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); 4709 if (IS_ERR(rdev)) { 4710 printk(KERN_WARNING 4711 "md: md_import_device returned %ld\n", 4712 PTR_ERR(rdev)); 4713 return PTR_ERR(rdev); 4714 } 4715 if (!list_empty(&mddev->disks)) { 4716 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, 4717 mdk_rdev_t, same_set); 4718 int err = super_types[mddev->major_version] 4719 .load_super(rdev, rdev0, mddev->minor_version); 4720 if (err < 0) { 4721 printk(KERN_WARNING 4722 "md: %s has different UUID to %s\n", 4723 bdevname(rdev->bdev,b), 4724 bdevname(rdev0->bdev,b2)); 4725 export_rdev(rdev); 4726 return -EINVAL; 4727 } 4728 } 4729 err = bind_rdev_to_array(rdev, mddev); 4730 if (err) 4731 export_rdev(rdev); 4732 return err; 4733 } 4734 4735 /* 4736 * add_new_disk can be used once the array is assembled 4737 * to add "hot spares". They must already have a superblock 4738 * written 4739 */ 4740 if (mddev->pers) { 4741 int err; 4742 if (!mddev->pers->hot_add_disk) { 4743 printk(KERN_WARNING 4744 "%s: personality does not support diskops!\n", 4745 mdname(mddev)); 4746 return -EINVAL; 4747 } 4748 if (mddev->persistent) 4749 rdev = md_import_device(dev, mddev->major_version, 4750 mddev->minor_version); 4751 else 4752 rdev = md_import_device(dev, -1, -1); 4753 if (IS_ERR(rdev)) { 4754 printk(KERN_WARNING 4755 "md: md_import_device returned %ld\n", 4756 PTR_ERR(rdev)); 4757 return PTR_ERR(rdev); 4758 } 4759 /* set save_raid_disk if appropriate */ 4760 if (!mddev->persistent) { 4761 if (info->state & (1<<MD_DISK_SYNC) && 4762 info->raid_disk < mddev->raid_disks) 4763 rdev->raid_disk = info->raid_disk; 4764 else 4765 rdev->raid_disk = -1; 4766 } else 4767 super_types[mddev->major_version]. 4768 validate_super(mddev, rdev); 4769 rdev->saved_raid_disk = rdev->raid_disk; 4770 4771 clear_bit(In_sync, &rdev->flags); /* just to be sure */ 4772 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 4773 set_bit(WriteMostly, &rdev->flags); 4774 else 4775 clear_bit(WriteMostly, &rdev->flags); 4776 4777 rdev->raid_disk = -1; 4778 err = bind_rdev_to_array(rdev, mddev); 4779 if (!err && !mddev->pers->hot_remove_disk) { 4780 /* If there is hot_add_disk but no hot_remove_disk 4781 * then added disks for geometry changes, 4782 * and should be added immediately. 4783 */ 4784 super_types[mddev->major_version]. 4785 validate_super(mddev, rdev); 4786 err = mddev->pers->hot_add_disk(mddev, rdev); 4787 if (err) 4788 unbind_rdev_from_array(rdev); 4789 } 4790 if (err) 4791 export_rdev(rdev); 4792 else 4793 sysfs_notify_dirent(rdev->sysfs_state); 4794 4795 md_update_sb(mddev, 1); 4796 if (mddev->degraded) 4797 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 4798 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4799 md_wakeup_thread(mddev->thread); 4800 return err; 4801 } 4802 4803 /* otherwise, add_new_disk is only allowed 4804 * for major_version==0 superblocks 4805 */ 4806 if (mddev->major_version != 0) { 4807 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n", 4808 mdname(mddev)); 4809 return -EINVAL; 4810 } 4811 4812 if (!(info->state & (1<<MD_DISK_FAULTY))) { 4813 int err; 4814 rdev = md_import_device(dev, -1, 0); 4815 if (IS_ERR(rdev)) { 4816 printk(KERN_WARNING 4817 "md: error, md_import_device() returned %ld\n", 4818 PTR_ERR(rdev)); 4819 return PTR_ERR(rdev); 4820 } 4821 rdev->desc_nr = info->number; 4822 if (info->raid_disk < mddev->raid_disks) 4823 rdev->raid_disk = info->raid_disk; 4824 else 4825 rdev->raid_disk = -1; 4826 4827 if (rdev->raid_disk < mddev->raid_disks) 4828 if (info->state & (1<<MD_DISK_SYNC)) 4829 set_bit(In_sync, &rdev->flags); 4830 4831 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 4832 set_bit(WriteMostly, &rdev->flags); 4833 4834 if (!mddev->persistent) { 4835 printk(KERN_INFO "md: nonpersistent superblock ...\n"); 4836 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512; 4837 } else 4838 rdev->sb_start = calc_dev_sboffset(rdev->bdev); 4839 rdev->sectors = calc_num_sectors(rdev, mddev->chunk_size); 4840 4841 err = bind_rdev_to_array(rdev, mddev); 4842 if (err) { 4843 export_rdev(rdev); 4844 return err; 4845 } 4846 } 4847 4848 return 0; 4849 } 4850 4851 static int hot_remove_disk(mddev_t * mddev, dev_t dev) 4852 { 4853 char b[BDEVNAME_SIZE]; 4854 mdk_rdev_t *rdev; 4855 4856 rdev = find_rdev(mddev, dev); 4857 if (!rdev) 4858 return -ENXIO; 4859 4860 if (rdev->raid_disk >= 0) 4861 goto busy; 4862 4863 kick_rdev_from_array(rdev); 4864 md_update_sb(mddev, 1); 4865 md_new_event(mddev); 4866 4867 return 0; 4868 busy: 4869 printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n", 4870 bdevname(rdev->bdev,b), mdname(mddev)); 4871 return -EBUSY; 4872 } 4873 4874 static int hot_add_disk(mddev_t * mddev, dev_t dev) 4875 { 4876 char b[BDEVNAME_SIZE]; 4877 int err; 4878 mdk_rdev_t *rdev; 4879 4880 if (!mddev->pers) 4881 return -ENODEV; 4882 4883 if (mddev->major_version != 0) { 4884 printk(KERN_WARNING "%s: HOT_ADD may only be used with" 4885 " version-0 superblocks.\n", 4886 mdname(mddev)); 4887 return -EINVAL; 4888 } 4889 if (!mddev->pers->hot_add_disk) { 4890 printk(KERN_WARNING 4891 "%s: personality does not support diskops!\n", 4892 mdname(mddev)); 4893 return -EINVAL; 4894 } 4895 4896 rdev = md_import_device(dev, -1, 0); 4897 if (IS_ERR(rdev)) { 4898 printk(KERN_WARNING 4899 "md: error, md_import_device() returned %ld\n", 4900 PTR_ERR(rdev)); 4901 return -EINVAL; 4902 } 4903 4904 if (mddev->persistent) 4905 rdev->sb_start = calc_dev_sboffset(rdev->bdev); 4906 else 4907 rdev->sb_start = rdev->bdev->bd_inode->i_size / 512; 4908 4909 rdev->sectors = calc_num_sectors(rdev, mddev->chunk_size); 4910 4911 if (test_bit(Faulty, &rdev->flags)) { 4912 printk(KERN_WARNING 4913 "md: can not hot-add faulty %s disk to %s!\n", 4914 bdevname(rdev->bdev,b), mdname(mddev)); 4915 err = -EINVAL; 4916 goto abort_export; 4917 } 4918 clear_bit(In_sync, &rdev->flags); 4919 rdev->desc_nr = -1; 4920 rdev->saved_raid_disk = -1; 4921 err = bind_rdev_to_array(rdev, mddev); 4922 if (err) 4923 goto abort_export; 4924 4925 /* 4926 * The rest should better be atomic, we can have disk failures 4927 * noticed in interrupt contexts ... 4928 */ 4929 4930 rdev->raid_disk = -1; 4931 4932 md_update_sb(mddev, 1); 4933 4934 /* 4935 * Kick recovery, maybe this spare has to be added to the 4936 * array immediately. 4937 */ 4938 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4939 md_wakeup_thread(mddev->thread); 4940 md_new_event(mddev); 4941 return 0; 4942 4943 abort_export: 4944 export_rdev(rdev); 4945 return err; 4946 } 4947 4948 static int set_bitmap_file(mddev_t *mddev, int fd) 4949 { 4950 int err; 4951 4952 if (mddev->pers) { 4953 if (!mddev->pers->quiesce) 4954 return -EBUSY; 4955 if (mddev->recovery || mddev->sync_thread) 4956 return -EBUSY; 4957 /* we should be able to change the bitmap.. */ 4958 } 4959 4960 4961 if (fd >= 0) { 4962 if (mddev->bitmap) 4963 return -EEXIST; /* cannot add when bitmap is present */ 4964 mddev->bitmap_file = fget(fd); 4965 4966 if (mddev->bitmap_file == NULL) { 4967 printk(KERN_ERR "%s: error: failed to get bitmap file\n", 4968 mdname(mddev)); 4969 return -EBADF; 4970 } 4971 4972 err = deny_bitmap_write_access(mddev->bitmap_file); 4973 if (err) { 4974 printk(KERN_ERR "%s: error: bitmap file is already in use\n", 4975 mdname(mddev)); 4976 fput(mddev->bitmap_file); 4977 mddev->bitmap_file = NULL; 4978 return err; 4979 } 4980 mddev->bitmap_offset = 0; /* file overrides offset */ 4981 } else if (mddev->bitmap == NULL) 4982 return -ENOENT; /* cannot remove what isn't there */ 4983 err = 0; 4984 if (mddev->pers) { 4985 mddev->pers->quiesce(mddev, 1); 4986 if (fd >= 0) 4987 err = bitmap_create(mddev); 4988 if (fd < 0 || err) { 4989 bitmap_destroy(mddev); 4990 fd = -1; /* make sure to put the file */ 4991 } 4992 mddev->pers->quiesce(mddev, 0); 4993 } 4994 if (fd < 0) { 4995 if (mddev->bitmap_file) { 4996 restore_bitmap_write_access(mddev->bitmap_file); 4997 fput(mddev->bitmap_file); 4998 } 4999 mddev->bitmap_file = NULL; 5000 } 5001 5002 return err; 5003 } 5004 5005 /* 5006 * set_array_info is used two different ways 5007 * The original usage is when creating a new array. 5008 * In this usage, raid_disks is > 0 and it together with 5009 * level, size, not_persistent,layout,chunksize determine the 5010 * shape of the array. 5011 * This will always create an array with a type-0.90.0 superblock. 5012 * The newer usage is when assembling an array. 5013 * In this case raid_disks will be 0, and the major_version field is 5014 * use to determine which style super-blocks are to be found on the devices. 5015 * The minor and patch _version numbers are also kept incase the 5016 * super_block handler wishes to interpret them. 5017 */ 5018 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) 5019 { 5020 5021 if (info->raid_disks == 0) { 5022 /* just setting version number for superblock loading */ 5023 if (info->major_version < 0 || 5024 info->major_version >= ARRAY_SIZE(super_types) || 5025 super_types[info->major_version].name == NULL) { 5026 /* maybe try to auto-load a module? */ 5027 printk(KERN_INFO 5028 "md: superblock version %d not known\n", 5029 info->major_version); 5030 return -EINVAL; 5031 } 5032 mddev->major_version = info->major_version; 5033 mddev->minor_version = info->minor_version; 5034 mddev->patch_version = info->patch_version; 5035 mddev->persistent = !info->not_persistent; 5036 return 0; 5037 } 5038 mddev->major_version = MD_MAJOR_VERSION; 5039 mddev->minor_version = MD_MINOR_VERSION; 5040 mddev->patch_version = MD_PATCHLEVEL_VERSION; 5041 mddev->ctime = get_seconds(); 5042 5043 mddev->level = info->level; 5044 mddev->clevel[0] = 0; 5045 mddev->dev_sectors = 2 * (sector_t)info->size; 5046 mddev->raid_disks = info->raid_disks; 5047 /* don't set md_minor, it is determined by which /dev/md* was 5048 * openned 5049 */ 5050 if (info->state & (1<<MD_SB_CLEAN)) 5051 mddev->recovery_cp = MaxSector; 5052 else 5053 mddev->recovery_cp = 0; 5054 mddev->persistent = ! info->not_persistent; 5055 mddev->external = 0; 5056 5057 mddev->layout = info->layout; 5058 mddev->chunk_size = info->chunk_size; 5059 5060 mddev->max_disks = MD_SB_DISKS; 5061 5062 if (mddev->persistent) 5063 mddev->flags = 0; 5064 set_bit(MD_CHANGE_DEVS, &mddev->flags); 5065 5066 mddev->default_bitmap_offset = MD_SB_BYTES >> 9; 5067 mddev->bitmap_offset = 0; 5068 5069 mddev->reshape_position = MaxSector; 5070 5071 /* 5072 * Generate a 128 bit UUID 5073 */ 5074 get_random_bytes(mddev->uuid, 16); 5075 5076 mddev->new_level = mddev->level; 5077 mddev->new_chunk = mddev->chunk_size; 5078 mddev->new_layout = mddev->layout; 5079 mddev->delta_disks = 0; 5080 5081 return 0; 5082 } 5083 5084 void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors) 5085 { 5086 WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__); 5087 5088 if (mddev->external_size) 5089 return; 5090 5091 mddev->array_sectors = array_sectors; 5092 } 5093 EXPORT_SYMBOL(md_set_array_sectors); 5094 5095 static int update_size(mddev_t *mddev, sector_t num_sectors) 5096 { 5097 mdk_rdev_t *rdev; 5098 int rv; 5099 int fit = (num_sectors == 0); 5100 5101 if (mddev->pers->resize == NULL) 5102 return -EINVAL; 5103 /* The "num_sectors" is the number of sectors of each device that 5104 * is used. This can only make sense for arrays with redundancy. 5105 * linear and raid0 always use whatever space is available. We can only 5106 * consider changing this number if no resync or reconstruction is 5107 * happening, and if the new size is acceptable. It must fit before the 5108 * sb_start or, if that is <data_offset, it must fit before the size 5109 * of each device. If num_sectors is zero, we find the largest size 5110 * that fits. 5111 5112 */ 5113 if (mddev->sync_thread) 5114 return -EBUSY; 5115 if (mddev->bitmap) 5116 /* Sorry, cannot grow a bitmap yet, just remove it, 5117 * grow, and re-add. 5118 */ 5119 return -EBUSY; 5120 list_for_each_entry(rdev, &mddev->disks, same_set) { 5121 sector_t avail = rdev->sectors; 5122 5123 if (fit && (num_sectors == 0 || num_sectors > avail)) 5124 num_sectors = avail; 5125 if (avail < num_sectors) 5126 return -ENOSPC; 5127 } 5128 rv = mddev->pers->resize(mddev, num_sectors); 5129 if (!rv) { 5130 struct block_device *bdev; 5131 5132 bdev = bdget_disk(mddev->gendisk, 0); 5133 if (bdev) { 5134 mutex_lock(&bdev->bd_inode->i_mutex); 5135 i_size_write(bdev->bd_inode, 5136 (loff_t)mddev->array_sectors << 9); 5137 mutex_unlock(&bdev->bd_inode->i_mutex); 5138 bdput(bdev); 5139 } 5140 } 5141 return rv; 5142 } 5143 5144 static int update_raid_disks(mddev_t *mddev, int raid_disks) 5145 { 5146 int rv; 5147 /* change the number of raid disks */ 5148 if (mddev->pers->check_reshape == NULL) 5149 return -EINVAL; 5150 if (raid_disks <= 0 || 5151 raid_disks >= mddev->max_disks) 5152 return -EINVAL; 5153 if (mddev->sync_thread || mddev->reshape_position != MaxSector) 5154 return -EBUSY; 5155 mddev->delta_disks = raid_disks - mddev->raid_disks; 5156 5157 rv = mddev->pers->check_reshape(mddev); 5158 return rv; 5159 } 5160 5161 5162 /* 5163 * update_array_info is used to change the configuration of an 5164 * on-line array. 5165 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size 5166 * fields in the info are checked against the array. 5167 * Any differences that cannot be handled will cause an error. 5168 * Normally, only one change can be managed at a time. 5169 */ 5170 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info) 5171 { 5172 int rv = 0; 5173 int cnt = 0; 5174 int state = 0; 5175 5176 /* calculate expected state,ignoring low bits */ 5177 if (mddev->bitmap && mddev->bitmap_offset) 5178 state |= (1 << MD_SB_BITMAP_PRESENT); 5179 5180 if (mddev->major_version != info->major_version || 5181 mddev->minor_version != info->minor_version || 5182 /* mddev->patch_version != info->patch_version || */ 5183 mddev->ctime != info->ctime || 5184 mddev->level != info->level || 5185 /* mddev->layout != info->layout || */ 5186 !mddev->persistent != info->not_persistent|| 5187 mddev->chunk_size != info->chunk_size || 5188 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ 5189 ((state^info->state) & 0xfffffe00) 5190 ) 5191 return -EINVAL; 5192 /* Check there is only one change */ 5193 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 5194 cnt++; 5195 if (mddev->raid_disks != info->raid_disks) 5196 cnt++; 5197 if (mddev->layout != info->layout) 5198 cnt++; 5199 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) 5200 cnt++; 5201 if (cnt == 0) 5202 return 0; 5203 if (cnt > 1) 5204 return -EINVAL; 5205 5206 if (mddev->layout != info->layout) { 5207 /* Change layout 5208 * we don't need to do anything at the md level, the 5209 * personality will take care of it all. 5210 */ 5211 if (mddev->pers->reconfig == NULL) 5212 return -EINVAL; 5213 else 5214 return mddev->pers->reconfig(mddev, info->layout, -1); 5215 } 5216 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 5217 rv = update_size(mddev, (sector_t)info->size * 2); 5218 5219 if (mddev->raid_disks != info->raid_disks) 5220 rv = update_raid_disks(mddev, info->raid_disks); 5221 5222 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { 5223 if (mddev->pers->quiesce == NULL) 5224 return -EINVAL; 5225 if (mddev->recovery || mddev->sync_thread) 5226 return -EBUSY; 5227 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) { 5228 /* add the bitmap */ 5229 if (mddev->bitmap) 5230 return -EEXIST; 5231 if (mddev->default_bitmap_offset == 0) 5232 return -EINVAL; 5233 mddev->bitmap_offset = mddev->default_bitmap_offset; 5234 mddev->pers->quiesce(mddev, 1); 5235 rv = bitmap_create(mddev); 5236 if (rv) 5237 bitmap_destroy(mddev); 5238 mddev->pers->quiesce(mddev, 0); 5239 } else { 5240 /* remove the bitmap */ 5241 if (!mddev->bitmap) 5242 return -ENOENT; 5243 if (mddev->bitmap->file) 5244 return -EINVAL; 5245 mddev->pers->quiesce(mddev, 1); 5246 bitmap_destroy(mddev); 5247 mddev->pers->quiesce(mddev, 0); 5248 mddev->bitmap_offset = 0; 5249 } 5250 } 5251 md_update_sb(mddev, 1); 5252 return rv; 5253 } 5254 5255 static int set_disk_faulty(mddev_t *mddev, dev_t dev) 5256 { 5257 mdk_rdev_t *rdev; 5258 5259 if (mddev->pers == NULL) 5260 return -ENODEV; 5261 5262 rdev = find_rdev(mddev, dev); 5263 if (!rdev) 5264 return -ENODEV; 5265 5266 md_error(mddev, rdev); 5267 return 0; 5268 } 5269 5270 /* 5271 * We have a problem here : there is no easy way to give a CHS 5272 * virtual geometry. We currently pretend that we have a 2 heads 5273 * 4 sectors (with a BIG number of cylinders...). This drives 5274 * dosfs just mad... ;-) 5275 */ 5276 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) 5277 { 5278 mddev_t *mddev = bdev->bd_disk->private_data; 5279 5280 geo->heads = 2; 5281 geo->sectors = 4; 5282 geo->cylinders = get_capacity(mddev->gendisk) / 8; 5283 return 0; 5284 } 5285 5286 static int md_ioctl(struct block_device *bdev, fmode_t mode, 5287 unsigned int cmd, unsigned long arg) 5288 { 5289 int err = 0; 5290 void __user *argp = (void __user *)arg; 5291 mddev_t *mddev = NULL; 5292 5293 if (!capable(CAP_SYS_ADMIN)) 5294 return -EACCES; 5295 5296 /* 5297 * Commands dealing with the RAID driver but not any 5298 * particular array: 5299 */ 5300 switch (cmd) 5301 { 5302 case RAID_VERSION: 5303 err = get_version(argp); 5304 goto done; 5305 5306 case PRINT_RAID_DEBUG: 5307 err = 0; 5308 md_print_devices(); 5309 goto done; 5310 5311 #ifndef MODULE 5312 case RAID_AUTORUN: 5313 err = 0; 5314 autostart_arrays(arg); 5315 goto done; 5316 #endif 5317 default:; 5318 } 5319 5320 /* 5321 * Commands creating/starting a new array: 5322 */ 5323 5324 mddev = bdev->bd_disk->private_data; 5325 5326 if (!mddev) { 5327 BUG(); 5328 goto abort; 5329 } 5330 5331 err = mddev_lock(mddev); 5332 if (err) { 5333 printk(KERN_INFO 5334 "md: ioctl lock interrupted, reason %d, cmd %d\n", 5335 err, cmd); 5336 goto abort; 5337 } 5338 5339 switch (cmd) 5340 { 5341 case SET_ARRAY_INFO: 5342 { 5343 mdu_array_info_t info; 5344 if (!arg) 5345 memset(&info, 0, sizeof(info)); 5346 else if (copy_from_user(&info, argp, sizeof(info))) { 5347 err = -EFAULT; 5348 goto abort_unlock; 5349 } 5350 if (mddev->pers) { 5351 err = update_array_info(mddev, &info); 5352 if (err) { 5353 printk(KERN_WARNING "md: couldn't update" 5354 " array info. %d\n", err); 5355 goto abort_unlock; 5356 } 5357 goto done_unlock; 5358 } 5359 if (!list_empty(&mddev->disks)) { 5360 printk(KERN_WARNING 5361 "md: array %s already has disks!\n", 5362 mdname(mddev)); 5363 err = -EBUSY; 5364 goto abort_unlock; 5365 } 5366 if (mddev->raid_disks) { 5367 printk(KERN_WARNING 5368 "md: array %s already initialised!\n", 5369 mdname(mddev)); 5370 err = -EBUSY; 5371 goto abort_unlock; 5372 } 5373 err = set_array_info(mddev, &info); 5374 if (err) { 5375 printk(KERN_WARNING "md: couldn't set" 5376 " array info. %d\n", err); 5377 goto abort_unlock; 5378 } 5379 } 5380 goto done_unlock; 5381 5382 default:; 5383 } 5384 5385 /* 5386 * Commands querying/configuring an existing array: 5387 */ 5388 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY, 5389 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */ 5390 if ((!mddev->raid_disks && !mddev->external) 5391 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY 5392 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE 5393 && cmd != GET_BITMAP_FILE) { 5394 err = -ENODEV; 5395 goto abort_unlock; 5396 } 5397 5398 /* 5399 * Commands even a read-only array can execute: 5400 */ 5401 switch (cmd) 5402 { 5403 case GET_ARRAY_INFO: 5404 err = get_array_info(mddev, argp); 5405 goto done_unlock; 5406 5407 case GET_BITMAP_FILE: 5408 err = get_bitmap_file(mddev, argp); 5409 goto done_unlock; 5410 5411 case GET_DISK_INFO: 5412 err = get_disk_info(mddev, argp); 5413 goto done_unlock; 5414 5415 case RESTART_ARRAY_RW: 5416 err = restart_array(mddev); 5417 goto done_unlock; 5418 5419 case STOP_ARRAY: 5420 err = do_md_stop(mddev, 0, 1); 5421 goto done_unlock; 5422 5423 case STOP_ARRAY_RO: 5424 err = do_md_stop(mddev, 1, 1); 5425 goto done_unlock; 5426 5427 } 5428 5429 /* 5430 * The remaining ioctls are changing the state of the 5431 * superblock, so we do not allow them on read-only arrays. 5432 * However non-MD ioctls (e.g. get-size) will still come through 5433 * here and hit the 'default' below, so only disallow 5434 * 'md' ioctls, and switch to rw mode if started auto-readonly. 5435 */ 5436 if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) { 5437 if (mddev->ro == 2) { 5438 mddev->ro = 0; 5439 sysfs_notify_dirent(mddev->sysfs_state); 5440 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5441 md_wakeup_thread(mddev->thread); 5442 } else { 5443 err = -EROFS; 5444 goto abort_unlock; 5445 } 5446 } 5447 5448 switch (cmd) 5449 { 5450 case ADD_NEW_DISK: 5451 { 5452 mdu_disk_info_t info; 5453 if (copy_from_user(&info, argp, sizeof(info))) 5454 err = -EFAULT; 5455 else 5456 err = add_new_disk(mddev, &info); 5457 goto done_unlock; 5458 } 5459 5460 case HOT_REMOVE_DISK: 5461 err = hot_remove_disk(mddev, new_decode_dev(arg)); 5462 goto done_unlock; 5463 5464 case HOT_ADD_DISK: 5465 err = hot_add_disk(mddev, new_decode_dev(arg)); 5466 goto done_unlock; 5467 5468 case SET_DISK_FAULTY: 5469 err = set_disk_faulty(mddev, new_decode_dev(arg)); 5470 goto done_unlock; 5471 5472 case RUN_ARRAY: 5473 err = do_md_run(mddev); 5474 goto done_unlock; 5475 5476 case SET_BITMAP_FILE: 5477 err = set_bitmap_file(mddev, (int)arg); 5478 goto done_unlock; 5479 5480 default: 5481 err = -EINVAL; 5482 goto abort_unlock; 5483 } 5484 5485 done_unlock: 5486 abort_unlock: 5487 if (mddev->hold_active == UNTIL_IOCTL && 5488 err != -EINVAL) 5489 mddev->hold_active = 0; 5490 mddev_unlock(mddev); 5491 5492 return err; 5493 done: 5494 if (err) 5495 MD_BUG(); 5496 abort: 5497 return err; 5498 } 5499 5500 static int md_open(struct block_device *bdev, fmode_t mode) 5501 { 5502 /* 5503 * Succeed if we can lock the mddev, which confirms that 5504 * it isn't being stopped right now. 5505 */ 5506 mddev_t *mddev = mddev_find(bdev->bd_dev); 5507 int err; 5508 5509 if (mddev->gendisk != bdev->bd_disk) { 5510 /* we are racing with mddev_put which is discarding this 5511 * bd_disk. 5512 */ 5513 mddev_put(mddev); 5514 /* Wait until bdev->bd_disk is definitely gone */ 5515 flush_scheduled_work(); 5516 /* Then retry the open from the top */ 5517 return -ERESTARTSYS; 5518 } 5519 BUG_ON(mddev != bdev->bd_disk->private_data); 5520 5521 if ((err = mutex_lock_interruptible_nested(&mddev->reconfig_mutex, 1))) 5522 goto out; 5523 5524 err = 0; 5525 atomic_inc(&mddev->openers); 5526 mddev_unlock(mddev); 5527 5528 check_disk_change(bdev); 5529 out: 5530 return err; 5531 } 5532 5533 static int md_release(struct gendisk *disk, fmode_t mode) 5534 { 5535 mddev_t *mddev = disk->private_data; 5536 5537 BUG_ON(!mddev); 5538 atomic_dec(&mddev->openers); 5539 mddev_put(mddev); 5540 5541 return 0; 5542 } 5543 5544 static int md_media_changed(struct gendisk *disk) 5545 { 5546 mddev_t *mddev = disk->private_data; 5547 5548 return mddev->changed; 5549 } 5550 5551 static int md_revalidate(struct gendisk *disk) 5552 { 5553 mddev_t *mddev = disk->private_data; 5554 5555 mddev->changed = 0; 5556 return 0; 5557 } 5558 static struct block_device_operations md_fops = 5559 { 5560 .owner = THIS_MODULE, 5561 .open = md_open, 5562 .release = md_release, 5563 .locked_ioctl = md_ioctl, 5564 .getgeo = md_getgeo, 5565 .media_changed = md_media_changed, 5566 .revalidate_disk= md_revalidate, 5567 }; 5568 5569 static int md_thread(void * arg) 5570 { 5571 mdk_thread_t *thread = arg; 5572 5573 /* 5574 * md_thread is a 'system-thread', it's priority should be very 5575 * high. We avoid resource deadlocks individually in each 5576 * raid personality. (RAID5 does preallocation) We also use RR and 5577 * the very same RT priority as kswapd, thus we will never get 5578 * into a priority inversion deadlock. 5579 * 5580 * we definitely have to have equal or higher priority than 5581 * bdflush, otherwise bdflush will deadlock if there are too 5582 * many dirty RAID5 blocks. 5583 */ 5584 5585 allow_signal(SIGKILL); 5586 while (!kthread_should_stop()) { 5587 5588 /* We need to wait INTERRUPTIBLE so that 5589 * we don't add to the load-average. 5590 * That means we need to be sure no signals are 5591 * pending 5592 */ 5593 if (signal_pending(current)) 5594 flush_signals(current); 5595 5596 wait_event_interruptible_timeout 5597 (thread->wqueue, 5598 test_bit(THREAD_WAKEUP, &thread->flags) 5599 || kthread_should_stop(), 5600 thread->timeout); 5601 5602 clear_bit(THREAD_WAKEUP, &thread->flags); 5603 5604 thread->run(thread->mddev); 5605 } 5606 5607 return 0; 5608 } 5609 5610 void md_wakeup_thread(mdk_thread_t *thread) 5611 { 5612 if (thread) { 5613 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm); 5614 set_bit(THREAD_WAKEUP, &thread->flags); 5615 wake_up(&thread->wqueue); 5616 } 5617 } 5618 5619 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev, 5620 const char *name) 5621 { 5622 mdk_thread_t *thread; 5623 5624 thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL); 5625 if (!thread) 5626 return NULL; 5627 5628 init_waitqueue_head(&thread->wqueue); 5629 5630 thread->run = run; 5631 thread->mddev = mddev; 5632 thread->timeout = MAX_SCHEDULE_TIMEOUT; 5633 thread->tsk = kthread_run(md_thread, thread, name, mdname(thread->mddev)); 5634 if (IS_ERR(thread->tsk)) { 5635 kfree(thread); 5636 return NULL; 5637 } 5638 return thread; 5639 } 5640 5641 void md_unregister_thread(mdk_thread_t *thread) 5642 { 5643 if (!thread) 5644 return; 5645 dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); 5646 5647 kthread_stop(thread->tsk); 5648 kfree(thread); 5649 } 5650 5651 void md_error(mddev_t *mddev, mdk_rdev_t *rdev) 5652 { 5653 if (!mddev) { 5654 MD_BUG(); 5655 return; 5656 } 5657 5658 if (!rdev || test_bit(Faulty, &rdev->flags)) 5659 return; 5660 5661 if (mddev->external) 5662 set_bit(Blocked, &rdev->flags); 5663 /* 5664 dprintk("md_error dev:%s, rdev:(%d:%d), (caller: %p,%p,%p,%p).\n", 5665 mdname(mddev), 5666 MAJOR(rdev->bdev->bd_dev), MINOR(rdev->bdev->bd_dev), 5667 __builtin_return_address(0),__builtin_return_address(1), 5668 __builtin_return_address(2),__builtin_return_address(3)); 5669 */ 5670 if (!mddev->pers) 5671 return; 5672 if (!mddev->pers->error_handler) 5673 return; 5674 mddev->pers->error_handler(mddev,rdev); 5675 if (mddev->degraded) 5676 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 5677 set_bit(StateChanged, &rdev->flags); 5678 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 5679 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5680 md_wakeup_thread(mddev->thread); 5681 md_new_event_inintr(mddev); 5682 } 5683 5684 /* seq_file implementation /proc/mdstat */ 5685 5686 static void status_unused(struct seq_file *seq) 5687 { 5688 int i = 0; 5689 mdk_rdev_t *rdev; 5690 5691 seq_printf(seq, "unused devices: "); 5692 5693 list_for_each_entry(rdev, &pending_raid_disks, same_set) { 5694 char b[BDEVNAME_SIZE]; 5695 i++; 5696 seq_printf(seq, "%s ", 5697 bdevname(rdev->bdev,b)); 5698 } 5699 if (!i) 5700 seq_printf(seq, "<none>"); 5701 5702 seq_printf(seq, "\n"); 5703 } 5704 5705 5706 static void status_resync(struct seq_file *seq, mddev_t * mddev) 5707 { 5708 sector_t max_blocks, resync, res; 5709 unsigned long dt, db, rt; 5710 int scale; 5711 unsigned int per_milli; 5712 5713 resync = (mddev->curr_resync - atomic_read(&mddev->recovery_active))/2; 5714 5715 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 5716 max_blocks = mddev->resync_max_sectors >> 1; 5717 else 5718 max_blocks = mddev->dev_sectors / 2; 5719 5720 /* 5721 * Should not happen. 5722 */ 5723 if (!max_blocks) { 5724 MD_BUG(); 5725 return; 5726 } 5727 /* Pick 'scale' such that (resync>>scale)*1000 will fit 5728 * in a sector_t, and (max_blocks>>scale) will fit in a 5729 * u32, as those are the requirements for sector_div. 5730 * Thus 'scale' must be at least 10 5731 */ 5732 scale = 10; 5733 if (sizeof(sector_t) > sizeof(unsigned long)) { 5734 while ( max_blocks/2 > (1ULL<<(scale+32))) 5735 scale++; 5736 } 5737 res = (resync>>scale)*1000; 5738 sector_div(res, (u32)((max_blocks>>scale)+1)); 5739 5740 per_milli = res; 5741 { 5742 int i, x = per_milli/50, y = 20-x; 5743 seq_printf(seq, "["); 5744 for (i = 0; i < x; i++) 5745 seq_printf(seq, "="); 5746 seq_printf(seq, ">"); 5747 for (i = 0; i < y; i++) 5748 seq_printf(seq, "."); 5749 seq_printf(seq, "] "); 5750 } 5751 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)", 5752 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? 5753 "reshape" : 5754 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)? 5755 "check" : 5756 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? 5757 "resync" : "recovery"))), 5758 per_milli/10, per_milli % 10, 5759 (unsigned long long) resync, 5760 (unsigned long long) max_blocks); 5761 5762 /* 5763 * We do not want to overflow, so the order of operands and 5764 * the * 100 / 100 trick are important. We do a +1 to be 5765 * safe against division by zero. We only estimate anyway. 5766 * 5767 * dt: time from mark until now 5768 * db: blocks written from mark until now 5769 * rt: remaining time 5770 */ 5771 dt = ((jiffies - mddev->resync_mark) / HZ); 5772 if (!dt) dt++; 5773 db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active)) 5774 - mddev->resync_mark_cnt; 5775 rt = (dt * ((unsigned long)(max_blocks-resync) / (db/2/100+1)))/100; 5776 5777 seq_printf(seq, " finish=%lu.%lumin", rt / 60, (rt % 60)/6); 5778 5779 seq_printf(seq, " speed=%ldK/sec", db/2/dt); 5780 } 5781 5782 static void *md_seq_start(struct seq_file *seq, loff_t *pos) 5783 { 5784 struct list_head *tmp; 5785 loff_t l = *pos; 5786 mddev_t *mddev; 5787 5788 if (l >= 0x10000) 5789 return NULL; 5790 if (!l--) 5791 /* header */ 5792 return (void*)1; 5793 5794 spin_lock(&all_mddevs_lock); 5795 list_for_each(tmp,&all_mddevs) 5796 if (!l--) { 5797 mddev = list_entry(tmp, mddev_t, all_mddevs); 5798 mddev_get(mddev); 5799 spin_unlock(&all_mddevs_lock); 5800 return mddev; 5801 } 5802 spin_unlock(&all_mddevs_lock); 5803 if (!l--) 5804 return (void*)2;/* tail */ 5805 return NULL; 5806 } 5807 5808 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) 5809 { 5810 struct list_head *tmp; 5811 mddev_t *next_mddev, *mddev = v; 5812 5813 ++*pos; 5814 if (v == (void*)2) 5815 return NULL; 5816 5817 spin_lock(&all_mddevs_lock); 5818 if (v == (void*)1) 5819 tmp = all_mddevs.next; 5820 else 5821 tmp = mddev->all_mddevs.next; 5822 if (tmp != &all_mddevs) 5823 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs)); 5824 else { 5825 next_mddev = (void*)2; 5826 *pos = 0x10000; 5827 } 5828 spin_unlock(&all_mddevs_lock); 5829 5830 if (v != (void*)1) 5831 mddev_put(mddev); 5832 return next_mddev; 5833 5834 } 5835 5836 static void md_seq_stop(struct seq_file *seq, void *v) 5837 { 5838 mddev_t *mddev = v; 5839 5840 if (mddev && v != (void*)1 && v != (void*)2) 5841 mddev_put(mddev); 5842 } 5843 5844 struct mdstat_info { 5845 int event; 5846 }; 5847 5848 static int md_seq_show(struct seq_file *seq, void *v) 5849 { 5850 mddev_t *mddev = v; 5851 sector_t sectors; 5852 mdk_rdev_t *rdev; 5853 struct mdstat_info *mi = seq->private; 5854 struct bitmap *bitmap; 5855 5856 if (v == (void*)1) { 5857 struct mdk_personality *pers; 5858 seq_printf(seq, "Personalities : "); 5859 spin_lock(&pers_lock); 5860 list_for_each_entry(pers, &pers_list, list) 5861 seq_printf(seq, "[%s] ", pers->name); 5862 5863 spin_unlock(&pers_lock); 5864 seq_printf(seq, "\n"); 5865 mi->event = atomic_read(&md_event_count); 5866 return 0; 5867 } 5868 if (v == (void*)2) { 5869 status_unused(seq); 5870 return 0; 5871 } 5872 5873 if (mddev_lock(mddev) < 0) 5874 return -EINTR; 5875 5876 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { 5877 seq_printf(seq, "%s : %sactive", mdname(mddev), 5878 mddev->pers ? "" : "in"); 5879 if (mddev->pers) { 5880 if (mddev->ro==1) 5881 seq_printf(seq, " (read-only)"); 5882 if (mddev->ro==2) 5883 seq_printf(seq, " (auto-read-only)"); 5884 seq_printf(seq, " %s", mddev->pers->name); 5885 } 5886 5887 sectors = 0; 5888 list_for_each_entry(rdev, &mddev->disks, same_set) { 5889 char b[BDEVNAME_SIZE]; 5890 seq_printf(seq, " %s[%d]", 5891 bdevname(rdev->bdev,b), rdev->desc_nr); 5892 if (test_bit(WriteMostly, &rdev->flags)) 5893 seq_printf(seq, "(W)"); 5894 if (test_bit(Faulty, &rdev->flags)) { 5895 seq_printf(seq, "(F)"); 5896 continue; 5897 } else if (rdev->raid_disk < 0) 5898 seq_printf(seq, "(S)"); /* spare */ 5899 sectors += rdev->sectors; 5900 } 5901 5902 if (!list_empty(&mddev->disks)) { 5903 if (mddev->pers) 5904 seq_printf(seq, "\n %llu blocks", 5905 (unsigned long long) 5906 mddev->array_sectors / 2); 5907 else 5908 seq_printf(seq, "\n %llu blocks", 5909 (unsigned long long)sectors / 2); 5910 } 5911 if (mddev->persistent) { 5912 if (mddev->major_version != 0 || 5913 mddev->minor_version != 90) { 5914 seq_printf(seq," super %d.%d", 5915 mddev->major_version, 5916 mddev->minor_version); 5917 } 5918 } else if (mddev->external) 5919 seq_printf(seq, " super external:%s", 5920 mddev->metadata_type); 5921 else 5922 seq_printf(seq, " super non-persistent"); 5923 5924 if (mddev->pers) { 5925 mddev->pers->status(seq, mddev); 5926 seq_printf(seq, "\n "); 5927 if (mddev->pers->sync_request) { 5928 if (mddev->curr_resync > 2) { 5929 status_resync(seq, mddev); 5930 seq_printf(seq, "\n "); 5931 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2) 5932 seq_printf(seq, "\tresync=DELAYED\n "); 5933 else if (mddev->recovery_cp < MaxSector) 5934 seq_printf(seq, "\tresync=PENDING\n "); 5935 } 5936 } else 5937 seq_printf(seq, "\n "); 5938 5939 if ((bitmap = mddev->bitmap)) { 5940 unsigned long chunk_kb; 5941 unsigned long flags; 5942 spin_lock_irqsave(&bitmap->lock, flags); 5943 chunk_kb = bitmap->chunksize >> 10; 5944 seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], " 5945 "%lu%s chunk", 5946 bitmap->pages - bitmap->missing_pages, 5947 bitmap->pages, 5948 (bitmap->pages - bitmap->missing_pages) 5949 << (PAGE_SHIFT - 10), 5950 chunk_kb ? chunk_kb : bitmap->chunksize, 5951 chunk_kb ? "KB" : "B"); 5952 if (bitmap->file) { 5953 seq_printf(seq, ", file: "); 5954 seq_path(seq, &bitmap->file->f_path, " \t\n"); 5955 } 5956 5957 seq_printf(seq, "\n"); 5958 spin_unlock_irqrestore(&bitmap->lock, flags); 5959 } 5960 5961 seq_printf(seq, "\n"); 5962 } 5963 mddev_unlock(mddev); 5964 5965 return 0; 5966 } 5967 5968 static struct seq_operations md_seq_ops = { 5969 .start = md_seq_start, 5970 .next = md_seq_next, 5971 .stop = md_seq_stop, 5972 .show = md_seq_show, 5973 }; 5974 5975 static int md_seq_open(struct inode *inode, struct file *file) 5976 { 5977 int error; 5978 struct mdstat_info *mi = kmalloc(sizeof(*mi), GFP_KERNEL); 5979 if (mi == NULL) 5980 return -ENOMEM; 5981 5982 error = seq_open(file, &md_seq_ops); 5983 if (error) 5984 kfree(mi); 5985 else { 5986 struct seq_file *p = file->private_data; 5987 p->private = mi; 5988 mi->event = atomic_read(&md_event_count); 5989 } 5990 return error; 5991 } 5992 5993 static unsigned int mdstat_poll(struct file *filp, poll_table *wait) 5994 { 5995 struct seq_file *m = filp->private_data; 5996 struct mdstat_info *mi = m->private; 5997 int mask; 5998 5999 poll_wait(filp, &md_event_waiters, wait); 6000 6001 /* always allow read */ 6002 mask = POLLIN | POLLRDNORM; 6003 6004 if (mi->event != atomic_read(&md_event_count)) 6005 mask |= POLLERR | POLLPRI; 6006 return mask; 6007 } 6008 6009 static const struct file_operations md_seq_fops = { 6010 .owner = THIS_MODULE, 6011 .open = md_seq_open, 6012 .read = seq_read, 6013 .llseek = seq_lseek, 6014 .release = seq_release_private, 6015 .poll = mdstat_poll, 6016 }; 6017 6018 int register_md_personality(struct mdk_personality *p) 6019 { 6020 spin_lock(&pers_lock); 6021 list_add_tail(&p->list, &pers_list); 6022 printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level); 6023 spin_unlock(&pers_lock); 6024 return 0; 6025 } 6026 6027 int unregister_md_personality(struct mdk_personality *p) 6028 { 6029 printk(KERN_INFO "md: %s personality unregistered\n", p->name); 6030 spin_lock(&pers_lock); 6031 list_del_init(&p->list); 6032 spin_unlock(&pers_lock); 6033 return 0; 6034 } 6035 6036 static int is_mddev_idle(mddev_t *mddev, int init) 6037 { 6038 mdk_rdev_t * rdev; 6039 int idle; 6040 int curr_events; 6041 6042 idle = 1; 6043 rcu_read_lock(); 6044 rdev_for_each_rcu(rdev, mddev) { 6045 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; 6046 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + 6047 (int)part_stat_read(&disk->part0, sectors[1]) - 6048 atomic_read(&disk->sync_io); 6049 /* sync IO will cause sync_io to increase before the disk_stats 6050 * as sync_io is counted when a request starts, and 6051 * disk_stats is counted when it completes. 6052 * So resync activity will cause curr_events to be smaller than 6053 * when there was no such activity. 6054 * non-sync IO will cause disk_stat to increase without 6055 * increasing sync_io so curr_events will (eventually) 6056 * be larger than it was before. Once it becomes 6057 * substantially larger, the test below will cause 6058 * the array to appear non-idle, and resync will slow 6059 * down. 6060 * If there is a lot of outstanding resync activity when 6061 * we set last_event to curr_events, then all that activity 6062 * completing might cause the array to appear non-idle 6063 * and resync will be slowed down even though there might 6064 * not have been non-resync activity. This will only 6065 * happen once though. 'last_events' will soon reflect 6066 * the state where there is little or no outstanding 6067 * resync requests, and further resync activity will 6068 * always make curr_events less than last_events. 6069 * 6070 */ 6071 if (init || curr_events - rdev->last_events > 64) { 6072 rdev->last_events = curr_events; 6073 idle = 0; 6074 } 6075 } 6076 rcu_read_unlock(); 6077 return idle; 6078 } 6079 6080 void md_done_sync(mddev_t *mddev, int blocks, int ok) 6081 { 6082 /* another "blocks" (512byte) blocks have been synced */ 6083 atomic_sub(blocks, &mddev->recovery_active); 6084 wake_up(&mddev->recovery_wait); 6085 if (!ok) { 6086 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6087 md_wakeup_thread(mddev->thread); 6088 // stop recovery, signal do_sync .... 6089 } 6090 } 6091 6092 6093 /* md_write_start(mddev, bi) 6094 * If we need to update some array metadata (e.g. 'active' flag 6095 * in superblock) before writing, schedule a superblock update 6096 * and wait for it to complete. 6097 */ 6098 void md_write_start(mddev_t *mddev, struct bio *bi) 6099 { 6100 int did_change = 0; 6101 if (bio_data_dir(bi) != WRITE) 6102 return; 6103 6104 BUG_ON(mddev->ro == 1); 6105 if (mddev->ro == 2) { 6106 /* need to switch to read/write */ 6107 mddev->ro = 0; 6108 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6109 md_wakeup_thread(mddev->thread); 6110 md_wakeup_thread(mddev->sync_thread); 6111 did_change = 1; 6112 } 6113 atomic_inc(&mddev->writes_pending); 6114 if (mddev->safemode == 1) 6115 mddev->safemode = 0; 6116 if (mddev->in_sync) { 6117 spin_lock_irq(&mddev->write_lock); 6118 if (mddev->in_sync) { 6119 mddev->in_sync = 0; 6120 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 6121 md_wakeup_thread(mddev->thread); 6122 did_change = 1; 6123 } 6124 spin_unlock_irq(&mddev->write_lock); 6125 } 6126 if (did_change) 6127 sysfs_notify_dirent(mddev->sysfs_state); 6128 wait_event(mddev->sb_wait, 6129 !test_bit(MD_CHANGE_CLEAN, &mddev->flags) && 6130 !test_bit(MD_CHANGE_PENDING, &mddev->flags)); 6131 } 6132 6133 void md_write_end(mddev_t *mddev) 6134 { 6135 if (atomic_dec_and_test(&mddev->writes_pending)) { 6136 if (mddev->safemode == 2) 6137 md_wakeup_thread(mddev->thread); 6138 else if (mddev->safemode_delay) 6139 mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay); 6140 } 6141 } 6142 6143 /* md_allow_write(mddev) 6144 * Calling this ensures that the array is marked 'active' so that writes 6145 * may proceed without blocking. It is important to call this before 6146 * attempting a GFP_KERNEL allocation while holding the mddev lock. 6147 * Must be called with mddev_lock held. 6148 * 6149 * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock 6150 * is dropped, so return -EAGAIN after notifying userspace. 6151 */ 6152 int md_allow_write(mddev_t *mddev) 6153 { 6154 if (!mddev->pers) 6155 return 0; 6156 if (mddev->ro) 6157 return 0; 6158 if (!mddev->pers->sync_request) 6159 return 0; 6160 6161 spin_lock_irq(&mddev->write_lock); 6162 if (mddev->in_sync) { 6163 mddev->in_sync = 0; 6164 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 6165 if (mddev->safemode_delay && 6166 mddev->safemode == 0) 6167 mddev->safemode = 1; 6168 spin_unlock_irq(&mddev->write_lock); 6169 md_update_sb(mddev, 0); 6170 sysfs_notify_dirent(mddev->sysfs_state); 6171 } else 6172 spin_unlock_irq(&mddev->write_lock); 6173 6174 if (test_bit(MD_CHANGE_CLEAN, &mddev->flags)) 6175 return -EAGAIN; 6176 else 6177 return 0; 6178 } 6179 EXPORT_SYMBOL_GPL(md_allow_write); 6180 6181 #define SYNC_MARKS 10 6182 #define SYNC_MARK_STEP (3*HZ) 6183 void md_do_sync(mddev_t *mddev) 6184 { 6185 mddev_t *mddev2; 6186 unsigned int currspeed = 0, 6187 window; 6188 sector_t max_sectors,j, io_sectors; 6189 unsigned long mark[SYNC_MARKS]; 6190 sector_t mark_cnt[SYNC_MARKS]; 6191 int last_mark,m; 6192 struct list_head *tmp; 6193 sector_t last_check; 6194 int skipped = 0; 6195 mdk_rdev_t *rdev; 6196 char *desc; 6197 6198 /* just incase thread restarts... */ 6199 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) 6200 return; 6201 if (mddev->ro) /* never try to sync a read-only array */ 6202 return; 6203 6204 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 6205 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) 6206 desc = "data-check"; 6207 else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 6208 desc = "requested-resync"; 6209 else 6210 desc = "resync"; 6211 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 6212 desc = "reshape"; 6213 else 6214 desc = "recovery"; 6215 6216 /* we overload curr_resync somewhat here. 6217 * 0 == not engaged in resync at all 6218 * 2 == checking that there is no conflict with another sync 6219 * 1 == like 2, but have yielded to allow conflicting resync to 6220 * commense 6221 * other == active in resync - this many blocks 6222 * 6223 * Before starting a resync we must have set curr_resync to 6224 * 2, and then checked that every "conflicting" array has curr_resync 6225 * less than ours. When we find one that is the same or higher 6226 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync 6227 * to 1 if we choose to yield (based arbitrarily on address of mddev structure). 6228 * This will mean we have to start checking from the beginning again. 6229 * 6230 */ 6231 6232 do { 6233 mddev->curr_resync = 2; 6234 6235 try_again: 6236 if (kthread_should_stop()) { 6237 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6238 goto skip; 6239 } 6240 for_each_mddev(mddev2, tmp) { 6241 if (mddev2 == mddev) 6242 continue; 6243 if (!mddev->parallel_resync 6244 && mddev2->curr_resync 6245 && match_mddev_units(mddev, mddev2)) { 6246 DEFINE_WAIT(wq); 6247 if (mddev < mddev2 && mddev->curr_resync == 2) { 6248 /* arbitrarily yield */ 6249 mddev->curr_resync = 1; 6250 wake_up(&resync_wait); 6251 } 6252 if (mddev > mddev2 && mddev->curr_resync == 1) 6253 /* no need to wait here, we can wait the next 6254 * time 'round when curr_resync == 2 6255 */ 6256 continue; 6257 /* We need to wait 'interruptible' so as not to 6258 * contribute to the load average, and not to 6259 * be caught by 'softlockup' 6260 */ 6261 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE); 6262 if (!kthread_should_stop() && 6263 mddev2->curr_resync >= mddev->curr_resync) { 6264 printk(KERN_INFO "md: delaying %s of %s" 6265 " until %s has finished (they" 6266 " share one or more physical units)\n", 6267 desc, mdname(mddev), mdname(mddev2)); 6268 mddev_put(mddev2); 6269 if (signal_pending(current)) 6270 flush_signals(current); 6271 schedule(); 6272 finish_wait(&resync_wait, &wq); 6273 goto try_again; 6274 } 6275 finish_wait(&resync_wait, &wq); 6276 } 6277 } 6278 } while (mddev->curr_resync < 2); 6279 6280 j = 0; 6281 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 6282 /* resync follows the size requested by the personality, 6283 * which defaults to physical size, but can be virtual size 6284 */ 6285 max_sectors = mddev->resync_max_sectors; 6286 mddev->resync_mismatches = 0; 6287 /* we don't use the checkpoint if there's a bitmap */ 6288 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 6289 j = mddev->resync_min; 6290 else if (!mddev->bitmap) 6291 j = mddev->recovery_cp; 6292 6293 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 6294 max_sectors = mddev->dev_sectors; 6295 else { 6296 /* recovery follows the physical size of devices */ 6297 max_sectors = mddev->dev_sectors; 6298 j = MaxSector; 6299 list_for_each_entry(rdev, &mddev->disks, same_set) 6300 if (rdev->raid_disk >= 0 && 6301 !test_bit(Faulty, &rdev->flags) && 6302 !test_bit(In_sync, &rdev->flags) && 6303 rdev->recovery_offset < j) 6304 j = rdev->recovery_offset; 6305 } 6306 6307 printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev)); 6308 printk(KERN_INFO "md: minimum _guaranteed_ speed:" 6309 " %d KB/sec/disk.\n", speed_min(mddev)); 6310 printk(KERN_INFO "md: using maximum available idle IO bandwidth " 6311 "(but not more than %d KB/sec) for %s.\n", 6312 speed_max(mddev), desc); 6313 6314 is_mddev_idle(mddev, 1); /* this initializes IO event counters */ 6315 6316 io_sectors = 0; 6317 for (m = 0; m < SYNC_MARKS; m++) { 6318 mark[m] = jiffies; 6319 mark_cnt[m] = io_sectors; 6320 } 6321 last_mark = 0; 6322 mddev->resync_mark = mark[last_mark]; 6323 mddev->resync_mark_cnt = mark_cnt[last_mark]; 6324 6325 /* 6326 * Tune reconstruction: 6327 */ 6328 window = 32*(PAGE_SIZE/512); 6329 printk(KERN_INFO "md: using %dk window, over a total of %llu blocks.\n", 6330 window/2,(unsigned long long) max_sectors/2); 6331 6332 atomic_set(&mddev->recovery_active, 0); 6333 last_check = 0; 6334 6335 if (j>2) { 6336 printk(KERN_INFO 6337 "md: resuming %s of %s from checkpoint.\n", 6338 desc, mdname(mddev)); 6339 mddev->curr_resync = j; 6340 } 6341 6342 while (j < max_sectors) { 6343 sector_t sectors; 6344 6345 skipped = 0; 6346 6347 if ((mddev->curr_resync > mddev->curr_resync_completed && 6348 (mddev->curr_resync - mddev->curr_resync_completed) 6349 > (max_sectors >> 4)) || 6350 (j - mddev->curr_resync_completed)*2 6351 >= mddev->resync_max - mddev->curr_resync_completed 6352 ) { 6353 /* time to update curr_resync_completed */ 6354 blk_unplug(mddev->queue); 6355 wait_event(mddev->recovery_wait, 6356 atomic_read(&mddev->recovery_active) == 0); 6357 mddev->curr_resync_completed = 6358 mddev->curr_resync; 6359 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 6360 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 6361 } 6362 6363 if (j >= mddev->resync_max) 6364 wait_event(mddev->recovery_wait, 6365 mddev->resync_max > j 6366 || kthread_should_stop()); 6367 6368 if (kthread_should_stop()) 6369 goto interrupted; 6370 6371 sectors = mddev->pers->sync_request(mddev, j, &skipped, 6372 currspeed < speed_min(mddev)); 6373 if (sectors == 0) { 6374 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6375 goto out; 6376 } 6377 6378 if (!skipped) { /* actual IO requested */ 6379 io_sectors += sectors; 6380 atomic_add(sectors, &mddev->recovery_active); 6381 } 6382 6383 j += sectors; 6384 if (j>1) mddev->curr_resync = j; 6385 mddev->curr_mark_cnt = io_sectors; 6386 if (last_check == 0) 6387 /* this is the earliers that rebuilt will be 6388 * visible in /proc/mdstat 6389 */ 6390 md_new_event(mddev); 6391 6392 if (last_check + window > io_sectors || j == max_sectors) 6393 continue; 6394 6395 last_check = io_sectors; 6396 6397 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 6398 break; 6399 6400 repeat: 6401 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) { 6402 /* step marks */ 6403 int next = (last_mark+1) % SYNC_MARKS; 6404 6405 mddev->resync_mark = mark[next]; 6406 mddev->resync_mark_cnt = mark_cnt[next]; 6407 mark[next] = jiffies; 6408 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active); 6409 last_mark = next; 6410 } 6411 6412 6413 if (kthread_should_stop()) 6414 goto interrupted; 6415 6416 6417 /* 6418 * this loop exits only if either when we are slower than 6419 * the 'hard' speed limit, or the system was IO-idle for 6420 * a jiffy. 6421 * the system might be non-idle CPU-wise, but we only care 6422 * about not overloading the IO subsystem. (things like an 6423 * e2fsck being done on the RAID array should execute fast) 6424 */ 6425 blk_unplug(mddev->queue); 6426 cond_resched(); 6427 6428 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2 6429 /((jiffies-mddev->resync_mark)/HZ +1) +1; 6430 6431 if (currspeed > speed_min(mddev)) { 6432 if ((currspeed > speed_max(mddev)) || 6433 !is_mddev_idle(mddev, 0)) { 6434 msleep(500); 6435 goto repeat; 6436 } 6437 } 6438 } 6439 printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc); 6440 /* 6441 * this also signals 'finished resyncing' to md_stop 6442 */ 6443 out: 6444 blk_unplug(mddev->queue); 6445 6446 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); 6447 6448 /* tell personality that we are finished */ 6449 mddev->pers->sync_request(mddev, max_sectors, &skipped, 1); 6450 6451 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && 6452 mddev->curr_resync > 2) { 6453 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 6454 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 6455 if (mddev->curr_resync >= mddev->recovery_cp) { 6456 printk(KERN_INFO 6457 "md: checkpointing %s of %s.\n", 6458 desc, mdname(mddev)); 6459 mddev->recovery_cp = mddev->curr_resync; 6460 } 6461 } else 6462 mddev->recovery_cp = MaxSector; 6463 } else { 6464 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 6465 mddev->curr_resync = MaxSector; 6466 list_for_each_entry(rdev, &mddev->disks, same_set) 6467 if (rdev->raid_disk >= 0 && 6468 !test_bit(Faulty, &rdev->flags) && 6469 !test_bit(In_sync, &rdev->flags) && 6470 rdev->recovery_offset < mddev->curr_resync) 6471 rdev->recovery_offset = mddev->curr_resync; 6472 } 6473 } 6474 set_bit(MD_CHANGE_DEVS, &mddev->flags); 6475 6476 skip: 6477 mddev->curr_resync = 0; 6478 mddev->curr_resync_completed = 0; 6479 mddev->resync_min = 0; 6480 mddev->resync_max = MaxSector; 6481 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 6482 wake_up(&resync_wait); 6483 set_bit(MD_RECOVERY_DONE, &mddev->recovery); 6484 md_wakeup_thread(mddev->thread); 6485 return; 6486 6487 interrupted: 6488 /* 6489 * got a signal, exit. 6490 */ 6491 printk(KERN_INFO 6492 "md: md_do_sync() got signal ... exiting\n"); 6493 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6494 goto out; 6495 6496 } 6497 EXPORT_SYMBOL_GPL(md_do_sync); 6498 6499 6500 static int remove_and_add_spares(mddev_t *mddev) 6501 { 6502 mdk_rdev_t *rdev; 6503 int spares = 0; 6504 6505 mddev->curr_resync_completed = 0; 6506 6507 list_for_each_entry(rdev, &mddev->disks, same_set) 6508 if (rdev->raid_disk >= 0 && 6509 !test_bit(Blocked, &rdev->flags) && 6510 (test_bit(Faulty, &rdev->flags) || 6511 ! test_bit(In_sync, &rdev->flags)) && 6512 atomic_read(&rdev->nr_pending)==0) { 6513 if (mddev->pers->hot_remove_disk( 6514 mddev, rdev->raid_disk)==0) { 6515 char nm[20]; 6516 sprintf(nm,"rd%d", rdev->raid_disk); 6517 sysfs_remove_link(&mddev->kobj, nm); 6518 rdev->raid_disk = -1; 6519 } 6520 } 6521 6522 if (mddev->degraded && ! mddev->ro && !mddev->recovery_disabled) { 6523 list_for_each_entry(rdev, &mddev->disks, same_set) { 6524 if (rdev->raid_disk >= 0 && 6525 !test_bit(In_sync, &rdev->flags) && 6526 !test_bit(Blocked, &rdev->flags)) 6527 spares++; 6528 if (rdev->raid_disk < 0 6529 && !test_bit(Faulty, &rdev->flags)) { 6530 rdev->recovery_offset = 0; 6531 if (mddev->pers-> 6532 hot_add_disk(mddev, rdev) == 0) { 6533 char nm[20]; 6534 sprintf(nm, "rd%d", rdev->raid_disk); 6535 if (sysfs_create_link(&mddev->kobj, 6536 &rdev->kobj, nm)) 6537 printk(KERN_WARNING 6538 "md: cannot register " 6539 "%s for %s\n", 6540 nm, mdname(mddev)); 6541 spares++; 6542 md_new_event(mddev); 6543 } else 6544 break; 6545 } 6546 } 6547 } 6548 return spares; 6549 } 6550 /* 6551 * This routine is regularly called by all per-raid-array threads to 6552 * deal with generic issues like resync and super-block update. 6553 * Raid personalities that don't have a thread (linear/raid0) do not 6554 * need this as they never do any recovery or update the superblock. 6555 * 6556 * It does not do any resync itself, but rather "forks" off other threads 6557 * to do that as needed. 6558 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in 6559 * "->recovery" and create a thread at ->sync_thread. 6560 * When the thread finishes it sets MD_RECOVERY_DONE 6561 * and wakeups up this thread which will reap the thread and finish up. 6562 * This thread also removes any faulty devices (with nr_pending == 0). 6563 * 6564 * The overall approach is: 6565 * 1/ if the superblock needs updating, update it. 6566 * 2/ If a recovery thread is running, don't do anything else. 6567 * 3/ If recovery has finished, clean up, possibly marking spares active. 6568 * 4/ If there are any faulty devices, remove them. 6569 * 5/ If array is degraded, try to add spares devices 6570 * 6/ If array has spares or is not in-sync, start a resync thread. 6571 */ 6572 void md_check_recovery(mddev_t *mddev) 6573 { 6574 mdk_rdev_t *rdev; 6575 6576 6577 if (mddev->bitmap) 6578 bitmap_daemon_work(mddev->bitmap); 6579 6580 if (mddev->ro) 6581 return; 6582 6583 if (signal_pending(current)) { 6584 if (mddev->pers->sync_request && !mddev->external) { 6585 printk(KERN_INFO "md: %s in immediate safe mode\n", 6586 mdname(mddev)); 6587 mddev->safemode = 2; 6588 } 6589 flush_signals(current); 6590 } 6591 6592 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 6593 return; 6594 if ( ! ( 6595 (mddev->flags && !mddev->external) || 6596 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || 6597 test_bit(MD_RECOVERY_DONE, &mddev->recovery) || 6598 (mddev->external == 0 && mddev->safemode == 1) || 6599 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending) 6600 && !mddev->in_sync && mddev->recovery_cp == MaxSector) 6601 )) 6602 return; 6603 6604 if (mddev_trylock(mddev)) { 6605 int spares = 0; 6606 6607 if (mddev->ro) { 6608 /* Only thing we do on a ro array is remove 6609 * failed devices. 6610 */ 6611 remove_and_add_spares(mddev); 6612 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6613 goto unlock; 6614 } 6615 6616 if (!mddev->external) { 6617 int did_change = 0; 6618 spin_lock_irq(&mddev->write_lock); 6619 if (mddev->safemode && 6620 !atomic_read(&mddev->writes_pending) && 6621 !mddev->in_sync && 6622 mddev->recovery_cp == MaxSector) { 6623 mddev->in_sync = 1; 6624 did_change = 1; 6625 if (mddev->persistent) 6626 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 6627 } 6628 if (mddev->safemode == 1) 6629 mddev->safemode = 0; 6630 spin_unlock_irq(&mddev->write_lock); 6631 if (did_change) 6632 sysfs_notify_dirent(mddev->sysfs_state); 6633 } 6634 6635 if (mddev->flags) 6636 md_update_sb(mddev, 0); 6637 6638 list_for_each_entry(rdev, &mddev->disks, same_set) 6639 if (test_and_clear_bit(StateChanged, &rdev->flags)) 6640 sysfs_notify_dirent(rdev->sysfs_state); 6641 6642 6643 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && 6644 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) { 6645 /* resync/recovery still happening */ 6646 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6647 goto unlock; 6648 } 6649 if (mddev->sync_thread) { 6650 /* resync has finished, collect result */ 6651 md_unregister_thread(mddev->sync_thread); 6652 mddev->sync_thread = NULL; 6653 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 6654 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 6655 /* success...*/ 6656 /* activate any spares */ 6657 if (mddev->pers->spare_active(mddev)) 6658 sysfs_notify(&mddev->kobj, NULL, 6659 "degraded"); 6660 } 6661 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 6662 mddev->pers->finish_reshape) 6663 mddev->pers->finish_reshape(mddev); 6664 md_update_sb(mddev, 1); 6665 6666 /* if array is no-longer degraded, then any saved_raid_disk 6667 * information must be scrapped 6668 */ 6669 if (!mddev->degraded) 6670 list_for_each_entry(rdev, &mddev->disks, same_set) 6671 rdev->saved_raid_disk = -1; 6672 6673 mddev->recovery = 0; 6674 /* flag recovery needed just to double check */ 6675 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6676 sysfs_notify_dirent(mddev->sysfs_action); 6677 md_new_event(mddev); 6678 goto unlock; 6679 } 6680 /* Set RUNNING before clearing NEEDED to avoid 6681 * any transients in the value of "sync_action". 6682 */ 6683 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 6684 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6685 /* Clear some bits that don't mean anything, but 6686 * might be left set 6687 */ 6688 clear_bit(MD_RECOVERY_INTR, &mddev->recovery); 6689 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 6690 6691 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 6692 goto unlock; 6693 /* no recovery is running. 6694 * remove any failed drives, then 6695 * add spares if possible. 6696 * Spare are also removed and re-added, to allow 6697 * the personality to fail the re-add. 6698 */ 6699 6700 if (mddev->reshape_position != MaxSector) { 6701 if (mddev->pers->check_reshape(mddev) != 0) 6702 /* Cannot proceed */ 6703 goto unlock; 6704 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 6705 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 6706 } else if ((spares = remove_and_add_spares(mddev))) { 6707 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 6708 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 6709 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 6710 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 6711 } else if (mddev->recovery_cp < MaxSector) { 6712 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 6713 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 6714 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 6715 /* nothing to be done ... */ 6716 goto unlock; 6717 6718 if (mddev->pers->sync_request) { 6719 if (spares && mddev->bitmap && ! mddev->bitmap->file) { 6720 /* We are adding a device or devices to an array 6721 * which has the bitmap stored on all devices. 6722 * So make sure all bitmap pages get written 6723 */ 6724 bitmap_write_all(mddev->bitmap); 6725 } 6726 mddev->sync_thread = md_register_thread(md_do_sync, 6727 mddev, 6728 "%s_resync"); 6729 if (!mddev->sync_thread) { 6730 printk(KERN_ERR "%s: could not start resync" 6731 " thread...\n", 6732 mdname(mddev)); 6733 /* leave the spares where they are, it shouldn't hurt */ 6734 mddev->recovery = 0; 6735 } else 6736 md_wakeup_thread(mddev->sync_thread); 6737 sysfs_notify_dirent(mddev->sysfs_action); 6738 md_new_event(mddev); 6739 } 6740 unlock: 6741 if (!mddev->sync_thread) { 6742 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 6743 if (test_and_clear_bit(MD_RECOVERY_RECOVER, 6744 &mddev->recovery)) 6745 if (mddev->sysfs_action) 6746 sysfs_notify_dirent(mddev->sysfs_action); 6747 } 6748 mddev_unlock(mddev); 6749 } 6750 } 6751 6752 void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev) 6753 { 6754 sysfs_notify_dirent(rdev->sysfs_state); 6755 wait_event_timeout(rdev->blocked_wait, 6756 !test_bit(Blocked, &rdev->flags), 6757 msecs_to_jiffies(5000)); 6758 rdev_dec_pending(rdev, mddev); 6759 } 6760 EXPORT_SYMBOL(md_wait_for_blocked_rdev); 6761 6762 static int md_notify_reboot(struct notifier_block *this, 6763 unsigned long code, void *x) 6764 { 6765 struct list_head *tmp; 6766 mddev_t *mddev; 6767 6768 if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) { 6769 6770 printk(KERN_INFO "md: stopping all md devices.\n"); 6771 6772 for_each_mddev(mddev, tmp) 6773 if (mddev_trylock(mddev)) { 6774 /* Force a switch to readonly even array 6775 * appears to still be in use. Hence 6776 * the '100'. 6777 */ 6778 do_md_stop(mddev, 1, 100); 6779 mddev_unlock(mddev); 6780 } 6781 /* 6782 * certain more exotic SCSI devices are known to be 6783 * volatile wrt too early system reboots. While the 6784 * right place to handle this issue is the given 6785 * driver, we do want to have a safe RAID driver ... 6786 */ 6787 mdelay(1000*1); 6788 } 6789 return NOTIFY_DONE; 6790 } 6791 6792 static struct notifier_block md_notifier = { 6793 .notifier_call = md_notify_reboot, 6794 .next = NULL, 6795 .priority = INT_MAX, /* before any real devices */ 6796 }; 6797 6798 static void md_geninit(void) 6799 { 6800 dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t)); 6801 6802 proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops); 6803 } 6804 6805 static int __init md_init(void) 6806 { 6807 if (register_blkdev(MD_MAJOR, "md")) 6808 return -1; 6809 if ((mdp_major=register_blkdev(0, "mdp"))<=0) { 6810 unregister_blkdev(MD_MAJOR, "md"); 6811 return -1; 6812 } 6813 blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE, 6814 md_probe, NULL, NULL); 6815 blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE, 6816 md_probe, NULL, NULL); 6817 6818 register_reboot_notifier(&md_notifier); 6819 raid_table_header = register_sysctl_table(raid_root_table); 6820 6821 md_geninit(); 6822 return 0; 6823 } 6824 6825 6826 #ifndef MODULE 6827 6828 /* 6829 * Searches all registered partitions for autorun RAID arrays 6830 * at boot time. 6831 */ 6832 6833 static LIST_HEAD(all_detected_devices); 6834 struct detected_devices_node { 6835 struct list_head list; 6836 dev_t dev; 6837 }; 6838 6839 void md_autodetect_dev(dev_t dev) 6840 { 6841 struct detected_devices_node *node_detected_dev; 6842 6843 node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL); 6844 if (node_detected_dev) { 6845 node_detected_dev->dev = dev; 6846 list_add_tail(&node_detected_dev->list, &all_detected_devices); 6847 } else { 6848 printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed" 6849 ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev)); 6850 } 6851 } 6852 6853 6854 static void autostart_arrays(int part) 6855 { 6856 mdk_rdev_t *rdev; 6857 struct detected_devices_node *node_detected_dev; 6858 dev_t dev; 6859 int i_scanned, i_passed; 6860 6861 i_scanned = 0; 6862 i_passed = 0; 6863 6864 printk(KERN_INFO "md: Autodetecting RAID arrays.\n"); 6865 6866 while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) { 6867 i_scanned++; 6868 node_detected_dev = list_entry(all_detected_devices.next, 6869 struct detected_devices_node, list); 6870 list_del(&node_detected_dev->list); 6871 dev = node_detected_dev->dev; 6872 kfree(node_detected_dev); 6873 rdev = md_import_device(dev,0, 90); 6874 if (IS_ERR(rdev)) 6875 continue; 6876 6877 if (test_bit(Faulty, &rdev->flags)) { 6878 MD_BUG(); 6879 continue; 6880 } 6881 set_bit(AutoDetected, &rdev->flags); 6882 list_add(&rdev->same_set, &pending_raid_disks); 6883 i_passed++; 6884 } 6885 6886 printk(KERN_INFO "md: Scanned %d and added %d devices.\n", 6887 i_scanned, i_passed); 6888 6889 autorun_devices(part); 6890 } 6891 6892 #endif /* !MODULE */ 6893 6894 static __exit void md_exit(void) 6895 { 6896 mddev_t *mddev; 6897 struct list_head *tmp; 6898 6899 blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS); 6900 blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS); 6901 6902 unregister_blkdev(MD_MAJOR,"md"); 6903 unregister_blkdev(mdp_major, "mdp"); 6904 unregister_reboot_notifier(&md_notifier); 6905 unregister_sysctl_table(raid_table_header); 6906 remove_proc_entry("mdstat", NULL); 6907 for_each_mddev(mddev, tmp) { 6908 export_array(mddev); 6909 mddev->hold_active = 0; 6910 } 6911 } 6912 6913 subsys_initcall(md_init); 6914 module_exit(md_exit) 6915 6916 static int get_ro(char *buffer, struct kernel_param *kp) 6917 { 6918 return sprintf(buffer, "%d", start_readonly); 6919 } 6920 static int set_ro(const char *val, struct kernel_param *kp) 6921 { 6922 char *e; 6923 int num = simple_strtoul(val, &e, 10); 6924 if (*val && (*e == '\0' || *e == '\n')) { 6925 start_readonly = num; 6926 return 0; 6927 } 6928 return -EINVAL; 6929 } 6930 6931 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR); 6932 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR); 6933 6934 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR); 6935 6936 EXPORT_SYMBOL(register_md_personality); 6937 EXPORT_SYMBOL(unregister_md_personality); 6938 EXPORT_SYMBOL(md_error); 6939 EXPORT_SYMBOL(md_done_sync); 6940 EXPORT_SYMBOL(md_write_start); 6941 EXPORT_SYMBOL(md_write_end); 6942 EXPORT_SYMBOL(md_register_thread); 6943 EXPORT_SYMBOL(md_unregister_thread); 6944 EXPORT_SYMBOL(md_wakeup_thread); 6945 EXPORT_SYMBOL(md_check_recovery); 6946 MODULE_LICENSE("GPL"); 6947 MODULE_ALIAS("md"); 6948 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR); 6949