/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include "md.h"
#include "bitmap.h"

#ifndef MODULE
static void autostart_arrays(int part);
#endif

/* pers_list is a list of registered personalities protected
 * by pers_lock.
 * pers_lock does extra service to protect accesses to
 * mddev->thread when the mutex cannot be held.
 */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static void md_print_devices(void);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;

#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
 * or /sys/block/mdX/md/sync_speed_{min,max}
 */
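
/*
 * For example (values illustrative, not recommendations), from a shell:
 *
 *	echo 50000  > /proc/sys/dev/raid/speed_limit_min
 *	echo 500000 > /proc/sys/dev/raid/speed_limit_max
 */
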
static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(struct mddev *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(struct mddev *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}

static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static ctl_table raid_dir_table[] = {
	{
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ }
};

static ctl_table raid_root_table[] = {
	{
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ }
};

static const struct block_device_operations md_fops;

static int start_readonly;

/* bio_clone_mddev
 * like bio_clone, but with a local bio set
 */

static void mddev_bio_destructor(struct bio *bio)
{
	struct mddev *mddev, **mddevp;

	mddevp = (void*)bio;
	mddev = mddevp[-1];

	bio_free(bio, mddev->bio_set);
}

struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
			    struct mddev *mddev)
{
	struct bio *b;
	struct mddev **mddevp;

	if (!mddev || !mddev->bio_set)
		return bio_alloc(gfp_mask, nr_iovecs);

	b = bio_alloc_bioset(gfp_mask, nr_iovecs,
			     mddev->bio_set);
	if (!b)
		return NULL;
	mddevp = (void*)b;
	mddevp[-1] = mddev;
	b->bi_destructor = mddev_bio_destructor;
	return b;
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);

struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
			    struct mddev *mddev)
{
	struct bio *b;
	struct mddev **mddevp;

	if (!mddev || !mddev->bio_set)
		return bio_clone(bio, gfp_mask);

	b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs,
			     mddev->bio_set);
	if (!b)
		return NULL;
	mddevp = (void*)b;
	mddevp[-1] = mddev;
	b->bi_destructor = mddev_bio_destructor;
	__bio_clone(b, bio);
	if (bio_integrity(bio)) {
		int ret;

		ret = bio_integrity_clone(b, bio, gfp_mask, mddev->bio_set);

		if (ret < 0) {
			bio_put(b);
			return NULL;
		}
	}

	return b;
}
EXPORT_SYMBOL_GPL(bio_clone_mddev);
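
/*
 * Typical internal use (a sketch, not copied from a specific caller):
 * personalities clone incoming bios against the array's private
 * bio_set so allocation cannot deadlock under memory pressure:
 *
 *	struct bio *clone = bio_clone_mddev(bio, GFP_NOIO, mddev);
 */
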
void md_trim_bio(struct bio *bio, int offset, int size)
{
	/* 'bio' is a cloned bio which we need to trim to match
	 * the given offset and size.
	 * This requires adjusting bi_sector, bi_size, and bi_io_vec
	 */
	int i;
	struct bio_vec *bvec;
	int sofar = 0;

	size <<= 9;
	if (offset == 0 && size == bio->bi_size)
		return;

	bio->bi_sector += offset;
	bio->bi_size = size;
	offset <<= 9;
	clear_bit(BIO_SEG_VALID, &bio->bi_flags);

	while (bio->bi_idx < bio->bi_vcnt &&
	       bio->bi_io_vec[bio->bi_idx].bv_len <= offset) {
		/* remove this whole bio_vec */
		offset -= bio->bi_io_vec[bio->bi_idx].bv_len;
		bio->bi_idx++;
	}
	if (bio->bi_idx < bio->bi_vcnt) {
		bio->bi_io_vec[bio->bi_idx].bv_offset += offset;
		bio->bi_io_vec[bio->bi_idx].bv_len -= offset;
	}
	/* avoid any complications with bi_idx being non-zero*/
	if (bio->bi_idx) {
		memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
			(bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec));
		bio->bi_vcnt -= bio->bi_idx;
		bio->bi_idx = 0;
	}
	/* Make sure vcnt and last bv are not too big */
	bio_for_each_segment(bvec, bio, i) {
		if (sofar + bvec->bv_len > size)
			bvec->bv_len = size - sofar;
		if (bvec->bv_len == 0) {
			bio->bi_vcnt = i;
			break;
		}
		sofar += bvec->bv_len;
	}
}
EXPORT_SYMBOL_GPL(md_trim_bio);

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(struct mddev *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(struct mddev *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);


/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(_mddev,_tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		_tmp = all_mddevs.next;					\
		_mddev = NULL;});					\
	     ({ if (_tmp != &all_mddevs)				\
			mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (_mddev) mddev_put(_mddev);				\
		_mddev = list_entry(_tmp, struct mddev, all_mddevs);	\
		_tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		_tmp = _tmp->next;})					\
		)
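
/*
 * Sketch of the intended iteration pattern (caller side, illustrative):
 *
 *	struct mddev *mddev;
 *	struct list_head *tmp;
 *
 *	for_each_mddev(mddev, tmp) {
 *		...		(a refcount on mddev is held here)
 *	}
 */
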
334 */ 335 static int md_make_request(struct request_queue *q, struct bio *bio) 336 { 337 const int rw = bio_data_dir(bio); 338 struct mddev *mddev = q->queuedata; 339 int rv; 340 int cpu; 341 unsigned int sectors; 342 343 if (mddev == NULL || mddev->pers == NULL 344 || !mddev->ready) { 345 bio_io_error(bio); 346 return 0; 347 } 348 smp_rmb(); /* Ensure implications of 'active' are visible */ 349 rcu_read_lock(); 350 if (mddev->suspended) { 351 DEFINE_WAIT(__wait); 352 for (;;) { 353 prepare_to_wait(&mddev->sb_wait, &__wait, 354 TASK_UNINTERRUPTIBLE); 355 if (!mddev->suspended) 356 break; 357 rcu_read_unlock(); 358 schedule(); 359 rcu_read_lock(); 360 } 361 finish_wait(&mddev->sb_wait, &__wait); 362 } 363 atomic_inc(&mddev->active_io); 364 rcu_read_unlock(); 365 366 /* 367 * save the sectors now since our bio can 368 * go away inside make_request 369 */ 370 sectors = bio_sectors(bio); 371 rv = mddev->pers->make_request(mddev, bio); 372 373 cpu = part_stat_lock(); 374 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); 375 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors); 376 part_stat_unlock(); 377 378 if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) 379 wake_up(&mddev->sb_wait); 380 381 return rv; 382 } 383 384 /* mddev_suspend makes sure no new requests are submitted 385 * to the device, and that any requests that have been submitted 386 * are completely handled. 387 * Once ->stop is called and completes, the module will be completely 388 * unused. 389 */ 390 void mddev_suspend(struct mddev *mddev) 391 { 392 BUG_ON(mddev->suspended); 393 mddev->suspended = 1; 394 synchronize_rcu(); 395 wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0); 396 mddev->pers->quiesce(mddev, 1); 397 } 398 EXPORT_SYMBOL_GPL(mddev_suspend); 399 400 void mddev_resume(struct mddev *mddev) 401 { 402 mddev->suspended = 0; 403 wake_up(&mddev->sb_wait); 404 mddev->pers->quiesce(mddev, 0); 405 406 md_wakeup_thread(mddev->thread); 407 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 408 } 409 EXPORT_SYMBOL_GPL(mddev_resume); 410 411 int mddev_congested(struct mddev *mddev, int bits) 412 { 413 return mddev->suspended; 414 } 415 EXPORT_SYMBOL(mddev_congested); 416 417 /* 418 * Generic flush handling for md 419 */ 420 421 static void md_end_flush(struct bio *bio, int err) 422 { 423 struct md_rdev *rdev = bio->bi_private; 424 struct mddev *mddev = rdev->mddev; 425 426 rdev_dec_pending(rdev, mddev); 427 428 if (atomic_dec_and_test(&mddev->flush_pending)) { 429 /* The pre-request flush has finished */ 430 queue_work(md_wq, &mddev->flush_work); 431 } 432 bio_put(bio); 433 } 434 435 static void md_submit_flush_data(struct work_struct *ws); 436 437 static void submit_flushes(struct work_struct *ws) 438 { 439 struct mddev *mddev = container_of(ws, struct mddev, flush_work); 440 struct md_rdev *rdev; 441 442 INIT_WORK(&mddev->flush_work, md_submit_flush_data); 443 atomic_set(&mddev->flush_pending, 1); 444 rcu_read_lock(); 445 list_for_each_entry_rcu(rdev, &mddev->disks, same_set) 446 if (rdev->raid_disk >= 0 && 447 !test_bit(Faulty, &rdev->flags)) { 448 /* Take two references, one is dropped 449 * when request finishes, one after 450 * we reclaim rcu_read_lock 451 */ 452 struct bio *bi; 453 atomic_inc(&rdev->nr_pending); 454 atomic_inc(&rdev->nr_pending); 455 rcu_read_unlock(); 456 bi = bio_alloc_mddev(GFP_KERNEL, 0, mddev); 457 bi->bi_end_io = md_end_flush; 458 bi->bi_private = rdev; 459 bi->bi_bdev = rdev->bdev; 460 atomic_inc(&mddev->flush_pending); 461 
int mddev_congested(struct mddev *mddev, int bits)
{
	return mddev->suspended;
}
EXPORT_SYMBOL(mddev_congested);

/*
 * Generic flush handling for md
 */

static void md_end_flush(struct bio *bio, int err)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->flush_pending)) {
		/* The pre-request flush has finished */
		queue_work(md_wq, &mddev->flush_work);
	}
	bio_put(bio);
}

static void md_submit_flush_data(struct work_struct *ws);

static void submit_flushes(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct md_rdev *rdev;

	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
	atomic_set(&mddev->flush_pending, 1);
	rcu_read_lock();
	list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* Take two references, one is dropped
			 * when request finishes, one after
			 * we reclaim rcu_read_lock
			 */
			struct bio *bi;
			atomic_inc(&rdev->nr_pending);
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			bi = bio_alloc_mddev(GFP_KERNEL, 0, mddev);
			bi->bi_end_io = md_end_flush;
			bi->bi_private = rdev;
			bi->bi_bdev = rdev->bdev;
			atomic_inc(&mddev->flush_pending);
			submit_bio(WRITE_FLUSH, bi);
			rcu_read_lock();
			rdev_dec_pending(rdev, mddev);
		}
	rcu_read_unlock();
	if (atomic_dec_and_test(&mddev->flush_pending))
		queue_work(md_wq, &mddev->flush_work);
}

static void md_submit_flush_data(struct work_struct *ws)
{
	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
	struct bio *bio = mddev->flush_bio;

	if (bio->bi_size == 0)
		/* an empty barrier - all done */
		bio_endio(bio, 0);
	else {
		bio->bi_rw &= ~REQ_FLUSH;
		if (mddev->pers->make_request(mddev, bio))
			generic_make_request(bio);
	}

	mddev->flush_bio = NULL;
	wake_up(&mddev->sb_wait);
}

void md_flush_request(struct mddev *mddev, struct bio *bio)
{
	spin_lock_irq(&mddev->write_lock);
	wait_event_lock_irq(mddev->sb_wait,
			    !mddev->flush_bio,
			    mddev->write_lock, /*nothing*/);
	mddev->flush_bio = bio;
	spin_unlock_irq(&mddev->write_lock);

	INIT_WORK(&mddev->flush_work, submit_flushes);
	queue_work(md_wq, &mddev->flush_work);
}
EXPORT_SYMBOL(md_flush_request);

/* Support for plugging.
 * This mirrors the plugging support in request_queue, but does not
 * require having a whole queue or request structures.
 * We allocate an md_plug_cb for each md device and each thread it gets
 * plugged on.  This links to the private plug_handle structure in the
 * personality data where we keep a count of the number of outstanding
 * plugs so other code can see if a plug is active.
 */
struct md_plug_cb {
	struct blk_plug_cb cb;
	struct mddev *mddev;
};

static void plugger_unplug(struct blk_plug_cb *cb)
{
	struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb);
	if (atomic_dec_and_test(&mdcb->mddev->plug_cnt))
		md_wakeup_thread(mdcb->mddev->thread);
	kfree(mdcb);
}

/* Check that an unplug wakeup will come shortly.
 * If not, wakeup the md thread immediately
 */
int mddev_check_plugged(struct mddev *mddev)
{
	struct blk_plug *plug = current->plug;
	struct md_plug_cb *mdcb;

	if (!plug)
		return 0;

	list_for_each_entry(mdcb, &plug->cb_list, cb.list) {
		if (mdcb->cb.callback == plugger_unplug &&
		    mdcb->mddev == mddev) {
			/* Already on the list, move to top */
			if (mdcb != list_first_entry(&plug->cb_list,
						    struct md_plug_cb,
						    cb.list))
				list_move(&mdcb->cb.list, &plug->cb_list);
			return 1;
		}
	}
	/* Not currently on the callback list */
	mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC);
	if (!mdcb)
		return 0;

	mdcb->mddev = mddev;
	mdcb->cb.callback = plugger_unplug;
	atomic_inc(&mddev->plug_cnt);
	list_add(&mdcb->cb.list, &plug->cb_list);
	return 1;
}
EXPORT_SYMBOL_GPL(mddev_check_plugged);
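
/*
 * Sketch of how a personality's make_request path is expected to use
 * this (illustrative, not copied from a specific caller):
 *
 *	int plugged = mddev_check_plugged(mddev);
 *	...	queue the IO internally ...
 *	if (!plugged)
 *		md_wakeup_thread(mddev->thread);
 */
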
static inline struct mddev *mddev_get(struct mddev *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(struct mddev *mddev)
{
	struct bio_set *bs = NULL;

	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    mddev->ctime == 0 && !mddev->hold_active) {
		/* Array is not configured at all, and not held active,
		 * so destroy it */
		list_del(&mddev->all_mddevs);
		bs = mddev->bio_set;
		mddev->bio_set = NULL;
		if (mddev->gendisk) {
			/* We did a probe so need to clean up.
			 * Call queue_work inside the spinlock so that
			 * flush_workqueue() after mddev_find will
			 * succeed in waiting for the work to be done.
			 */
			INIT_WORK(&mddev->del_work, mddev_delayed_delete);
			queue_work(md_misc_wq, &mddev->del_work);
		} else
			kfree(mddev);
	}
	spin_unlock(&all_mddevs_lock);
	if (bs)
		bioset_free(bs);
}

void mddev_init(struct mddev *mddev)
{
	mutex_init(&mddev->open_mutex);
	mutex_init(&mddev->reconfig_mutex);
	mutex_init(&mddev->bitmap_info.mutex);
	INIT_LIST_HEAD(&mddev->disks);
	INIT_LIST_HEAD(&mddev->all_mddevs);
	init_timer(&mddev->safemode_timer);
	atomic_set(&mddev->active, 1);
	atomic_set(&mddev->openers, 0);
	atomic_set(&mddev->active_io, 0);
	atomic_set(&mddev->plug_cnt, 0);
	spin_lock_init(&mddev->write_lock);
	atomic_set(&mddev->flush_pending, 0);
	init_waitqueue_head(&mddev->sb_wait);
	init_waitqueue_head(&mddev->recovery_wait);
	mddev->reshape_position = MaxSector;
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);

static struct mddev * mddev_find(dev_t unit)
{
	struct mddev *mddev, *new = NULL;

	if (unit && MAJOR(unit) != MD_MAJOR)
		unit &= ~((1<<MdpMinorShift)-1);

 retry:
	spin_lock(&all_mddevs_lock);

	if (unit) {
		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
			if (mddev->unit == unit) {
				mddev_get(mddev);
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return mddev;
			}

		if (new) {
			list_add(&new->all_mddevs, &all_mddevs);
			spin_unlock(&all_mddevs_lock);
			new->hold_active = UNTIL_IOCTL;
			return new;
		}
	} else if (new) {
		/* find an unused unit number */
		static int next_minor = 512;
		int start = next_minor;
		int is_free = 0;
		int dev = 0;
		while (!is_free) {
			dev = MKDEV(MD_MAJOR, next_minor);
			next_minor++;
			if (next_minor > MINORMASK)
				next_minor = 0;
			if (next_minor == start) {
				/* Oh dear, all in use. */
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return NULL;
			}

			is_free = 1;
			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
				if (mddev->unit == dev) {
					is_free = 0;
					break;
				}
		}
		new->unit = dev;
		new->md_minor = MINOR(dev);
		new->hold_active = UNTIL_STOP;
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mddev_init(new);

	goto retry;
}

static inline int mddev_lock(struct mddev * mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

static inline int mddev_is_locked(struct mddev *mddev)
{
	return mutex_is_locked(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(struct mddev * mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}
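
/*
 * The locking pattern used by the ioctl/sysfs paths below is
 * (illustrative):
 *
 *	if (mddev_lock(mddev))
 *		return -EINTR;		(interrupted while waiting)
 *	...	reconfigure the array ...
 *	mddev_unlock(mddev);
 */
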
static struct attribute_group md_redundancy_group;

static void mddev_unlock(struct mddev * mddev)
{
	if (mddev->to_remove) {
		/* These cannot be removed under reconfig_mutex as
		 * an access to the files will try to take reconfig_mutex
		 * while holding the file unremovable, which leads to
		 * a deadlock.
		 * So set sysfs_active while the removal is happening,
		 * and anything else which might set ->to_remove or may
		 * otherwise change the sysfs namespace will fail with
		 * -EBUSY if sysfs_active is still set.
		 * We set sysfs_active under reconfig_mutex and elsewhere
		 * test it under the same mutex to ensure its correct value
		 * is seen.
		 */
		struct attribute_group *to_remove = mddev->to_remove;
		mddev->to_remove = NULL;
		mddev->sysfs_active = 1;
		mutex_unlock(&mddev->reconfig_mutex);

		if (mddev->kobj.sd) {
			if (to_remove != &md_redundancy_group)
				sysfs_remove_group(&mddev->kobj, to_remove);
			if (mddev->pers == NULL ||
			    mddev->pers->sync_request == NULL) {
				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
				if (mddev->sysfs_action)
					sysfs_put(mddev->sysfs_action);
				mddev->sysfs_action = NULL;
			}
		}
		mddev->sysfs_active = 0;
	} else
		mutex_unlock(&mddev->reconfig_mutex);

	/* As we've dropped the mutex we need a spinlock to
	 * make sure the thread doesn't disappear
	 */
	spin_lock(&pers_lock);
	md_wakeup_thread(mddev->thread);
	spin_unlock(&pers_lock);
}

static struct md_rdev * find_rdev_nr(struct mddev *mddev, int nr)
{
	struct md_rdev *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}

static struct md_rdev * find_rdev(struct mddev * mddev, dev_t dev)
{
	struct md_rdev *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

static struct md_personality *find_pers(int level, char *clevel)
{
	struct md_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}

/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(struct md_rdev *rdev)
{
	sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}
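
/*
 * Worked example (assuming MD_NEW_SIZE_SECTORS() rounds down to a 64KB
 * boundary and then steps back one 64KB reservation, as defined in
 * md_p.h): a 1000000-sector device gives 1000000 & ~127 == 999936,
 * minus 128 == sector 999808 for the 0.90 superblock.
 */
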
static int alloc_disk_sb(struct md_rdev * rdev)
{
	if (rdev->sb_page)
		MD_BUG();

	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
		return -ENOMEM;
	}

	return 0;
}

static void free_disk_sb(struct md_rdev * rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_start = 0;
		rdev->sectors = 0;
	}
	if (rdev->bb_page) {
		put_page(rdev->bb_page);
		rdev->bb_page = NULL;
	}
}


static void super_written(struct bio *bio, int error)
{
	struct md_rdev *rdev = bio->bi_private;
	struct mddev *mddev = rdev->mddev;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk("md: super_written gets error=%d, uptodate=%d\n",
		       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
		WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
		md_error(mddev, rdev);
	}

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	bio_put(bio);
}

void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
		   sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 */
	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);

	bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;

	atomic_inc(&mddev->pending_writes);
	submit_bio(WRITE_FLUSH_FUA, bio);
}

void md_super_wait(struct mddev *mddev)
{
	/* wait for all superblock writes that were scheduled to complete */
	DEFINE_WAIT(wq);
	for(;;) {
		prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&mddev->pending_writes)==0)
			break;
		schedule();
	}
	finish_wait(&mddev->sb_wait, &wq);
}

static void bi_complete(struct bio *bio, int error)
{
	complete((struct completion*)bio->bi_private);
}

int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
		 struct page *page, int rw, bool metadata_op)
{
	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
	struct completion event;
	int ret;

	rw |= REQ_SYNC;

	bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
		rdev->meta_bdev : rdev->bdev;
	if (metadata_op)
		bio->bi_sector = sector + rdev->sb_start;
	else
		bio->bi_sector = sector + rdev->data_offset;
	bio_add_page(bio, page, size, 0);
	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);

static int read_disk_sb(struct md_rdev * rdev, int size)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->sb_page) {
		MD_BUG();
		return -EINVAL;
	}
	if (rdev->sb_loaded)
		return 0;


	if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
		bdevname(rdev->bdev,b));
	return -EINVAL;
}

static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}


static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}
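
/*
 * Worked example: md_csum_fold(0x12345678) computes
 * 0x5678 + 0x1234 == 0x68ac, which already fits in 16 bits, so the
 * second fold leaves it unchanged.
 */
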
static unsigned int calc_sb_csum(mdp_super_t * sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);


#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures.  It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences).  However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}


/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(struct mddev *mddev, struct md_rdev *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(struct mddev *mddev, struct md_rdev *dev)
 *     Update the superblock for rdev with data in mddev
 *     This does not write to disc.
 *
 */

struct super_type  {
	char		    *name;
	struct module	    *owner;
	int		    (*load_super)(struct md_rdev *rdev, struct md_rdev *refdev,
					  int minor_version);
	int		    (*validate_super)(struct mddev *mddev, struct md_rdev *rdev);
	void		    (*sync_super)(struct mddev *mddev, struct md_rdev *rdev);
	unsigned long long  (*rdev_size_change)(struct md_rdev *rdev,
						sector_t num_sectors);
};

/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 *
 */
int md_check_no_bitmap(struct mddev *mddev)
{
	if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
		return 0;
	printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
		mdname(mddev), mddev->pers->name);
	return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);
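
/*
 * Typical use from a personality's run method (sketch):
 *
 *	if (md_check_no_bitmap(mddev))
 *		return -EINVAL;
 */
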
/*
 * load_super for 0.90.0
 */
static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;

	/*
	 * Calculate the position of the superblock (512byte sectors),
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
	 */
	rdev->sb_start = calc_dev_sboffset(rdev);

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret) return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version,
			b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
			b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;
	rdev->badblocks.shift = -1;

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	rdev->sectors = rdev->sb_start;
	/* Limit to 4TB as metadata cannot record more than that */
	if (rdev->sectors >= (2ULL << 32))
		rdev->sectors = (2ULL << 32) - 2;
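	/* Note: (2ULL << 32) sectors is 2^33 * 512B == 4TiB; stopping
	 * two sectors short keeps the size at 2^32 - 1 KB, the largest
	 * value the 32-bit, KB-based 'size' field of the 0.90
	 * superblock can hold.
	 */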

	if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
		/* "this cannot possibly happen" ... */
		ret = -EINVAL;

 abort:
	return ret;
}

/*
 * validate_super for 0.90.0
 */
static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->external = 0;
		mddev->chunk_sectors = sb->chunk_size >> 9;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->dev_sectors = ((sector_t)sb->size) * 2;
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk_sectors = sb->new_chunk >> 9;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
				sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_info.file == NULL)
			mddev->bitmap_info.offset =
				mddev->bitmap_info.default_offset;

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling, except
		 * for spares (which don't need an event count) */
		++ev1;
		if (sb->disks[rdev->desc_nr].state & (
			    (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
		} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
			/* active but not in sync implies recovery up to
			 * reshape position.  We don't know exactly where
			 * that is, so set to zero for now */
			if (mddev->minor_version >= 91) {
				rdev->recovery_offset = 0;
				rdev->raid_disk = desc->raid_disk;
			}
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}

/*
 * sync_super for 0.90.0
 */
static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	mdp_super_t *sb;
	struct md_rdev *rdev2;
	int next_spare = mddev->raid_disks;


	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size = mddev->dev_sectors / 2;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk_sectors << 9;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync)
	{
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_sectors << 9;

	if (mddev->bitmap && mddev->bitmap_info.file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		mdp_disk_t *d;
		int desc_nr;
		int is_active = test_bit(In_sync, &rdev2->flags);

		if (rdev2->raid_disk >= 0 &&
		    sb->minor_version >= 91)
			/* we have nowhere to store the recovery_offset,
			 * but if it is not below the reshape_position,
			 * we can piggy-back on that.
			 */
			is_active = 1;
		if (rdev2->raid_disk < 0 ||
		    test_bit(Faulty, &rdev2->flags))
			is_active = 0;
		if (is_active)
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (is_active)
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (is_active) {
			d->state = (1<<MD_DISK_ACTIVE);
			if (test_bit(In_sync, &rdev2->flags))
				d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}

/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->mddev->bitmap_info.offset)
		return 0; /* can't move bitmap */
	rdev->sb_start = calc_dev_sboffset(rdev);
	if (!num_sectors || num_sectors > rdev->sb_start)
		num_sectors = rdev->sb_start;
	/* Limit to 4TB as metadata cannot record more than that.
	 * 4TB == 2^32 KB, or 2*2^32 sectors.
	 */
	if (num_sectors >= (2ULL << 32))
		num_sectors = (2ULL << 32) - 2;
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors;
}


/*
 * version 1 superblock
 */

static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
	__le32 disk_csum;
	u32 csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32*)sb;
	int i;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (i=0; size>=4; size -= 4 )
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(__le16*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}
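
/*
 * Example: the checksummed area is 256 bytes of fixed header plus two
 * bytes per device role, so with max_dev == 384 it covers
 * 256 + 384*2 == 1024 bytes, a full 1K superblock.
 */
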
static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
			    int acknowledged);
static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_start;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock in 512byte sectors.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch(minor_version) {
	case 0:
		sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
		sb_start -= 8*2;
		sb_start &= ~(sector_t)(4*2-1);
		break;
	case 1:
		sb_start = 0;
		break;
	case 2:
		sb_start = 8;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_start = sb_start;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;


	sb = page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;
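	/* e.g. with a 4K logical block size, bmask is 4095 and a
	 * 1024-byte superblock rounds up to (1024 | 4095) + 1 == 4096.
	 */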

	if (minor_version
	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

	if (!rdev->bb_page) {
		rdev->bb_page = alloc_page(GFP_KERNEL);
		if (!rdev->bb_page)
			return -ENOMEM;
	}
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
	    rdev->badblocks.count == 0) {
		/* need to load the bad block list.
		 * Currently we limit it to one page.
		 */
		s32 offset;
		sector_t bb_sector;
		u64 *bbp;
		int i;
		int sectors = le16_to_cpu(sb->bblog_size);
		if (sectors > (PAGE_SIZE / 512))
			return -EINVAL;
		offset = le32_to_cpu(sb->bblog_offset);
		if (offset == 0)
			return -EINVAL;
		bb_sector = (long long)offset;
		if (!sync_page_io(rdev, bb_sector, sectors << 9,
				  rdev->bb_page, READ, true))
			return -EIO;
		bbp = (u64 *)page_address(rdev->bb_page);
		rdev->badblocks.shift = sb->bblog_shift;
		for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
			u64 bb = le64_to_cpu(*bbp);
			int count = bb & (0x3ff);
			u64 sector = bb >> 10;
			sector <<= sb->bblog_shift;
			count <<= sb->bblog_shift;
			if (bb + 1 == 0)
				break;
			if (md_set_badblocks(&rdev->badblocks,
					     sector, count, 1) == 0)
				return -EINVAL;
		}
	} else if (sb->bblog_offset == 0)
		rdev->badblocks.shift = -1;
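	/* Bad-block log encoding, as decoded above: each __le64 entry
	 * packs the start sector in the high 54 bits and a length in
	 * the low 10 bits, both scaled by bblog_shift; an all-ones
	 * entry terminates the list.
	 */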

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
				" superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version)
		rdev->sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
			le64_to_cpu(sb->data_offset);
	else
		rdev->sectors = rdev->sb_start;
	if (rdev->sectors < le64_to_cpu(sb->data_size))
		return -EINVAL;
	rdev->sectors = le64_to_cpu(sb->data_size);
	if (le64_to_cpu(sb->size) > rdev->sectors)
		return -EINVAL;
	return ret;
}

static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->external = 0;
		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->dev_sectors = le64_to_cpu(sb->size);
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.default_offset = 1024 >> 9;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks =  (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_info.file == NULL )
			mddev->bitmap_info.offset =
				(__s32)le32_to_cpu(sb->bitmap_offset);

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling, except for
		 * spares (which don't need an event count) */
		++ev1;
		if (rdev->desc_nr >= 0 &&
		    rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
		    le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		if (rdev->desc_nr < 0 ||
		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
			role = 0xffff;
			rdev->desc_nr = -1;
		} else
			role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET))
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
			else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}
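
/*
 * Summary of the dev_roles[] convention used above and in
 * super_1_sync() below: 0xffff marks a spare, 0xfffe a faulty device,
 * and any other value is the device's raid_disk index.
 */
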
static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mdp_superblock_1 *sb;
	struct md_rdev *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data. */

	sb = page_address(rdev->sb_page);

	sb->feature_map = 0;
	sb->pad0 = 0;
	sb->recovery_offset = cpu_to_le64(0);
	memset(sb->pad1, 0, sizeof(sb->pad1));
	memset(sb->pad3, 0, sizeof(sb->pad3));

	sb->utime = cpu_to_le64((__u64)mddev->utime);
	sb->events = cpu_to_le64(mddev->events);
	if (mddev->in_sync)
		sb->resync_offset = cpu_to_le64(mddev->recovery_cp);
	else
		sb->resync_offset = cpu_to_le64(0);

	sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));

	sb->raid_disks = cpu_to_le32(mddev->raid_disks);
	sb->size = cpu_to_le64(mddev->dev_sectors);
	sb->chunksize = cpu_to_le32(mddev->chunk_sectors);
	sb->level = cpu_to_le32(mddev->level);
	sb->layout = cpu_to_le32(mddev->layout);

	if (test_bit(WriteMostly, &rdev->flags))
		sb->devflags |= WriteMostly1;
	else
		sb->devflags &= ~WriteMostly1;

	if (mddev->bitmap && mddev->bitmap_info.file == NULL) {
		sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset);
		sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET);
	}

	if (rdev->raid_disk >= 0 &&
	    !test_bit(In_sync, &rdev->flags)) {
		sb->feature_map |=
			cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET);
		sb->recovery_offset =
			cpu_to_le64(rdev->recovery_offset);
	}

	if (mddev->reshape_position != MaxSector) {
		sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE);
		sb->reshape_position = cpu_to_le64(mddev->reshape_position);
		sb->new_layout = cpu_to_le32(mddev->new_layout);
		sb->delta_disks = cpu_to_le32(mddev->delta_disks);
		sb->new_level = cpu_to_le32(mddev->new_level);
		sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors);
	}

	if (rdev->badblocks.count == 0)
		/* Nothing to do for bad blocks*/ ;
	else if (sb->bblog_offset == 0)
		/* Cannot record bad blocks on this device */
		md_error(mddev, rdev);
	else {
		struct badblocks *bb = &rdev->badblocks;
		u64 *bbp = (u64 *)page_address(rdev->bb_page);
		u64 *p = bb->page;
		sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS);
		if (bb->changed) {
			unsigned seq;

retry:
			seq = read_seqbegin(&bb->lock);

			memset(bbp, 0xff, PAGE_SIZE);

			for (i = 0 ; i < bb->count ; i++) {
				u64 internal_bb = *p++;
				u64 store_bb = ((BB_OFFSET(internal_bb) << 10)
						| BB_LEN(internal_bb));
				*bbp++ = cpu_to_le64(store_bb);
			}
			if (read_seqretry(&bb->lock, seq))
				goto retry;

			bb->sector = (rdev->sb_start +
				      (int)le32_to_cpu(sb->bblog_offset));
			bb->size = le16_to_cpu(sb->bblog_size);
			bb->changed = 0;
		}
	}

	max_dev = 0;
	list_for_each_entry(rdev2, &mddev->disks, same_set)
		if (rdev2->desc_nr+1 > max_dev)
			max_dev = rdev2->desc_nr+1;

	if (max_dev > le32_to_cpu(sb->max_dev)) {
		int bmask;
		sb->max_dev = cpu_to_le32(max_dev);
		rdev->sb_size = max_dev * 2 + 256;
		bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
		if (rdev->sb_size & bmask)
			rdev->sb_size = (rdev->sb_size | bmask) + 1;
	} else
		max_dev = le32_to_cpu(sb->max_dev);

	for (i=0; i<max_dev;i++)
		sb->dev_roles[i] = cpu_to_le16(0xfffe);

	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		i = rdev2->desc_nr;
		if (test_bit(Faulty, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(0xfffe);
		else if (test_bit(In_sync, &rdev2->flags))
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else if (rdev2->raid_disk >= 0)
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->sb_csum = calc_sb_1_csum(sb);
}

static unsigned long long
super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors)
{
	struct mdp_superblock_1 *sb;
	sector_t max_sectors;
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->sb_start < rdev->data_offset) {
		/* minor versions 1 and 2; superblock before data */
		max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
		max_sectors -= rdev->data_offset;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
	} else if (rdev->mddev->bitmap_info.offset) {
		/* minor version 0 with bitmap we can't move */
		return 0;
	} else {
		/* minor version 0; superblock after data */
		sector_t sb_start;
		sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
		sb_start &= ~(sector_t)(4*2 - 1);
		max_sectors = rdev->sectors + sb_start - rdev->sb_start;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
		rdev->sb_start = sb_start;
	}
	sb = page_address(rdev->sb_page);
	sb->data_size = cpu_to_le64(num_sectors);
	sb->super_offset = rdev->sb_start;
	sb->sb_csum = calc_sb_1_csum(sb);
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors;
}

static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	    = super_90_load,
		.validate_super	    = super_90_validate,
		.sync_super	    = super_90_sync,
		.rdev_size_change   = super_90_rdev_size_change,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	    = super_1_load,
		.validate_super	    = super_1_validate,
		.sync_super	    = super_1_sync,
		.rdev_size_change   = super_1_rdev_size_change,
	},
};

static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
{
	if (mddev->sync_super) {
		mddev->sync_super(mddev, rdev);
		return;
	}

	BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));

	super_types[mddev->major_version].sync_super(mddev, rdev);
}
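
/* Return 1 if the two arrays share a component whole-device
 * (bd_contains), 0 otherwise.
 */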
static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	  = super_90_load,
		.validate_super	  = super_90_validate,
		.sync_super	  = super_90_sync,
		.rdev_size_change = super_90_rdev_size_change,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	  = super_1_load,
		.validate_super	  = super_1_validate,
		.sync_super	  = super_1_sync,
		.rdev_size_change = super_1_rdev_size_change,
	},
};

static void sync_super(struct mddev *mddev, struct md_rdev *rdev)
{
	if (mddev->sync_super) {
		mddev->sync_super(mddev, rdev);
		return;
	}

	BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));

	super_types[mddev->major_version].sync_super(mddev, rdev);
}

static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2)
{
	struct md_rdev *rdev, *rdev2;

	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev1)
		rdev_for_each_rcu(rdev2, mddev2)
			if (rdev->bdev->bd_contains ==
			    rdev2->bdev->bd_contains) {
				rcu_read_unlock();
				return 1;
			}
	rcu_read_unlock();
	return 0;
}
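/*
 * match_mddev_units() compares bd_contains, i.e. the whole-disk device a
 * partition lives on, so two arrays that use different partitions of the
 * same physical disk (say sda1 and sda2) are reported as sharing a unit.
 */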
static LIST_HEAD(pending_raid_disks);

/*
 * Try to register data integrity profile for an mddev
 *
 * This is called when an array is started and after a disk has been kicked
 * from the array. It only succeeds if all working and active component devices
 * are integrity capable with matching profiles.
 */
int md_integrity_register(struct mddev *mddev)
{
	struct md_rdev *rdev, *reference = NULL;

	if (list_empty(&mddev->disks))
		return 0; /* nothing to do */
	if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
		return 0; /* shouldn't register, or already is */
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		/* skip spares and non-functional disks */
		if (test_bit(Faulty, &rdev->flags))
			continue;
		if (rdev->raid_disk < 0)
			continue;
		if (!reference) {
			/* Use the first rdev as the reference */
			reference = rdev;
			continue;
		}
		/* does this rdev's profile match the reference profile? */
		if (blk_integrity_compare(reference->bdev->bd_disk,
				rdev->bdev->bd_disk) < 0)
			return -EINVAL;
	}
	if (!reference || !bdev_get_integrity(reference->bdev))
		return 0;
	/*
	 * All component devices are integrity capable and have matching
	 * profiles, register the common profile for the md device.
	 */
	if (blk_integrity_register(mddev->gendisk,
			bdev_get_integrity(reference->bdev)) != 0) {
		printk(KERN_ERR "md: failed to register integrity for %s\n",
			mdname(mddev));
		return -EINVAL;
	}
	printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
	if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
		printk(KERN_ERR "md: failed to create integrity pool for %s\n",
		       mdname(mddev));
		return -EINVAL;
	}
	return 0;
}
EXPORT_SYMBOL(md_integrity_register);

/* Disable data integrity if non-capable/non-matching disk is being added */
void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev)
{
	struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev);
	struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk);

	if (!bi_mddev) /* nothing to do */
		return;
	if (rdev->raid_disk < 0) /* skip spares */
		return;
	if (bi_rdev && blk_integrity_compare(mddev->gendisk,
					     rdev->bdev->bd_disk) >= 0)
		return;
	printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
	blk_integrity_unregister(mddev->gendisk);
}
EXPORT_SYMBOL(md_integrity_add_rdev);

static int bind_rdev_to_array(struct md_rdev * rdev, struct mddev * mddev)
{
	char b[BDEVNAME_SIZE];
	struct kobject *ko;
	char *s;
	int err;

	if (rdev->mddev) {
		MD_BUG();
		return -EINVAL;
	}

	/* prevent duplicates */
	if (find_rdev(mddev, rdev->bdev->bd_dev))
		return -EEXIST;

	/* make sure rdev->sectors exceeds mddev->dev_sectors */
	if (rdev->sectors && (mddev->dev_sectors == 0 ||
			rdev->sectors < mddev->dev_sectors)) {
		if (mddev->pers) {
			/* Cannot change size, so fail
			 * If mddev->level <= 0, then we don't care
			 * about aligning sizes (e.g. linear)
			 */
			if (mddev->level > 0)
				return -ENOSPC;
		} else
			mddev->dev_sectors = rdev->sectors;
	}

	/* Verify rdev->desc_nr is unique.
	 * If it is -1, assign a free number, else
	 * check number is not in use
	 */
	if (rdev->desc_nr < 0) {
		int choice = 0;
		if (mddev->pers) choice = mddev->raid_disks;
		while (find_rdev_nr(mddev, choice))
			choice++;
		rdev->desc_nr = choice;
	} else {
		if (find_rdev_nr(mddev, rdev->desc_nr))
			return -EBUSY;
	}
	if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
		printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
		       mdname(mddev), mddev->max_disks);
		return -EBUSY;
	}
	bdevname(rdev->bdev,b);
	while ( (s=strchr(b, '/')) != NULL)
		*s = '!';

	rdev->mddev = mddev;
	printk(KERN_INFO "md: bind<%s>\n", b);

	if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
		goto fail;

	ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
	if (sysfs_create_link(&rdev->kobj, ko, "block"))
		/* failure here is OK */;
	rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");

	list_add_rcu(&rdev->same_set, &mddev->disks);
	bd_link_disk_holder(rdev->bdev, mddev->gendisk);

	/* May as well allow recovery to be retried once */
	mddev->recovery_disabled++;

	return 0;

 fail:
	printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
	       b, mdname(mddev));
	return err;
}

static void md_delayed_delete(struct work_struct *ws)
{
	struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work);
	kobject_del(&rdev->kobj);
	kobject_put(&rdev->kobj);
}

static void unbind_rdev_from_array(struct md_rdev * rdev)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->mddev) {
		MD_BUG();
		return;
	}
	bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
	list_del_rcu(&rdev->same_set);
	printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b));
	rdev->mddev = NULL;
	sysfs_remove_link(&rdev->kobj, "block");
	sysfs_put(rdev->sysfs_state);
	rdev->sysfs_state = NULL;
	kfree(rdev->badblocks.page);
	rdev->badblocks.count = 0;
	rdev->badblocks.page = NULL;
	/* We need to delay this, otherwise we can deadlock when
	 * writing 'remove' to "dev/state". We also need
	 * to delay it due to rcu usage.
	 */
	synchronize_rcu();
	INIT_WORK(&rdev->del_work, md_delayed_delete);
	kobject_get(&rdev->kobj);
	queue_work(md_misc_wq, &rdev->del_work);
}

/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
 */
static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared)
{
	int err = 0;
	struct block_device *bdev;
	char b[BDEVNAME_SIZE];

	bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
				 shared ?
				 (struct md_rdev *)lock_rdev : rdev);
	if (IS_ERR(bdev)) {
		printk(KERN_ERR "md: could not open %s.\n",
			__bdevname(dev, b));
		return PTR_ERR(bdev);
	}
	rdev->bdev = bdev;
	return err;
}
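/*
 * The 'holder' cookie passed to blkdev_get_by_dev() above identifies the
 * exclusive claimer.  For a shared claim every md rdev uses the same
 * cookie - the address of lock_rdev() itself - so md instances can share
 * the device while still excluding other exclusive openers.
 */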
static void unlock_rdev(struct md_rdev *rdev)
{
	struct block_device *bdev = rdev->bdev;
	rdev->bdev = NULL;
	if (!bdev)
		MD_BUG();
	blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

void md_autodetect_dev(dev_t dev);

static void export_rdev(struct md_rdev * rdev)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: export_rdev(%s)\n",
		bdevname(rdev->bdev,b));
	if (rdev->mddev)
		MD_BUG();
	free_disk_sb(rdev);
#ifndef MODULE
	if (test_bit(AutoDetected, &rdev->flags))
		md_autodetect_dev(rdev->bdev->bd_dev);
#endif
	unlock_rdev(rdev);
	kobject_put(&rdev->kobj);
}

static void kick_rdev_from_array(struct md_rdev * rdev)
{
	unbind_rdev_from_array(rdev);
	export_rdev(rdev);
}

static void export_array(struct mddev *mddev)
{
	struct md_rdev *rdev, *tmp;

	rdev_for_each(rdev, tmp, mddev) {
		if (!rdev->mddev) {
			MD_BUG();
			continue;
		}
		kick_rdev_from_array(rdev);
	}
	if (!list_empty(&mddev->disks))
		MD_BUG();
	mddev->raid_disks = 0;
	mddev->major_version = 0;
}

static void print_desc(mdp_disk_t *desc)
{
	printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
		desc->major,desc->minor,desc->raid_disk,desc->state);
}

static void print_sb_90(mdp_super_t *sb)
{
	int i;

	printk(KERN_INFO
		"md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
		sb->major_version, sb->minor_version, sb->patch_version,
		sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
		sb->ctime);
	printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
		sb->level, sb->size, sb->nr_disks, sb->raid_disks,
		sb->md_minor, sb->layout, sb->chunk_size);
	printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d"
		" FD:%d SD:%d CSUM:%08x E:%08lx\n",
		sb->utime, sb->state, sb->active_disks, sb->working_disks,
		sb->failed_disks, sb->spare_disks,
		sb->sb_csum, (unsigned long)sb->events_lo);

	printk(KERN_INFO);
	for (i = 0; i < MD_SB_DISKS; i++) {
		mdp_disk_t *desc;

		desc = sb->disks + i;
		if (desc->number || desc->major || desc->minor ||
		    desc->raid_disk || (desc->state && (desc->state != 4))) {
			printk(" D %2d: ", i);
			print_desc(desc);
		}
	}
	printk(KERN_INFO "md: THIS: ");
	print_desc(&sb->this_disk);
}

static void print_sb_1(struct mdp_superblock_1 *sb)
{
	__u8 *uuid;

	uuid = sb->set_uuid;
	printk(KERN_INFO
	       "md: SB: (V:%u) (F:0x%08x) Array-ID:<%pU>\n"
	       "md: Name: \"%s\" CT:%llu\n",
		le32_to_cpu(sb->major_version),
		le32_to_cpu(sb->feature_map),
		uuid,
		sb->set_name,
		(unsigned long long)le64_to_cpu(sb->ctime)
		       & MD_SUPERBLOCK_1_TIME_SEC_MASK);

	uuid = sb->device_uuid;
	printk(KERN_INFO
	       "md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
			" RO:%llu\n"
	       "md: Dev:%08x UUID: %pU\n"
	       "md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
	       "md: (MaxDev:%u) \n",
		le32_to_cpu(sb->level),
		(unsigned long long)le64_to_cpu(sb->size),
		le32_to_cpu(sb->raid_disks),
		le32_to_cpu(sb->layout),
		le32_to_cpu(sb->chunksize),
		(unsigned long long)le64_to_cpu(sb->data_offset),
		(unsigned long long)le64_to_cpu(sb->data_size),
		(unsigned long long)le64_to_cpu(sb->super_offset),
		(unsigned long long)le64_to_cpu(sb->recovery_offset),
		le32_to_cpu(sb->dev_number),
		uuid,
		sb->devflags,
		(unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
		(unsigned long long)le64_to_cpu(sb->events),
		(unsigned long long)le64_to_cpu(sb->resync_offset),
		le32_to_cpu(sb->sb_csum),
		le32_to_cpu(sb->max_dev)
		);
}

static void print_rdev(struct md_rdev *rdev, int major_version)
{
	char b[BDEVNAME_SIZE];
	printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n",
		bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors,
		test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
		rdev->desc_nr);
	if (rdev->sb_loaded) {
		printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
		switch (major_version) {
		case 0:
			print_sb_90(page_address(rdev->sb_page));
			break;
		case 1:
			print_sb_1(page_address(rdev->sb_page));
			break;
		}
	} else
		printk(KERN_INFO "md: no rdev superblock!\n");
}

static void md_print_devices(void)
{
	struct list_head *tmp;
	struct md_rdev *rdev;
	struct mddev *mddev;
	char b[BDEVNAME_SIZE];

	printk("\n");
	printk("md:	**********************************\n");
	printk("md:	* <COMPLETE RAID STATE PRINTOUT> *\n");
	printk("md:	**********************************\n");
	for_each_mddev(mddev, tmp) {

		if (mddev->bitmap)
			bitmap_print_sb(mddev->bitmap);
		else
			printk("%s: ", mdname(mddev));
		list_for_each_entry(rdev, &mddev->disks, same_set)
			printk("<%s>", bdevname(rdev->bdev,b));
		printk("\n");

		list_for_each_entry(rdev, &mddev->disks, same_set)
			print_rdev(rdev, mddev->major_version);
	}
	printk("md:	**********************************\n");
	printk("\n");
}
static void sync_sbs(struct mddev * mddev, int nospares)
{
	/* Update each superblock (in-memory image), but
	 * if we are allowed to, skip spares which already
	 * have the right event counter, or have one earlier
	 * (which would mean they aren't being marked as dirty
	 * with the rest of the array)
	 */
	struct md_rdev *rdev;
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (rdev->sb_events == mddev->events ||
		    (nospares &&
		     rdev->raid_disk < 0 &&
		     rdev->sb_events+1 == mddev->events)) {
			/* Don't update this superblock */
			rdev->sb_loaded = 2;
		} else {
			sync_super(mddev, rdev);
			rdev->sb_loaded = 1;
		}
	}
}
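/*
 * sync_sbs() leaves a marker in rdev->sb_loaded: 1 means the in-memory
 * superblock was refreshed and must be written out, 2 means this device
 * (an up-to-date spare) can be skipped.  md_update_sb() below only writes
 * devices with sb_loaded == 1.
 */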
static void md_update_sb(struct mddev * mddev, int force_change)
{
	struct md_rdev *rdev;
	int sync_req;
	int nospares = 0;
	int any_badblocks_changed = 0;

repeat:
	/* First make sure individual recovery_offsets are correct */
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (rdev->raid_disk >= 0 &&
		    mddev->delta_disks >= 0 &&
		    !test_bit(In_sync, &rdev->flags) &&
		    mddev->curr_resync_completed > rdev->recovery_offset)
			rdev->recovery_offset = mddev->curr_resync_completed;

	}
	if (!mddev->persistent) {
		clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
		clear_bit(MD_CHANGE_DEVS, &mddev->flags);
		if (!mddev->external) {
			clear_bit(MD_CHANGE_PENDING, &mddev->flags);
			list_for_each_entry(rdev, &mddev->disks, same_set) {
				if (rdev->badblocks.changed) {
					md_ack_all_badblocks(&rdev->badblocks);
					md_error(mddev, rdev);
				}
				clear_bit(Blocked, &rdev->flags);
				clear_bit(BlockedBadBlocks, &rdev->flags);
				wake_up(&rdev->blocked_wait);
			}
		}
		wake_up(&mddev->sb_wait);
		return;
	}

	spin_lock_irq(&mddev->write_lock);

	mddev->utime = get_seconds();

	if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
		force_change = 1;
	if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
		/* just a clean <-> dirty transition, possibly leave spares alone,
		 * though if events isn't the right even/odd, we will have to do
		 * spares after all
		 */
		nospares = 1;
	if (force_change)
		nospares = 0;
	if (mddev->degraded)
		/* If the array is degraded, then skipping spares is both
		 * dangerous and fairly pointless.
		 * Dangerous because a device that was removed from the array
		 * might have an event_count that still looks up-to-date,
		 * so it can be re-added without a resync.
		 * Pointless because if there are any spares to skip,
		 * then a recovery will happen and soon that array won't
		 * be degraded any more and the spare can go back to sleep then.
		 */
		nospares = 0;

	sync_req = mddev->in_sync;

	/* If this is just a dirty<->clean transition, and the array is clean
	 * and 'events' is odd, we can roll back to the previous clean state */
	if (nospares
	    && (mddev->in_sync && mddev->recovery_cp == MaxSector)
	    && mddev->can_decrease_events
	    && mddev->events != 1) {
		mddev->events--;
		mddev->can_decrease_events = 0;
	} else {
		/* otherwise we have to go forward and ... */
		mddev->events++;
		mddev->can_decrease_events = nospares;
	}
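	/*
	 * Example of the rollback above: an array that was clean at
	 * events == 98 goes active and bumps to 99; when it becomes clean
	 * again and nothing else changed, events drops back to 98 instead
	 * of advancing to 100, so spares that were skipped (and still
	 * record 98) stay current without a superblock rewrite.
	 */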
	if (!mddev->events) {
		/*
		 * oops, this 64-bit counter should never wrap.
		 * Either we are in around ~1 trillion A.C., assuming
		 * 1 reboot per second, or we have a bug:
		 */
		MD_BUG();
		mddev->events--;
	}

	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (rdev->badblocks.changed)
			any_badblocks_changed++;
		if (test_bit(Faulty, &rdev->flags))
			set_bit(FaultRecorded, &rdev->flags);
	}

	sync_sbs(mddev, nospares);
	spin_unlock_irq(&mddev->write_lock);

	pr_debug("md: updating %s RAID superblock on device (in sync %d)\n",
		 mdname(mddev), mddev->in_sync);

	bitmap_update_sb(mddev->bitmap);
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		char b[BDEVNAME_SIZE];

		if (rdev->sb_loaded != 1)
			continue; /* no noise on spare devices */

		if (!test_bit(Faulty, &rdev->flags) &&
		    rdev->saved_raid_disk == -1) {
			md_super_write(mddev,rdev,
				       rdev->sb_start, rdev->sb_size,
				       rdev->sb_page);
			pr_debug("md: (write) %s's sb offset: %llu\n",
				 bdevname(rdev->bdev, b),
				 (unsigned long long)rdev->sb_start);
			rdev->sb_events = mddev->events;
			if (rdev->badblocks.size) {
				md_super_write(mddev, rdev,
					       rdev->badblocks.sector,
					       rdev->badblocks.size << 9,
					       rdev->bb_page);
				rdev->badblocks.size = 0;
			}

		} else if (test_bit(Faulty, &rdev->flags))
			pr_debug("md: %s (skipping faulty)\n",
				 bdevname(rdev->bdev, b));
		else
			pr_debug("(skipping incremental s/r ");

		if (mddev->level == LEVEL_MULTIPATH)
			/* only need to write one superblock... */
			break;
	}
	md_super_wait(mddev);
	/* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */

	spin_lock_irq(&mddev->write_lock);
	if (mddev->in_sync != sync_req ||
	    test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
		/* have to write it out again */
		spin_unlock_irq(&mddev->write_lock);
		goto repeat;
	}
	clear_bit(MD_CHANGE_PENDING, &mddev->flags);
	spin_unlock_irq(&mddev->write_lock);
	wake_up(&mddev->sb_wait);
	if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
		sysfs_notify(&mddev->kobj, NULL, "sync_completed");

	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (test_and_clear_bit(FaultRecorded, &rdev->flags))
			clear_bit(Blocked, &rdev->flags);

		if (any_badblocks_changed)
			md_ack_all_badblocks(&rdev->badblocks);
		clear_bit(BlockedBadBlocks, &rdev->flags);
		wake_up(&rdev->blocked_wait);
	}
}

/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either case. For this we use cmd_match.
 */
static int cmd_match(const char *cmd, const char *str)
{
	/* See if cmd, written into a sysfs file, matches
	 * str.  They must either be the same, or cmd can
	 * have a trailing newline
	 */
	while (*cmd && *str && *cmd == *str) {
		cmd++;
		str++;
	}
	if (*cmd == '\n')
		cmd++;
	if (*str || *cmd)
		return 0;
	return 1;
}
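/*
 * Examples: cmd_match("idle\n", "idle") and cmd_match("idle", "idle")
 * both return 1; cmd_match("idl", "idle") and cmd_match("idlex", "idle")
 * return 0.  Only a single trailing newline on 'cmd' is tolerated.
 */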
struct rdev_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct md_rdev *, char *);
	ssize_t (*store)(struct md_rdev *, const char *, size_t);
};

static ssize_t
state_show(struct md_rdev *rdev, char *page)
{
	char *sep = "";
	size_t len = 0;

	if (test_bit(Faulty, &rdev->flags) ||
	    rdev->badblocks.unacked_exist) {
		len+= sprintf(page+len, "%sfaulty",sep);
		sep = ",";
	}
	if (test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sin_sync",sep);
		sep = ",";
	}
	if (test_bit(WriteMostly, &rdev->flags)) {
		len += sprintf(page+len, "%swrite_mostly",sep);
		sep = ",";
	}
	if (test_bit(Blocked, &rdev->flags) ||
	    rdev->badblocks.unacked_exist) {
		len += sprintf(page+len, "%sblocked", sep);
		sep = ",";
	}
	if (!test_bit(Faulty, &rdev->flags) &&
	    !test_bit(In_sync, &rdev->flags)) {
		len += sprintf(page+len, "%sspare", sep);
		sep = ",";
	}
	if (test_bit(WriteErrorSeen, &rdev->flags)) {
		len += sprintf(page+len, "%swrite_error", sep);
		sep = ",";
	}
	return len+sprintf(page+len, "\n");
}

static ssize_t
state_store(struct md_rdev *rdev, const char *buf, size_t len)
{
	/* can write
	 *  faulty  - simulates an error
	 *  remove  - disconnects the device
	 *  writemostly - sets write_mostly
	 *  -writemostly - clears write_mostly
	 *  blocked - sets the Blocked flags
	 *  -blocked - clears the Blocked and possibly simulates an error
	 *  insync - sets Insync providing device isn't active
	 *  write_error - sets WriteErrorSeen
	 *  -write_error - clears WriteErrorSeen
	 */
	int err = -EINVAL;
	if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
		md_error(rdev->mddev, rdev);
		if (test_bit(Faulty, &rdev->flags))
			err = 0;
		else
			err = -EBUSY;
	} else if (cmd_match(buf, "remove")) {
		if (rdev->raid_disk >= 0)
			err = -EBUSY;
		else {
			struct mddev *mddev = rdev->mddev;
			kick_rdev_from_array(rdev);
			if (mddev->pers)
				md_update_sb(mddev, 1);
			md_new_event(mddev);
			err = 0;
		}
	} else if (cmd_match(buf, "writemostly")) {
		set_bit(WriteMostly, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-writemostly")) {
		clear_bit(WriteMostly, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "blocked")) {
		set_bit(Blocked, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-blocked")) {
		if (!test_bit(Faulty, &rdev->flags) &&
		    rdev->badblocks.unacked_exist) {
			/* metadata handler doesn't understand badblocks,
			 * so we need to fail the device
			 */
			md_error(rdev->mddev, rdev);
		}
		clear_bit(Blocked, &rdev->flags);
		clear_bit(BlockedBadBlocks, &rdev->flags);
		wake_up(&rdev->blocked_wait);
		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
		md_wakeup_thread(rdev->mddev->thread);

		err = 0;
	} else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
		set_bit(In_sync, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "write_error")) {
		set_bit(WriteErrorSeen, &rdev->flags);
		err = 0;
	} else if (cmd_match(buf, "-write_error")) {
		clear_bit(WriteErrorSeen, &rdev->flags);
		err = 0;
	}
	if (!err)
		sysfs_notify_dirent_safe(rdev->sysfs_state);
	return err ? err : len;
}
static struct rdev_sysfs_entry rdev_state =
__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
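/*
 * These flags are driven from userspace, e.g. (path is illustrative, for
 * an rdev registered as dev-sda1 under md0):
 *   echo -blocked > /sys/block/md0/md/dev-sda1/state
 * clears the Blocked flag and lets waiting writes proceed.
 */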
static ssize_t
errors_show(struct md_rdev *rdev, char *page)
{
	return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
}

static ssize_t
errors_store(struct md_rdev *rdev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);
	if (*buf && (*e == 0 || *e == '\n')) {
		atomic_set(&rdev->corrected_errors, n);
		return len;
	}
	return -EINVAL;
}
static struct rdev_sysfs_entry rdev_errors =
__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);

static ssize_t
slot_show(struct md_rdev *rdev, char *page)
{
	if (rdev->raid_disk < 0)
		return sprintf(page, "none\n");
	else
		return sprintf(page, "%d\n", rdev->raid_disk);
}

static ssize_t
slot_store(struct md_rdev *rdev, const char *buf, size_t len)
{
	char *e;
	int err;
	int slot = simple_strtoul(buf, &e, 10);
	if (strncmp(buf, "none", 4)==0)
		slot = -1;
	else if (e==buf || (*e && *e!= '\n'))
		return -EINVAL;
	if (rdev->mddev->pers && slot == -1) {
		/* Setting 'slot' on an active array requires also
		 * updating the 'rd%d' link, and communicating
		 * with the personality with ->hot_*_disk.
		 * For now we only support removing
		 * failed/spare devices.  This normally happens automatically,
		 * but not when the metadata is externally managed.
		 */
		if (rdev->raid_disk == -1)
			return -EEXIST;
		/* personality does all needed checks */
		if (rdev->mddev->pers->hot_remove_disk == NULL)
			return -EINVAL;
		err = rdev->mddev->pers->
			hot_remove_disk(rdev->mddev, rdev->raid_disk);
		if (err)
			return err;
		sysfs_unlink_rdev(rdev->mddev, rdev);
		rdev->raid_disk = -1;
		set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
		md_wakeup_thread(rdev->mddev->thread);
	} else if (rdev->mddev->pers) {
		struct md_rdev *rdev2;
		/* Activating a spare .. or possibly reactivating
		 * if we ever get bitmaps working here.
		 */

		if (rdev->raid_disk != -1)
			return -EBUSY;

		if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery))
			return -EBUSY;

		if (rdev->mddev->pers->hot_add_disk == NULL)
			return -EINVAL;

		list_for_each_entry(rdev2, &rdev->mddev->disks, same_set)
			if (rdev2->raid_disk == slot)
				return -EEXIST;

		if (slot >= rdev->mddev->raid_disks &&
		    slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
			return -ENOSPC;

		rdev->raid_disk = slot;
		if (test_bit(In_sync, &rdev->flags))
			rdev->saved_raid_disk = slot;
		else
			rdev->saved_raid_disk = -1;
		clear_bit(In_sync, &rdev->flags);
		err = rdev->mddev->pers->
			hot_add_disk(rdev->mddev, rdev);
		if (err) {
			rdev->raid_disk = -1;
			return err;
		} else
			sysfs_notify_dirent_safe(rdev->sysfs_state);
		if (sysfs_link_rdev(rdev->mddev, rdev))
			/* failure here is OK */;
		/* don't wakeup anyone, leave that to userspace.
		 */
	} else {
		if (slot >= rdev->mddev->raid_disks &&
		    slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks)
			return -ENOSPC;
		rdev->raid_disk = slot;
		/* assume it is working */
		clear_bit(Faulty, &rdev->flags);
		clear_bit(WriteMostly, &rdev->flags);
		set_bit(In_sync, &rdev->flags);
		sysfs_notify_dirent_safe(rdev->sysfs_state);
	}
	return len;
}


static struct rdev_sysfs_entry rdev_slot =
__ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store);
static ssize_t
offset_show(struct md_rdev *rdev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset);
}

static ssize_t
offset_store(struct md_rdev *rdev, const char *buf, size_t len)
{
	char *e;
	unsigned long long offset = simple_strtoull(buf, &e, 10);
	if (e==buf || (*e && *e != '\n'))
		return -EINVAL;
	if (rdev->mddev->pers && rdev->raid_disk >= 0)
		return -EBUSY;
	if (rdev->sectors && rdev->mddev->external)
		/* Must set offset before size, so overlap checks
		 * can be sane */
		return -EBUSY;
	rdev->data_offset = offset;
	return len;
}

static struct rdev_sysfs_entry rdev_offset =
__ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store);

static ssize_t
rdev_size_show(struct md_rdev *rdev, char *page)
{
	return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2);
}

static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2)
{
	/* check if two start/length pairs overlap */
	if (s1+l1 <= s2)
		return 0;
	if (s2+l2 <= s1)
		return 0;
	return 1;
}

static int strict_blocks_to_sectors(const char *buf, sector_t *sectors)
{
	unsigned long long blocks;
	sector_t new;

	if (strict_strtoull(buf, 10, &blocks) < 0)
		return -EINVAL;

	if (blocks & 1ULL << (8 * sizeof(blocks) - 1))
		return -EINVAL; /* sector conversion overflow */

	new = blocks * 2;
	if (new != blocks * 2)
		return -EINVAL; /* unsigned long long to sector_t overflow */

	*sectors = new;
	return 0;
}
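/*
 * The sysfs sizes handled here are in 1K blocks while md works in
 * 512-byte sectors, hence the "* 2": strict_blocks_to_sectors("100", &s)
 * yields s == 200.  The two checks above reject values whose doubling
 * would overflow unsigned long long or a 32-bit sector_t.
 */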
static ssize_t
rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len)
{
	struct mddev *my_mddev = rdev->mddev;
	sector_t oldsectors = rdev->sectors;
	sector_t sectors;

	if (strict_blocks_to_sectors(buf, &sectors) < 0)
		return -EINVAL;
	if (my_mddev->pers && rdev->raid_disk >= 0) {
		if (my_mddev->persistent) {
			sectors = super_types[my_mddev->major_version].
				rdev_size_change(rdev, sectors);
			if (!sectors)
				return -EBUSY;
		} else if (!sectors)
			sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
				rdev->data_offset;
	}
	if (sectors < my_mddev->dev_sectors)
		return -EINVAL; /* component must fit device */

	rdev->sectors = sectors;
	if (sectors > oldsectors && my_mddev->external) {
		/* need to check that all other rdevs with the same ->bdev
		 * do not overlap.  We need to unlock the mddev to avoid
		 * a deadlock.  We have already changed rdev->sectors, and if
		 * we have to change it back, we will have the lock again.
		 */
		struct mddev *mddev;
		int overlap = 0;
		struct list_head *tmp;

		mddev_unlock(my_mddev);
		for_each_mddev(mddev, tmp) {
			struct md_rdev *rdev2;

			mddev_lock(mddev);
			list_for_each_entry(rdev2, &mddev->disks, same_set)
				if (rdev->bdev == rdev2->bdev &&
				    rdev != rdev2 &&
				    overlaps(rdev->data_offset, rdev->sectors,
					     rdev2->data_offset,
					     rdev2->sectors)) {
					overlap = 1;
					break;
				}
			mddev_unlock(mddev);
			if (overlap) {
				mddev_put(mddev);
				break;
			}
		}
		mddev_lock(my_mddev);
		if (overlap) {
			/* Someone else could have slipped in a size
			 * change here, but doing so is just silly.
			 * We put oldsectors back because we *know* it is
			 * safe, and trust userspace not to race with
			 * itself
			 */
			rdev->sectors = oldsectors;
			return -EBUSY;
		}
	}
	return len;
}

static struct rdev_sysfs_entry rdev_size =
__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);


static ssize_t recovery_start_show(struct md_rdev *rdev, char *page)
{
	unsigned long long recovery_start = rdev->recovery_offset;

	if (test_bit(In_sync, &rdev->flags) ||
	    recovery_start == MaxSector)
		return sprintf(page, "none\n");

	return sprintf(page, "%llu\n", recovery_start);
}

static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len)
{
	unsigned long long recovery_start;

	if (cmd_match(buf, "none"))
		recovery_start = MaxSector;
	else if (strict_strtoull(buf, 10, &recovery_start))
		return -EINVAL;

	if (rdev->mddev->pers &&
	    rdev->raid_disk >= 0)
		return -EBUSY;

	rdev->recovery_offset = recovery_start;
	if (recovery_start == MaxSector)
		set_bit(In_sync, &rdev->flags);
	else
		clear_bit(In_sync, &rdev->flags);
	return len;
}

static struct rdev_sysfs_entry rdev_recovery_start =
__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);
static ssize_t
badblocks_show(struct badblocks *bb, char *page, int unack);
static ssize_t
badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack);

static ssize_t bb_show(struct md_rdev *rdev, char *page)
{
	return badblocks_show(&rdev->badblocks, page, 0);
}
static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len)
{
	int rv = badblocks_store(&rdev->badblocks, page, len, 0);
	/* Maybe that ack was all we needed */
	if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
		wake_up(&rdev->blocked_wait);
	return rv;
}
static struct rdev_sysfs_entry rdev_bad_blocks =
__ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);


static ssize_t ubb_show(struct md_rdev *rdev, char *page)
{
	return badblocks_show(&rdev->badblocks, page, 1);
}
static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len)
{
	return badblocks_store(&rdev->badblocks, page, len, 1);
}
static struct rdev_sysfs_entry rdev_unack_bad_blocks =
__ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);

static struct attribute *rdev_default_attrs[] = {
	&rdev_state.attr,
	&rdev_errors.attr,
	&rdev_slot.attr,
	&rdev_offset.attr,
	&rdev_size.attr,
	&rdev_recovery_start.attr,
	&rdev_bad_blocks.attr,
	&rdev_unack_bad_blocks.attr,
	NULL,
};
static ssize_t
rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
	struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
	struct mddev *mddev = rdev->mddev;
	ssize_t rv;

	if (!entry->show)
		return -EIO;

	rv = mddev ? mddev_lock(mddev) : -EBUSY;
	if (!rv) {
		if (rdev->mddev == NULL)
			rv = -EBUSY;
		else
			rv = entry->show(rdev, page);
		mddev_unlock(mddev);
	}
	return rv;
}

static ssize_t
rdev_attr_store(struct kobject *kobj, struct attribute *attr,
	      const char *page, size_t length)
{
	struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
	struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj);
	ssize_t rv;
	struct mddev *mddev = rdev->mddev;

	if (!entry->store)
		return -EIO;
	if (!capable(CAP_SYS_ADMIN))
		return -EACCES;
	rv = mddev ? mddev_lock(mddev): -EBUSY;
	if (!rv) {
		if (rdev->mddev == NULL)
			rv = -EBUSY;
		else
			rv = entry->store(rdev, page, length);
		mddev_unlock(mddev);
	}
	return rv;
}

static void rdev_free(struct kobject *ko)
{
	struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj);
	kfree(rdev);
}
static const struct sysfs_ops rdev_sysfs_ops = {
	.show		= rdev_attr_show,
	.store		= rdev_attr_store,
};
static struct kobj_type rdev_ktype = {
	.release	= rdev_free,
	.sysfs_ops	= &rdev_sysfs_ops,
	.default_attrs	= rdev_default_attrs,
};

int md_rdev_init(struct md_rdev *rdev)
{
	rdev->desc_nr = -1;
	rdev->saved_raid_disk = -1;
	rdev->raid_disk = -1;
	rdev->flags = 0;
	rdev->data_offset = 0;
	rdev->sb_events = 0;
	rdev->last_read_error.tv_sec = 0;
	rdev->last_read_error.tv_nsec = 0;
	rdev->sb_loaded = 0;
	rdev->bb_page = NULL;
	atomic_set(&rdev->nr_pending, 0);
	atomic_set(&rdev->read_errors, 0);
	atomic_set(&rdev->corrected_errors, 0);

	INIT_LIST_HEAD(&rdev->same_set);
	init_waitqueue_head(&rdev->blocked_wait);

	/* Add space to store bad block list.
	 * This reserves the space even on arrays where it cannot
	 * be used - I wonder if that matters
	 */
	rdev->badblocks.count = 0;
	rdev->badblocks.shift = 0;
	rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL);
	seqlock_init(&rdev->badblocks.lock);
	if (rdev->badblocks.page == NULL)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL_GPL(md_rdev_init);
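/*
 * Each bad-block record is packed into a single u64 (offset << 10 | length,
 * as written out in super_1_sync), so the one page allocated above bounds
 * the list at PAGE_SIZE / 8 entries - 512 on architectures with 4K pages.
 */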
/*
 * Import a device. If 'super_format' >= 0, then sanity check the superblock
 *
 * mark the device faulty if:
 *
 *   - the device is nonexistent (zero size)
 *   - the device has no valid superblock
 *
 * a faulty rdev _never_ has rdev->sb set.
 */
static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor)
{
	char b[BDEVNAME_SIZE];
	int err;
	struct md_rdev *rdev;
	sector_t size;

	rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
	if (!rdev) {
		printk(KERN_ERR "md: could not alloc mem for new device!\n");
		return ERR_PTR(-ENOMEM);
	}

	err = md_rdev_init(rdev);
	if (err)
		goto abort_free;
	err = alloc_disk_sb(rdev);
	if (err)
		goto abort_free;

	err = lock_rdev(rdev, newdev, super_format == -2);
	if (err)
		goto abort_free;

	kobject_init(&rdev->kobj, &rdev_ktype);

	size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
	if (!size) {
		printk(KERN_WARNING
			"md: %s has zero or unknown size, marking faulty!\n",
			bdevname(rdev->bdev,b));
		err = -EINVAL;
		goto abort_free;
	}

	if (super_format >= 0) {
		err = super_types[super_format].
			load_super(rdev, NULL, super_minor);
		if (err == -EINVAL) {
			printk(KERN_WARNING
				"md: %s does not have a valid v%d.%d "
				"superblock, not importing!\n",
				bdevname(rdev->bdev,b),
				super_format, super_minor);
			goto abort_free;
		}
		if (err < 0) {
			printk(KERN_WARNING
				"md: could not read %s's sb, not importing!\n",
				bdevname(rdev->bdev,b));
			goto abort_free;
		}
	}
	if (super_format == -1)
		/* hot-add for 0.90, or non-persistent: so no badblocks */
		rdev->badblocks.shift = -1;

	return rdev;

abort_free:
	if (rdev->bdev)
		unlock_rdev(rdev);
	free_disk_sb(rdev);
	kfree(rdev->badblocks.page);
	kfree(rdev);
	return ERR_PTR(err);
}

/*
 * Check a full RAID array for plausibility
 */


static void analyze_sbs(struct mddev * mddev)
{
	int i;
	struct md_rdev *rdev, *freshest, *tmp;
	char b[BDEVNAME_SIZE];

	freshest = NULL;
	rdev_for_each(rdev, tmp, mddev)
		switch (super_types[mddev->major_version].
			load_super(rdev, freshest, mddev->minor_version)) {
		case 1:
			freshest = rdev;
			break;
		case 0:
			break;
		default:
			printk(KERN_ERR
				"md: fatal superblock inconsistency in %s"
				" -- removing from array\n",
				bdevname(rdev->bdev,b));
			kick_rdev_from_array(rdev);
		}


	super_types[mddev->major_version].
		validate_super(mddev, freshest);

	i = 0;
	rdev_for_each(rdev, tmp, mddev) {
		if (mddev->max_disks &&
		    (rdev->desc_nr >= mddev->max_disks ||
		     i > mddev->max_disks)) {
			printk(KERN_WARNING
			       "md: %s: %s: only %d devices permitted\n",
			       mdname(mddev), bdevname(rdev->bdev, b),
			       mddev->max_disks);
			kick_rdev_from_array(rdev);
			continue;
		}
		if (rdev != freshest)
			if (super_types[mddev->major_version].
			    validate_super(mddev, rdev)) {
				printk(KERN_WARNING "md: kicking non-fresh %s"
					" from array!\n",
					bdevname(rdev->bdev,b));
				kick_rdev_from_array(rdev);
				continue;
			}
		if (mddev->level == LEVEL_MULTIPATH) {
			rdev->desc_nr = i++;
			rdev->raid_disk = rdev->desc_nr;
			set_bit(In_sync, &rdev->flags);
		} else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
			rdev->raid_disk = -1;
			clear_bit(In_sync, &rdev->flags);
		}
	}
}
/* Read a fixed-point number.
 * Numbers in sysfs attributes should be in "standard" units where
 * possible, so time should be in seconds.
 * However we internally use a much smaller unit such as
 * milliseconds or jiffies.
 * This function takes a decimal number with a possible fractional
 * component, and produces an integer which is the result of
 * multiplying that number by 10^'scale'.
 * all without any floating-point arithmetic.
 */
int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
{
	unsigned long result = 0;
	long decimals = -1;
	while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
		if (*cp == '.')
			decimals = 0;
		else if (decimals < scale) {
			unsigned int value;
			value = *cp - '0';
			result = result * 10 + value;
			if (decimals >= 0)
				decimals++;
		}
		cp++;
	}
	if (*cp == '\n')
		cp++;
	if (*cp)
		return -EINVAL;
	if (decimals < 0)
		decimals = 0;
	while (decimals < scale) {
		result *= 10;
		decimals++;
	}
	*res = result;
	return 0;
}
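/*
 * Example: strict_strtoul_scaled("0.2\n", &res, 3) stores 200, i.e. 0.2
 * expressed in thousandths.  safe_delay_store() below uses exactly this
 * to parse a seconds value such as "0.2" into 200 milliseconds.
 */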
3318 */ 3319 3320 if (mddev->sync_thread || 3321 mddev->reshape_position != MaxSector || 3322 mddev->sysfs_active) 3323 return -EBUSY; 3324 3325 if (!mddev->pers->quiesce) { 3326 printk(KERN_WARNING "md: %s: %s does not support online personality change\n", 3327 mdname(mddev), mddev->pers->name); 3328 return -EINVAL; 3329 } 3330 3331 /* Now find the new personality */ 3332 if (len == 0 || len >= sizeof(clevel)) 3333 return -EINVAL; 3334 strncpy(clevel, buf, len); 3335 if (clevel[len-1] == '\n') 3336 len--; 3337 clevel[len] = 0; 3338 if (strict_strtol(clevel, 10, &level)) 3339 level = LEVEL_NONE; 3340 3341 if (request_module("md-%s", clevel) != 0) 3342 request_module("md-level-%s", clevel); 3343 spin_lock(&pers_lock); 3344 pers = find_pers(level, clevel); 3345 if (!pers || !try_module_get(pers->owner)) { 3346 spin_unlock(&pers_lock); 3347 printk(KERN_WARNING "md: personality %s not loaded\n", clevel); 3348 return -EINVAL; 3349 } 3350 spin_unlock(&pers_lock); 3351 3352 if (pers == mddev->pers) { 3353 /* Nothing to do! */ 3354 module_put(pers->owner); 3355 return rv; 3356 } 3357 if (!pers->takeover) { 3358 module_put(pers->owner); 3359 printk(KERN_WARNING "md: %s: %s does not support personality takeover\n", 3360 mdname(mddev), clevel); 3361 return -EINVAL; 3362 } 3363 3364 list_for_each_entry(rdev, &mddev->disks, same_set) 3365 rdev->new_raid_disk = rdev->raid_disk; 3366 3367 /* ->takeover must set new_* and/or delta_disks 3368 * if it succeeds, and may set them when it fails. 3369 */ 3370 priv = pers->takeover(mddev); 3371 if (IS_ERR(priv)) { 3372 mddev->new_level = mddev->level; 3373 mddev->new_layout = mddev->layout; 3374 mddev->new_chunk_sectors = mddev->chunk_sectors; 3375 mddev->raid_disks -= mddev->delta_disks; 3376 mddev->delta_disks = 0; 3377 module_put(pers->owner); 3378 printk(KERN_WARNING "md: %s: %s would not accept array\n", 3379 mdname(mddev), clevel); 3380 return PTR_ERR(priv); 3381 } 3382 3383 /* Looks like we have a winner */ 3384 mddev_suspend(mddev); 3385 mddev->pers->stop(mddev); 3386 3387 if (mddev->pers->sync_request == NULL && 3388 pers->sync_request != NULL) { 3389 /* need to add the md_redundancy_group */ 3390 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 3391 printk(KERN_WARNING 3392 "md: cannot register extra attributes for %s\n", 3393 mdname(mddev)); 3394 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, NULL, "sync_action"); 3395 } 3396 if (mddev->pers->sync_request != NULL && 3397 pers->sync_request == NULL) { 3398 /* need to remove the md_redundancy_group */ 3399 if (mddev->to_remove == NULL) 3400 mddev->to_remove = &md_redundancy_group; 3401 } 3402 3403 if (mddev->pers->sync_request == NULL && 3404 mddev->external) { 3405 /* We are converting from a no-redundancy array 3406 * to a redundancy array and metadata is managed 3407 * externally so we need to be sure that writes 3408 * won't block due to a need to transition 3409 * clean->dirty 3410 * until external management is started. 
3411 */ 3412 mddev->in_sync = 0; 3413 mddev->safemode_delay = 0; 3414 mddev->safemode = 0; 3415 } 3416 3417 list_for_each_entry(rdev, &mddev->disks, same_set) { 3418 if (rdev->raid_disk < 0) 3419 continue; 3420 if (rdev->new_raid_disk >= mddev->raid_disks) 3421 rdev->new_raid_disk = -1; 3422 if (rdev->new_raid_disk == rdev->raid_disk) 3423 continue; 3424 sysfs_unlink_rdev(mddev, rdev); 3425 } 3426 list_for_each_entry(rdev, &mddev->disks, same_set) { 3427 if (rdev->raid_disk < 0) 3428 continue; 3429 if (rdev->new_raid_disk == rdev->raid_disk) 3430 continue; 3431 rdev->raid_disk = rdev->new_raid_disk; 3432 if (rdev->raid_disk < 0) 3433 clear_bit(In_sync, &rdev->flags); 3434 else { 3435 if (sysfs_link_rdev(mddev, rdev)) 3436 printk(KERN_WARNING "md: cannot register rd%d" 3437 " for %s after level change\n", 3438 rdev->raid_disk, mdname(mddev)); 3439 } 3440 } 3441 3442 module_put(mddev->pers->owner); 3443 mddev->pers = pers; 3444 mddev->private = priv; 3445 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 3446 mddev->level = mddev->new_level; 3447 mddev->layout = mddev->new_layout; 3448 mddev->chunk_sectors = mddev->new_chunk_sectors; 3449 mddev->delta_disks = 0; 3450 mddev->degraded = 0; 3451 if (mddev->pers->sync_request == NULL) { 3452 /* this is now an array without redundancy, so 3453 * it must always be in_sync 3454 */ 3455 mddev->in_sync = 1; 3456 del_timer_sync(&mddev->safemode_timer); 3457 } 3458 pers->run(mddev); 3459 mddev_resume(mddev); 3460 set_bit(MD_CHANGE_DEVS, &mddev->flags); 3461 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3462 md_wakeup_thread(mddev->thread); 3463 sysfs_notify(&mddev->kobj, NULL, "level"); 3464 md_new_event(mddev); 3465 return rv; 3466 } 3467 3468 static struct md_sysfs_entry md_level = 3469 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store); 3470 3471 3472 static ssize_t 3473 layout_show(struct mddev *mddev, char *page) 3474 { 3475 /* just a number, not meaningful for all levels */ 3476 if (mddev->reshape_position != MaxSector && 3477 mddev->layout != mddev->new_layout) 3478 return sprintf(page, "%d (%d)\n", 3479 mddev->new_layout, mddev->layout); 3480 return sprintf(page, "%d\n", mddev->layout); 3481 } 3482 3483 static ssize_t 3484 layout_store(struct mddev *mddev, const char *buf, size_t len) 3485 { 3486 char *e; 3487 unsigned long n = simple_strtoul(buf, &e, 10); 3488 3489 if (!*buf || (*e && *e != '\n')) 3490 return -EINVAL; 3491 3492 if (mddev->pers) { 3493 int err; 3494 if (mddev->pers->check_reshape == NULL) 3495 return -EBUSY; 3496 mddev->new_layout = n; 3497 err = mddev->pers->check_reshape(mddev); 3498 if (err) { 3499 mddev->new_layout = mddev->layout; 3500 return err; 3501 } 3502 } else { 3503 mddev->new_layout = n; 3504 if (mddev->reshape_position == MaxSector) 3505 mddev->layout = n; 3506 } 3507 return len; 3508 } 3509 static struct md_sysfs_entry md_layout = 3510 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store); 3511 3512 3513 static ssize_t 3514 raid_disks_show(struct mddev *mddev, char *page) 3515 { 3516 if (mddev->raid_disks == 0) 3517 return 0; 3518 if (mddev->reshape_position != MaxSector && 3519 mddev->delta_disks != 0) 3520 return sprintf(page, "%d (%d)\n", mddev->raid_disks, 3521 mddev->raid_disks - mddev->delta_disks); 3522 return sprintf(page, "%d\n", mddev->raid_disks); 3523 } 3524 3525 static int update_raid_disks(struct mddev *mddev, int raid_disks); 3526 3527 static ssize_t 3528 raid_disks_store(struct mddev *mddev, const char *buf, size_t len) 3529 { 3530 char *e; 3531 int rv = 0; 3532 unsigned long n 
static ssize_t
resync_start_show(struct mddev *mddev, char *page)
{
	if (mddev->recovery_cp == MaxSector)
		return sprintf(page, "none\n");
	return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
}

static ssize_t
resync_start_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long long n = simple_strtoull(buf, &e, 10);

	if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
		return -EBUSY;
	if (cmd_match(buf, "none"))
		n = MaxSector;
	else if (!*buf || (*e && *e != '\n'))
		return -EINVAL;

	mddev->recovery_cp = n;
	return len;
}
static struct md_sysfs_entry md_resync_start =
__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);
/*
 * The array state can be:
 *
 * clear
 *     No devices, no size, no level
 *     Equivalent to STOP_ARRAY ioctl
 * inactive
 *     May have some settings, but array is not active
 *     all IO results in error
 *     When written, doesn't tear down array, but just stops it
 * suspended (not supported yet)
 *     All IO requests will block. The array can be reconfigured.
 *     Writing this, if accepted, will block until array is quiescent
 * readonly
 *     no resync can happen.  no superblocks get written.
 *     write requests fail
 * read-auto
 *     like readonly, but behaves like 'clean' on a write request.
 *
 * clean - no pending writes, but otherwise active.
 *     When written to inactive array, starts without resync
 *     If a write request arrives then
 *       if metadata is known, mark 'dirty' and switch to 'active'.
 *       if not known, block and switch to write-pending
 *     If written to an active array that has pending writes, then fails.
 * active
 *     fully active: IO and resync can be happening.
 *     When written to inactive array, starts with resync
 *
 * write-pending
 *     clean, but writes are blocked waiting for 'active' to be written.
 *
 * active-idle
 *     like active, but no writes have been seen for a while (100msec).
 *
 */
enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
		   write_pending, active_idle, bad_word};
static char *array_states[] = {
	"clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
	"write-pending", "active-idle", NULL };

static int match_word(const char *word, char **list)
{
	int n;
	for (n=0; list[n]; n++)
		if (cmd_match(word, list[n]))
			break;
	return n;
}
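/*
 * match_word() maps a written string to its index in array_states[], so
 * e.g. "readonly\n" (trailing newline allowed by cmd_match) selects the
 * readonly case in array_state_store() below; an unknown word falls off
 * the end of the list and becomes bad_word.
 */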
static ssize_t
array_state_show(struct mddev *mddev, char *page)
{
	enum array_state st = inactive;

	if (mddev->pers)
		switch(mddev->ro) {
		case 1:
			st = readonly;
			break;
		case 2:
			st = read_auto;
			break;
		case 0:
			if (mddev->in_sync)
				st = clean;
			else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
				st = write_pending;
			else if (mddev->safemode)
				st = active_idle;
			else
				st = active;
		}
	else {
		if (list_empty(&mddev->disks) &&
		    mddev->raid_disks == 0 &&
		    mddev->dev_sectors == 0)
			st = clear;
		else
			st = inactive;
	}
	return sprintf(page, "%s\n", array_states[st]);
}

static int do_md_stop(struct mddev * mddev, int ro, int is_open);
static int md_set_readonly(struct mddev * mddev, int is_open);
static int do_md_run(struct mddev * mddev);
static int restart_array(struct mddev *mddev);

static ssize_t
array_state_store(struct mddev *mddev, const char *buf, size_t len)
{
	int err = -EINVAL;
	enum array_state st = match_word(buf, array_states);
	switch(st) {
	case bad_word:
		break;
	case clear:
		/* stopping an active array */
		if (atomic_read(&mddev->openers) > 0)
			return -EBUSY;
		err = do_md_stop(mddev, 0, 0);
		break;
	case inactive:
		/* stopping an active array */
		if (mddev->pers) {
			if (atomic_read(&mddev->openers) > 0)
				return -EBUSY;
			err = do_md_stop(mddev, 2, 0);
		} else
			err = 0; /* already inactive */
		break;
	case suspended:
		break; /* not supported yet */
	case readonly:
		if (mddev->pers)
			err = md_set_readonly(mddev, 0);
		else {
			mddev->ro = 1;
			set_disk_ro(mddev->gendisk, 1);
			err = do_md_run(mddev);
		}
		break;
	case read_auto:
		if (mddev->pers) {
			if (mddev->ro == 0)
				err = md_set_readonly(mddev, 0);
			else if (mddev->ro == 1)
				err = restart_array(mddev);
			if (err == 0) {
				mddev->ro = 2;
				set_disk_ro(mddev->gendisk, 0);
			}
		} else {
			mddev->ro = 2;
			err = do_md_run(mddev);
		}
		break;
	case clean:
		if (mddev->pers) {
			restart_array(mddev);
			spin_lock_irq(&mddev->write_lock);
			if (atomic_read(&mddev->writes_pending) == 0) {
				if (mddev->in_sync == 0) {
					mddev->in_sync = 1;
					if (mddev->safemode == 1)
						mddev->safemode = 0;
					set_bit(MD_CHANGE_CLEAN, &mddev->flags);
				}
				err = 0;
			} else
				err = -EBUSY;
			spin_unlock_irq(&mddev->write_lock);
		} else
			err = -EINVAL;
		break;
	case active:
		if (mddev->pers) {
			restart_array(mddev);
			clear_bit(MD_CHANGE_PENDING, &mddev->flags);
			wake_up(&mddev->sb_wait);
			err = 0;
		} else {
			mddev->ro = 0;
			set_disk_ro(mddev->gendisk, 0);
			err = do_md_run(mddev);
		}
		break;
	case write_pending:
	case active_idle:
		/* these cannot be set */
		break;
	}
	if (err)
		return err;
	else {
		sysfs_notify_dirent_safe(mddev->sysfs_state);
		return len;
	}
}
static struct md_sysfs_entry md_array_state =
__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);

static ssize_t
max_corrected_read_errors_show(struct mddev *mddev, char *page) {
	return sprintf(page, "%d\n",
		       atomic_read(&mddev->max_corr_read_errors));
}

static ssize_t
max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len)
{
	char *e;
	unsigned long n = simple_strtoul(buf, &e, 10);

	if (*buf && (*e == 0 || *e == '\n')) {
		atomic_set(&mddev->max_corr_read_errors, n);
		return len;
	}
	return -EINVAL;
}

static struct md_sysfs_entry max_corr_read_errors =
__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
	max_corrected_read_errors_store);

static ssize_t
null_show(struct mddev *mddev, char *page)
{
	return -EINVAL;
}

static ssize_t
new_dev_store(struct mddev *mddev, const char *buf, size_t len)
{
	/* buf must be %d:%d\n? giving major and minor numbers */
	/* The new device is added to the array.
	 * If the array has a persistent superblock, we read the
	 * superblock to initialise info and check validity.
	 * Otherwise, only checking done is that in bind_rdev_to_array,
	 * which mainly checks size.
	 */
	char *e;
	int major = simple_strtoul(buf, &e, 10);
	int minor;
	dev_t dev;
	struct md_rdev *rdev;
	int err;

	if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
		return -EINVAL;
	minor = simple_strtoul(e+1, &e, 10);
	if (*e && *e != '\n')
		return -EINVAL;
	dev = MKDEV(major, minor);
	if (major != MAJOR(dev) ||
	    minor != MINOR(dev))
		return -EOVERFLOW;


	if (mddev->persistent) {
		rdev = md_import_device(dev, mddev->major_version,
					mddev->minor_version);
		if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
			struct md_rdev *rdev0
				= list_entry(mddev->disks.next,
					     struct md_rdev, same_set);
			err = super_types[mddev->major_version]
				.load_super(rdev, rdev0, mddev->minor_version);
			if (err < 0)
				goto out;
		}
	} else if (mddev->external)
		rdev = md_import_device(dev, -2, -1);
	else
		rdev = md_import_device(dev, -1, -1);

	if (IS_ERR(rdev))
		return PTR_ERR(rdev);
	err = bind_rdev_to_array(rdev, mddev);
 out:
	if (err)
		export_rdev(rdev);
	return err ? err : len;
}

static struct md_sysfs_entry md_new_device =
__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
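/*
 * Usage sketch (device numbers are illustrative): writing "8:17" to this
 * new_dev file asks md to import the block device with major 8, minor 17
 * and bind it to the array, validating its superblock first when the
 * array uses persistent metadata.
 */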
(range) */ 3896 while (*buf) { 3897 chunk = end_chunk = simple_strtoul(buf, &end, 0); 3898 if (buf == end) break; 3899 if (*end == '-') { /* range */ 3900 buf = end + 1; 3901 end_chunk = simple_strtoul(buf, &end, 0); 3902 if (buf == end) break; 3903 } 3904 if (*end && !isspace(*end)) break; 3905 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk); 3906 buf = skip_spaces(end); 3907 } 3908 bitmap_unplug(mddev->bitmap); /* flush the bits to disk */ 3909 out: 3910 return len; 3911 } 3912 3913 static struct md_sysfs_entry md_bitmap = 3914 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store); 3915 3916 static ssize_t 3917 size_show(struct mddev *mddev, char *page) 3918 { 3919 return sprintf(page, "%llu\n", 3920 (unsigned long long)mddev->dev_sectors / 2); 3921 } 3922 3923 static int update_size(struct mddev *mddev, sector_t num_sectors); 3924 3925 static ssize_t 3926 size_store(struct mddev *mddev, const char *buf, size_t len) 3927 { 3928 /* If array is inactive, we can reduce the component size, but 3929 * not increase it (except from 0). 3930 * If array is active, we can try an on-line resize 3931 */ 3932 sector_t sectors; 3933 int err = strict_blocks_to_sectors(buf, &sectors); 3934 3935 if (err < 0) 3936 return err; 3937 if (mddev->pers) { 3938 err = update_size(mddev, sectors); 3939 md_update_sb(mddev, 1); 3940 } else { 3941 if (mddev->dev_sectors == 0 || 3942 mddev->dev_sectors > sectors) 3943 mddev->dev_sectors = sectors; 3944 else 3945 err = -ENOSPC; 3946 } 3947 return err ? err : len; 3948 } 3949 3950 static struct md_sysfs_entry md_size = 3951 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store); 3952 3953 3954 /* Metadata version. 3955 * This is one of 3956 * 'none' for arrays with no metadata (good luck...) 3957 * 'external' for arrays with externally managed metadata, 3958 * or N.M for internally known formats 3959 */ 3960 static ssize_t 3961 metadata_show(struct mddev *mddev, char *page) 3962 { 3963 if (mddev->persistent) 3964 return sprintf(page, "%d.%d\n", 3965 mddev->major_version, mddev->minor_version); 3966 else if (mddev->external) 3967 return sprintf(page, "external:%s\n", mddev->metadata_type); 3968 else 3969 return sprintf(page, "none\n"); 3970 } 3971 3972 static ssize_t 3973 metadata_store(struct mddev *mddev, const char *buf, size_t len) 3974 { 3975 int major, minor; 3976 char *e; 3977 /* Changing the details of 'external' metadata is 3978 * always permitted. Otherwise there must be 3979 * no devices attached to the array.
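 * For example (illustrative, not from the original comment): writing
 * "external:imsm" to an array already marked external just renames the
 * metadata handler, while writing "1.2" is rejected with -EBUSY once
 * member devices are attached.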
3980 */ 3981 if (mddev->external && strncmp(buf, "external:", 9) == 0) 3982 ; 3983 else if (!list_empty(&mddev->disks)) 3984 return -EBUSY; 3985 3986 if (cmd_match(buf, "none")) { 3987 mddev->persistent = 0; 3988 mddev->external = 0; 3989 mddev->major_version = 0; 3990 mddev->minor_version = 90; 3991 return len; 3992 } 3993 if (strncmp(buf, "external:", 9) == 0) { 3994 size_t namelen = len-9; 3995 if (namelen >= sizeof(mddev->metadata_type)) 3996 namelen = sizeof(mddev->metadata_type)-1; 3997 strncpy(mddev->metadata_type, buf+9, namelen); 3998 mddev->metadata_type[namelen] = 0; 3999 if (namelen && mddev->metadata_type[namelen-1] == '\n') 4000 mddev->metadata_type[--namelen] = 0; 4001 mddev->persistent = 0; 4002 mddev->external = 1; 4003 mddev->major_version = 0; 4004 mddev->minor_version = 90; 4005 return len; 4006 } 4007 major = simple_strtoul(buf, &e, 10); 4008 if (e==buf || *e != '.') 4009 return -EINVAL; 4010 buf = e+1; 4011 minor = simple_strtoul(buf, &e, 10); 4012 if (e==buf || (*e && *e != '\n') ) 4013 return -EINVAL; 4014 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL) 4015 return -ENOENT; 4016 mddev->major_version = major; 4017 mddev->minor_version = minor; 4018 mddev->persistent = 1; 4019 mddev->external = 0; 4020 return len; 4021 } 4022 4023 static struct md_sysfs_entry md_metadata = 4024 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); 4025 4026 static ssize_t 4027 action_show(struct mddev *mddev, char *page) 4028 { 4029 char *type = "idle"; 4030 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 4031 type = "frozen"; 4032 else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 4033 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) { 4034 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 4035 type = "reshape"; 4036 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 4037 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 4038 type = "resync"; 4039 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) 4040 type = "check"; 4041 else 4042 type = "repair"; 4043 } else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) 4044 type = "recover"; 4045 } 4046 return sprintf(page, "%s\n", type); 4047 } 4048 4049 static void reap_sync_thread(struct mddev *mddev); 4050 4051 static ssize_t 4052 action_store(struct mddev *mddev, const char *page, size_t len) 4053 { 4054 if (!mddev->pers || !mddev->pers->sync_request) 4055 return -EINVAL; 4056 4057 if (cmd_match(page, "frozen")) 4058 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4059 else 4060 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4061 4062 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) { 4063 if (mddev->sync_thread) { 4064 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4065 reap_sync_thread(mddev); 4066 } 4067 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 4068 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 4069 return -EBUSY; 4070 else if (cmd_match(page, "resync")) 4071 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4072 else if (cmd_match(page, "recover")) { 4073 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 4074 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4075 } else if (cmd_match(page, "reshape")) { 4076 int err; 4077 if (mddev->pers->start_reshape == NULL) 4078 return -EINVAL; 4079 err = mddev->pers->start_reshape(mddev); 4080 if (err) 4081 return err; 4082 sysfs_notify(&mddev->kobj, NULL, "degraded"); 4083 } else { 4084 if (cmd_match(page, "check")) 4085 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4086 else 
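/* Added note: only 'repair' remains to be matched here. Both 'check'
 * and 'repair' request a user-initiated sync pass below; the
 * MD_RECOVERY_CHECK bit is what makes the pass read-only (mismatches
 * are counted, not corrected).
 */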
if (!cmd_match(page, "repair")) 4087 return -EINVAL; 4088 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 4089 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4090 } 4091 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4092 md_wakeup_thread(mddev->thread); 4093 sysfs_notify_dirent_safe(mddev->sysfs_action); 4094 return len; 4095 } 4096 4097 static ssize_t 4098 mismatch_cnt_show(struct mddev *mddev, char *page) 4099 { 4100 return sprintf(page, "%llu\n", 4101 (unsigned long long) mddev->resync_mismatches); 4102 } 4103 4104 static struct md_sysfs_entry md_scan_mode = 4105 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); 4106 4107 4108 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt); 4109 4110 static ssize_t 4111 sync_min_show(struct mddev *mddev, char *page) 4112 { 4113 return sprintf(page, "%d (%s)\n", speed_min(mddev), 4114 mddev->sync_speed_min ? "local": "system"); 4115 } 4116 4117 static ssize_t 4118 sync_min_store(struct mddev *mddev, const char *buf, size_t len) 4119 { 4120 int min; 4121 char *e; 4122 if (strncmp(buf, "system", 6)==0) { 4123 mddev->sync_speed_min = 0; 4124 return len; 4125 } 4126 min = simple_strtoul(buf, &e, 10); 4127 if (buf == e || (*e && *e != '\n') || min <= 0) 4128 return -EINVAL; 4129 mddev->sync_speed_min = min; 4130 return len; 4131 } 4132 4133 static struct md_sysfs_entry md_sync_min = 4134 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); 4135 4136 static ssize_t 4137 sync_max_show(struct mddev *mddev, char *page) 4138 { 4139 return sprintf(page, "%d (%s)\n", speed_max(mddev), 4140 mddev->sync_speed_max ? "local": "system"); 4141 } 4142 4143 static ssize_t 4144 sync_max_store(struct mddev *mddev, const char *buf, size_t len) 4145 { 4146 int max; 4147 char *e; 4148 if (strncmp(buf, "system", 6)==0) { 4149 mddev->sync_speed_max = 0; 4150 return len; 4151 } 4152 max = simple_strtoul(buf, &e, 10); 4153 if (buf == e || (*e && *e != '\n') || max <= 0) 4154 return -EINVAL; 4155 mddev->sync_speed_max = max; 4156 return len; 4157 } 4158 4159 static struct md_sysfs_entry md_sync_max = 4160 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); 4161 4162 static ssize_t 4163 degraded_show(struct mddev *mddev, char *page) 4164 { 4165 return sprintf(page, "%d\n", mddev->degraded); 4166 } 4167 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded); 4168 4169 static ssize_t 4170 sync_force_parallel_show(struct mddev *mddev, char *page) 4171 { 4172 return sprintf(page, "%d\n", mddev->parallel_resync); 4173 } 4174 4175 static ssize_t 4176 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len) 4177 { 4178 long n; 4179 4180 if (strict_strtol(buf, 10, &n)) 4181 return -EINVAL; 4182 4183 if (n != 0 && n != 1) 4184 return -EINVAL; 4185 4186 mddev->parallel_resync = n; 4187 4188 if (mddev->sync_thread) 4189 wake_up(&resync_wait); 4190 4191 return len; 4192 } 4193 4194 /* force parallel resync, even with shared block devices */ 4195 static struct md_sysfs_entry md_sync_force_parallel = 4196 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR, 4197 sync_force_parallel_show, sync_force_parallel_store); 4198 4199 static ssize_t 4200 sync_speed_show(struct mddev *mddev, char *page) 4201 { 4202 unsigned long resync, dt, db; 4203 if (mddev->curr_resync == 0) 4204 return sprintf(page, "none\n"); 4205 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active); 4206 dt = (jiffies - mddev->resync_mark) / HZ; 4207 if (!dt) dt++; 4208 db = resync - mddev->resync_mark_cnt; 4209 return 
sprintf(page, "%lu\n", db/dt/2); /* K/sec */ 4210 } 4211 4212 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed); 4213 4214 static ssize_t 4215 sync_completed_show(struct mddev *mddev, char *page) 4216 { 4217 unsigned long long max_sectors, resync; 4218 4219 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4220 return sprintf(page, "none\n"); 4221 4222 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 4223 max_sectors = mddev->resync_max_sectors; 4224 else 4225 max_sectors = mddev->dev_sectors; 4226 4227 resync = mddev->curr_resync_completed; 4228 return sprintf(page, "%llu / %llu\n", resync, max_sectors); 4229 } 4230 4231 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed); 4232 4233 static ssize_t 4234 min_sync_show(struct mddev *mddev, char *page) 4235 { 4236 return sprintf(page, "%llu\n", 4237 (unsigned long long)mddev->resync_min); 4238 } 4239 static ssize_t 4240 min_sync_store(struct mddev *mddev, const char *buf, size_t len) 4241 { 4242 unsigned long long min; 4243 if (strict_strtoull(buf, 10, &min)) 4244 return -EINVAL; 4245 if (min > mddev->resync_max) 4246 return -EINVAL; 4247 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4248 return -EBUSY; 4249 4250 /* Must be a multiple of chunk_size */ 4251 if (mddev->chunk_sectors) { 4252 sector_t temp = min; 4253 if (sector_div(temp, mddev->chunk_sectors)) 4254 return -EINVAL; 4255 } 4256 mddev->resync_min = min; 4257 4258 return len; 4259 } 4260 4261 static struct md_sysfs_entry md_min_sync = 4262 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store); 4263 4264 static ssize_t 4265 max_sync_show(struct mddev *mddev, char *page) 4266 { 4267 if (mddev->resync_max == MaxSector) 4268 return sprintf(page, "max\n"); 4269 else 4270 return sprintf(page, "%llu\n", 4271 (unsigned long long)mddev->resync_max); 4272 } 4273 static ssize_t 4274 max_sync_store(struct mddev *mddev, const char *buf, size_t len) 4275 { 4276 if (strncmp(buf, "max", 3) == 0) 4277 mddev->resync_max = MaxSector; 4278 else { 4279 unsigned long long max; 4280 if (strict_strtoull(buf, 10, &max)) 4281 return -EINVAL; 4282 if (max < mddev->resync_min) 4283 return -EINVAL; 4284 if (max < mddev->resync_max && 4285 mddev->ro == 0 && 4286 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4287 return -EBUSY; 4288 4289 /* Must be a multiple of chunk_size */ 4290 if (mddev->chunk_sectors) { 4291 sector_t temp = max; 4292 if (sector_div(temp, mddev->chunk_sectors)) 4293 return -EINVAL; 4294 } 4295 mddev->resync_max = max; 4296 } 4297 wake_up(&mddev->recovery_wait); 4298 return len; 4299 } 4300 4301 static struct md_sysfs_entry md_max_sync = 4302 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store); 4303 4304 static ssize_t 4305 suspend_lo_show(struct mddev *mddev, char *page) 4306 { 4307 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); 4308 } 4309 4310 static ssize_t 4311 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len) 4312 { 4313 char *e; 4314 unsigned long long new = simple_strtoull(buf, &e, 10); 4315 unsigned long long old = mddev->suspend_lo; 4316 4317 if (mddev->pers == NULL || 4318 mddev->pers->quiesce == NULL) 4319 return -EINVAL; 4320 if (buf == e || (*e && *e != '\n')) 4321 return -EINVAL; 4322 4323 mddev->suspend_lo = new; 4324 if (new >= old) 4325 /* Shrinking suspended region */ 4326 mddev->pers->quiesce(mddev, 2); 4327 else { 4328 /* Expanding suspended region - need to wait */ 4329 mddev->pers->quiesce(mddev, 1); 4330 mddev->pers->quiesce(mddev, 0); 4331 } 4332 
return len; 4333 } 4334 static struct md_sysfs_entry md_suspend_lo = 4335 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store); 4336 4337 4338 static ssize_t 4339 suspend_hi_show(struct mddev *mddev, char *page) 4340 { 4341 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi); 4342 } 4343 4344 static ssize_t 4345 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len) 4346 { 4347 char *e; 4348 unsigned long long new = simple_strtoull(buf, &e, 10); 4349 unsigned long long old = mddev->suspend_hi; 4350 4351 if (mddev->pers == NULL || 4352 mddev->pers->quiesce == NULL) 4353 return -EINVAL; 4354 if (buf == e || (*e && *e != '\n')) 4355 return -EINVAL; 4356 4357 mddev->suspend_hi = new; 4358 if (new <= old) 4359 /* Shrinking suspended region */ 4360 mddev->pers->quiesce(mddev, 2); 4361 else { 4362 /* Expanding suspended region - need to wait */ 4363 mddev->pers->quiesce(mddev, 1); 4364 mddev->pers->quiesce(mddev, 0); 4365 } 4366 return len; 4367 } 4368 static struct md_sysfs_entry md_suspend_hi = 4369 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); 4370 4371 static ssize_t 4372 reshape_position_show(struct mddev *mddev, char *page) 4373 { 4374 if (mddev->reshape_position != MaxSector) 4375 return sprintf(page, "%llu\n", 4376 (unsigned long long)mddev->reshape_position); 4377 strcpy(page, "none\n"); 4378 return 5; 4379 } 4380 4381 static ssize_t 4382 reshape_position_store(struct mddev *mddev, const char *buf, size_t len) 4383 { 4384 char *e; 4385 unsigned long long new = simple_strtoull(buf, &e, 10); 4386 if (mddev->pers) 4387 return -EBUSY; 4388 if (buf == e || (*e && *e != '\n')) 4389 return -EINVAL; 4390 mddev->reshape_position = new; 4391 mddev->delta_disks = 0; 4392 mddev->new_level = mddev->level; 4393 mddev->new_layout = mddev->layout; 4394 mddev->new_chunk_sectors = mddev->chunk_sectors; 4395 return len; 4396 } 4397 4398 static struct md_sysfs_entry md_reshape_position = 4399 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show, 4400 reshape_position_store); 4401 4402 static ssize_t 4403 array_size_show(struct mddev *mddev, char *page) 4404 { 4405 if (mddev->external_size) 4406 return sprintf(page, "%llu\n", 4407 (unsigned long long)mddev->array_sectors/2); 4408 else 4409 return sprintf(page, "default\n"); 4410 } 4411 4412 static ssize_t 4413 array_size_store(struct mddev *mddev, const char *buf, size_t len) 4414 { 4415 sector_t sectors; 4416 4417 if (strncmp(buf, "default", 7) == 0) { 4418 if (mddev->pers) 4419 sectors = mddev->pers->size(mddev, 0, 0); 4420 else 4421 sectors = mddev->array_sectors; 4422 4423 mddev->external_size = 0; 4424 } else { 4425 if (strict_blocks_to_sectors(buf, &sectors) < 0) 4426 return -EINVAL; 4427 if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors) 4428 return -E2BIG; 4429 4430 mddev->external_size = 1; 4431 } 4432 4433 mddev->array_sectors = sectors; 4434 if (mddev->pers) { 4435 set_capacity(mddev->gendisk, mddev->array_sectors); 4436 revalidate_disk(mddev->gendisk); 4437 } 4438 return len; 4439 } 4440 4441 static struct md_sysfs_entry md_array_size = 4442 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show, 4443 array_size_store); 4444 4445 static struct attribute *md_default_attrs[] = { 4446 &md_level.attr, 4447 &md_layout.attr, 4448 &md_raid_disks.attr, 4449 &md_chunk_size.attr, 4450 &md_size.attr, 4451 &md_resync_start.attr, 4452 &md_metadata.attr, 4453 &md_new_device.attr, 4454 &md_safe_delay.attr, 4455 &md_array_state.attr, 4456 &md_reshape_position.attr, 4457
&md_array_size.attr, 4458 &max_corr_read_errors.attr, 4459 NULL, 4460 }; 4461 4462 static struct attribute *md_redundancy_attrs[] = { 4463 &md_scan_mode.attr, 4464 &md_mismatches.attr, 4465 &md_sync_min.attr, 4466 &md_sync_max.attr, 4467 &md_sync_speed.attr, 4468 &md_sync_force_parallel.attr, 4469 &md_sync_completed.attr, 4470 &md_min_sync.attr, 4471 &md_max_sync.attr, 4472 &md_suspend_lo.attr, 4473 &md_suspend_hi.attr, 4474 &md_bitmap.attr, 4475 &md_degraded.attr, 4476 NULL, 4477 }; 4478 static struct attribute_group md_redundancy_group = { 4479 .name = NULL, 4480 .attrs = md_redundancy_attrs, 4481 }; 4482 4483 4484 static ssize_t 4485 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 4486 { 4487 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 4488 struct mddev *mddev = container_of(kobj, struct mddev, kobj); 4489 ssize_t rv; 4490 4491 if (!entry->show) 4492 return -EIO; 4493 rv = mddev_lock(mddev); 4494 if (!rv) { 4495 rv = entry->show(mddev, page); 4496 mddev_unlock(mddev); 4497 } 4498 return rv; 4499 } 4500 4501 static ssize_t 4502 md_attr_store(struct kobject *kobj, struct attribute *attr, 4503 const char *page, size_t length) 4504 { 4505 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 4506 struct mddev *mddev = container_of(kobj, struct mddev, kobj); 4507 ssize_t rv; 4508 4509 if (!entry->store) 4510 return -EIO; 4511 if (!capable(CAP_SYS_ADMIN)) 4512 return -EACCES; 4513 rv = mddev_lock(mddev); 4514 if (mddev->hold_active == UNTIL_IOCTL) 4515 mddev->hold_active = 0; 4516 if (!rv) { 4517 rv = entry->store(mddev, page, length); 4518 mddev_unlock(mddev); 4519 } 4520 return rv; 4521 } 4522 4523 static void md_free(struct kobject *ko) 4524 { 4525 struct mddev *mddev = container_of(ko, struct mddev, kobj); 4526 4527 if (mddev->sysfs_state) 4528 sysfs_put(mddev->sysfs_state); 4529 4530 if (mddev->gendisk) { 4531 del_gendisk(mddev->gendisk); 4532 put_disk(mddev->gendisk); 4533 } 4534 if (mddev->queue) 4535 blk_cleanup_queue(mddev->queue); 4536 4537 kfree(mddev); 4538 } 4539 4540 static const struct sysfs_ops md_sysfs_ops = { 4541 .show = md_attr_show, 4542 .store = md_attr_store, 4543 }; 4544 static struct kobj_type md_ktype = { 4545 .release = md_free, 4546 .sysfs_ops = &md_sysfs_ops, 4547 .default_attrs = md_default_attrs, 4548 }; 4549 4550 int mdp_major = 0; 4551 4552 static void mddev_delayed_delete(struct work_struct *ws) 4553 { 4554 struct mddev *mddev = container_of(ws, struct mddev, del_work); 4555 4556 sysfs_remove_group(&mddev->kobj, &md_bitmap_group); 4557 kobject_del(&mddev->kobj); 4558 kobject_put(&mddev->kobj); 4559 } 4560 4561 static int md_alloc(dev_t dev, char *name) 4562 { 4563 static DEFINE_MUTEX(disks_mutex); 4564 struct mddev *mddev = mddev_find(dev); 4565 struct gendisk *disk; 4566 int partitioned; 4567 int shift; 4568 int unit; 4569 int error; 4570 4571 if (!mddev) 4572 return -ENODEV; 4573 4574 partitioned = (MAJOR(mddev->unit) != MD_MAJOR); 4575 shift = partitioned ? MdpMinorShift : 0; 4576 unit = MINOR(mddev->unit) >> shift; 4577 4578 /* wait for any previous instance of this device to be 4579 * completely removed (mddev_delayed_delete). 4580 */ 4581 flush_workqueue(md_misc_wq); 4582 4583 mutex_lock(&disks_mutex); 4584 error = -EEXIST; 4585 if (mddev->gendisk) 4586 goto abort; 4587 4588 if (name) { 4589 /* Need to ensure that 'name' is not a duplicate. 
4590 */ 4591 struct mddev *mddev2; 4592 spin_lock(&all_mddevs_lock); 4593 4594 list_for_each_entry(mddev2, &all_mddevs, all_mddevs) 4595 if (mddev2->gendisk && 4596 strcmp(mddev2->gendisk->disk_name, name) == 0) { 4597 spin_unlock(&all_mddevs_lock); 4598 goto abort; 4599 } 4600 spin_unlock(&all_mddevs_lock); 4601 } 4602 4603 error = -ENOMEM; 4604 mddev->queue = blk_alloc_queue(GFP_KERNEL); 4605 if (!mddev->queue) 4606 goto abort; 4607 mddev->queue->queuedata = mddev; 4608 4609 blk_queue_make_request(mddev->queue, md_make_request); 4610 4611 disk = alloc_disk(1 << shift); 4612 if (!disk) { 4613 blk_cleanup_queue(mddev->queue); 4614 mddev->queue = NULL; 4615 goto abort; 4616 } 4617 disk->major = MAJOR(mddev->unit); 4618 disk->first_minor = unit << shift; 4619 if (name) 4620 strcpy(disk->disk_name, name); 4621 else if (partitioned) 4622 sprintf(disk->disk_name, "md_d%d", unit); 4623 else 4624 sprintf(disk->disk_name, "md%d", unit); 4625 disk->fops = &md_fops; 4626 disk->private_data = mddev; 4627 disk->queue = mddev->queue; 4628 blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA); 4629 /* Allow extended partitions. This makes the 4630 * 'mdp' device redundant, but we can't really 4631 * remove it now. 4632 */ 4633 disk->flags |= GENHD_FL_EXT_DEVT; 4634 mddev->gendisk = disk; 4635 /* As soon as we call add_disk(), another thread could get 4636 * through to md_open, so make sure it doesn't get too far 4637 */ 4638 mutex_lock(&mddev->open_mutex); 4639 add_disk(disk); 4640 4641 error = kobject_init_and_add(&mddev->kobj, &md_ktype, 4642 &disk_to_dev(disk)->kobj, "%s", "md"); 4643 if (error) { 4644 /* This isn't possible, but as kobject_init_and_add is marked 4645 * __must_check, we must do something with the result 4646 */ 4647 printk(KERN_WARNING "md: cannot register %s/md - name in use\n", 4648 disk->disk_name); 4649 error = 0; 4650 } 4651 if (mddev->kobj.sd && 4652 sysfs_create_group(&mddev->kobj, &md_bitmap_group)) 4653 printk(KERN_DEBUG "pointless warning\n"); 4654 mutex_unlock(&mddev->open_mutex); 4655 abort: 4656 mutex_unlock(&disks_mutex); 4657 if (!error && mddev->kobj.sd) { 4658 kobject_uevent(&mddev->kobj, KOBJ_ADD); 4659 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state"); 4660 } 4661 mddev_put(mddev); 4662 return error; 4663 } 4664 4665 static struct kobject *md_probe(dev_t dev, int *part, void *data) 4666 { 4667 md_alloc(dev, NULL); 4668 return NULL; 4669 } 4670 4671 static int add_named_array(const char *val, struct kernel_param *kp) 4672 { 4673 /* val must be "md_*" where * is not all digits. 4674 * We allocate an array with a large free minor number, and 4675 * set the name to val. val must not already be an active name. 
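 * Illustrative usage, assuming this handler is wired up as a module
 * parameter named "new_array" (declared elsewhere in this file):
 *   echo md_home > /sys/module/md_mod/parameters/new_array
 * which would create an array named /dev/md_home.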
4676 */ 4677 int len = strlen(val); 4678 char buf[DISK_NAME_LEN]; 4679 4680 while (len && val[len-1] == '\n') 4681 len--; 4682 if (len >= DISK_NAME_LEN) 4683 return -E2BIG; 4684 strlcpy(buf, val, len+1); 4685 if (strncmp(buf, "md_", 3) != 0) 4686 return -EINVAL; 4687 return md_alloc(0, buf); 4688 } 4689 4690 static void md_safemode_timeout(unsigned long data) 4691 { 4692 struct mddev *mddev = (struct mddev *) data; 4693 4694 if (!atomic_read(&mddev->writes_pending)) { 4695 mddev->safemode = 1; 4696 if (mddev->external) 4697 sysfs_notify_dirent_safe(mddev->sysfs_state); 4698 } 4699 md_wakeup_thread(mddev->thread); 4700 } 4701 4702 static int start_dirty_degraded; 4703 4704 int md_run(struct mddev *mddev) 4705 { 4706 int err; 4707 struct md_rdev *rdev; 4708 struct md_personality *pers; 4709 4710 if (list_empty(&mddev->disks)) 4711 /* cannot run an array with no devices.. */ 4712 return -EINVAL; 4713 4714 if (mddev->pers) 4715 return -EBUSY; 4716 /* Cannot run until previous stop completes properly */ 4717 if (mddev->sysfs_active) 4718 return -EBUSY; 4719 4720 /* 4721 * Analyze all RAID superblock(s) 4722 */ 4723 if (!mddev->raid_disks) { 4724 if (!mddev->persistent) 4725 return -EINVAL; 4726 analyze_sbs(mddev); 4727 } 4728 4729 if (mddev->level != LEVEL_NONE) 4730 request_module("md-level-%d", mddev->level); 4731 else if (mddev->clevel[0]) 4732 request_module("md-%s", mddev->clevel); 4733 4734 /* 4735 * Drop all container device buffers, from now on 4736 * the only valid external interface is through the md 4737 * device. 4738 */ 4739 list_for_each_entry(rdev, &mddev->disks, same_set) { 4740 if (test_bit(Faulty, &rdev->flags)) 4741 continue; 4742 sync_blockdev(rdev->bdev); 4743 invalidate_bdev(rdev->bdev); 4744 4745 /* perform some consistency tests on the device. 4746 * We don't want the data to overlap the metadata, 4747 * Internal Bitmap issues have been handled elsewhere. 4748 */ 4749 if (rdev->meta_bdev) { 4750 /* Nothing to check */; 4751 } else if (rdev->data_offset < rdev->sb_start) { 4752 if (mddev->dev_sectors && 4753 rdev->data_offset + mddev->dev_sectors 4754 > rdev->sb_start) { 4755 printk("md: %s: data overlaps metadata\n", 4756 mdname(mddev)); 4757 return -EINVAL; 4758 } 4759 } else { 4760 if (rdev->sb_start + rdev->sb_size/512 4761 > rdev->data_offset) { 4762 printk("md: %s: metadata overlaps data\n", 4763 mdname(mddev)); 4764 return -EINVAL; 4765 } 4766 } 4767 sysfs_notify_dirent_safe(rdev->sysfs_state); 4768 } 4769 4770 if (mddev->bio_set == NULL) 4771 mddev->bio_set = bioset_create(BIO_POOL_SIZE, 4772 sizeof(struct mddev *)); 4773 4774 spin_lock(&pers_lock); 4775 pers = find_pers(mddev->level, mddev->clevel); 4776 if (!pers || !try_module_get(pers->owner)) { 4777 spin_unlock(&pers_lock); 4778 if (mddev->level != LEVEL_NONE) 4779 printk(KERN_WARNING "md: personality for level %d is not loaded!\n", 4780 mddev->level); 4781 else 4782 printk(KERN_WARNING "md: personality for level %s is not loaded!\n", 4783 mddev->clevel); 4784 return -EINVAL; 4785 } 4786 mddev->pers = pers; 4787 spin_unlock(&pers_lock); 4788 if (mddev->level != pers->level) { 4789 mddev->level = pers->level; 4790 mddev->new_level = pers->level; 4791 } 4792 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 4793 4794 if (mddev->reshape_position != MaxSector && 4795 pers->start_reshape == NULL) { 4796 /* This personality cannot handle reshaping... 
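 * so fail the run and leave the array untouched rather than silently
 * ignore a partially-completed reshape recorded in reshape_position.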
*/ 4797 mddev->pers = NULL; 4798 module_put(pers->owner); 4799 return -EINVAL; 4800 } 4801 4802 if (pers->sync_request) { 4803 /* Warn if this is a potentially silly 4804 * configuration. 4805 */ 4806 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 4807 struct md_rdev *rdev2; 4808 int warned = 0; 4809 4810 list_for_each_entry(rdev, &mddev->disks, same_set) 4811 list_for_each_entry(rdev2, &mddev->disks, same_set) { 4812 if (rdev < rdev2 && 4813 rdev->bdev->bd_contains == 4814 rdev2->bdev->bd_contains) { 4815 printk(KERN_WARNING 4816 "%s: WARNING: %s appears to be" 4817 " on the same physical disk as" 4818 " %s.\n", 4819 mdname(mddev), 4820 bdevname(rdev->bdev,b), 4821 bdevname(rdev2->bdev,b2)); 4822 warned = 1; 4823 } 4824 } 4825 4826 if (warned) 4827 printk(KERN_WARNING 4828 "True protection against single-disk" 4829 " failure might be compromised.\n"); 4830 } 4831 4832 mddev->recovery = 0; 4833 /* may be over-ridden by personality */ 4834 mddev->resync_max_sectors = mddev->dev_sectors; 4835 4836 mddev->ok_start_degraded = start_dirty_degraded; 4837 4838 if (start_readonly && mddev->ro == 0) 4839 mddev->ro = 2; /* read-only, but switch on first write */ 4840 4841 err = mddev->pers->run(mddev); 4842 if (err) 4843 printk(KERN_ERR "md: pers->run() failed ...\n"); 4844 else if (mddev->pers->size(mddev, 0, 0) < mddev->array_sectors) { 4845 WARN_ONCE(!mddev->external_size, "%s: default size too small," 4846 " but 'external_size' not in effect?\n", __func__); 4847 printk(KERN_ERR 4848 "md: invalid array_size %llu > default size %llu\n", 4849 (unsigned long long)mddev->array_sectors / 2, 4850 (unsigned long long)mddev->pers->size(mddev, 0, 0) / 2); 4851 err = -EINVAL; 4852 mddev->pers->stop(mddev); 4853 } 4854 if (err == 0 && mddev->pers->sync_request) { 4855 err = bitmap_create(mddev); 4856 if (err) { 4857 printk(KERN_ERR "%s: failed to create bitmap (%d)\n", 4858 mdname(mddev), err); 4859 mddev->pers->stop(mddev); 4860 } 4861 } 4862 if (err) { 4863 module_put(mddev->pers->owner); 4864 mddev->pers = NULL; 4865 bitmap_destroy(mddev); 4866 return err; 4867 } 4868 if (mddev->pers->sync_request) { 4869 if (mddev->kobj.sd && 4870 sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 4871 printk(KERN_WARNING 4872 "md: cannot register extra attributes for %s\n", 4873 mdname(mddev)); 4874 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action"); 4875 } else if (mddev->ro == 2) /* auto-readonly not meaningful */ 4876 mddev->ro = 0; 4877 4878 atomic_set(&mddev->writes_pending,0); 4879 atomic_set(&mddev->max_corr_read_errors, 4880 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS); 4881 mddev->safemode = 0; 4882 mddev->safemode_timer.function = md_safemode_timeout; 4883 mddev->safemode_timer.data = (unsigned long) mddev; 4884 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */ 4885 mddev->in_sync = 1; 4886 smp_wmb(); 4887 mddev->ready = 1; 4888 list_for_each_entry(rdev, &mddev->disks, same_set) 4889 if (rdev->raid_disk >= 0) 4890 if (sysfs_link_rdev(mddev, rdev)) 4891 /* failure here is OK */; 4892 4893 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4894 4895 if (mddev->flags) 4896 md_update_sb(mddev, 0); 4897 4898 md_new_event(mddev); 4899 sysfs_notify_dirent_safe(mddev->sysfs_state); 4900 sysfs_notify_dirent_safe(mddev->sysfs_action); 4901 sysfs_notify(&mddev->kobj, NULL, "degraded"); 4902 return 0; 4903 } 4904 EXPORT_SYMBOL_GPL(md_run); 4905 4906 static int do_md_run(struct mddev *mddev) 4907 { 4908 int err; 4909 4910 err = md_run(mddev); 4911 if (err) 4912 goto out; 4913 err = 
bitmap_load(mddev); 4914 if (err) { 4915 bitmap_destroy(mddev); 4916 goto out; 4917 } 4918 4919 md_wakeup_thread(mddev->thread); 4920 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 4921 4922 set_capacity(mddev->gendisk, mddev->array_sectors); 4923 revalidate_disk(mddev->gendisk); 4924 mddev->changed = 1; 4925 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 4926 out: 4927 return err; 4928 } 4929 4930 static int restart_array(struct mddev *mddev) 4931 { 4932 struct gendisk *disk = mddev->gendisk; 4933 4934 /* Complain if it has no devices */ 4935 if (list_empty(&mddev->disks)) 4936 return -ENXIO; 4937 if (!mddev->pers) 4938 return -EINVAL; 4939 if (!mddev->ro) 4940 return -EBUSY; 4941 mddev->safemode = 0; 4942 mddev->ro = 0; 4943 set_disk_ro(disk, 0); 4944 printk(KERN_INFO "md: %s switched to read-write mode.\n", 4945 mdname(mddev)); 4946 /* Kick recovery or resync if necessary */ 4947 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4948 md_wakeup_thread(mddev->thread); 4949 md_wakeup_thread(mddev->sync_thread); 4950 sysfs_notify_dirent_safe(mddev->sysfs_state); 4951 return 0; 4952 } 4953 4954 /* similar to deny_write_access, but accounts for our holding a reference 4955 * to the file ourselves */ 4956 static int deny_bitmap_write_access(struct file * file) 4957 { 4958 struct inode *inode = file->f_mapping->host; 4959 4960 spin_lock(&inode->i_lock); 4961 if (atomic_read(&inode->i_writecount) > 1) { 4962 spin_unlock(&inode->i_lock); 4963 return -ETXTBSY; 4964 } 4965 atomic_set(&inode->i_writecount, -1); 4966 spin_unlock(&inode->i_lock); 4967 4968 return 0; 4969 } 4970 4971 void restore_bitmap_write_access(struct file *file) 4972 { 4973 struct inode *inode = file->f_mapping->host; 4974 4975 spin_lock(&inode->i_lock); 4976 atomic_set(&inode->i_writecount, 1); 4977 spin_unlock(&inode->i_lock); 4978 } 4979 4980 static void md_clean(struct mddev *mddev) 4981 { 4982 mddev->array_sectors = 0; 4983 mddev->external_size = 0; 4984 mddev->dev_sectors = 0; 4985 mddev->raid_disks = 0; 4986 mddev->recovery_cp = 0; 4987 mddev->resync_min = 0; 4988 mddev->resync_max = MaxSector; 4989 mddev->reshape_position = MaxSector; 4990 mddev->external = 0; 4991 mddev->persistent = 0; 4992 mddev->level = LEVEL_NONE; 4993 mddev->clevel[0] = 0; 4994 mddev->flags = 0; 4995 mddev->ro = 0; 4996 mddev->metadata_type[0] = 0; 4997 mddev->chunk_sectors = 0; 4998 mddev->ctime = mddev->utime = 0; 4999 mddev->layout = 0; 5000 mddev->max_disks = 0; 5001 mddev->events = 0; 5002 mddev->can_decrease_events = 0; 5003 mddev->delta_disks = 0; 5004 mddev->new_level = LEVEL_NONE; 5005 mddev->new_layout = 0; 5006 mddev->new_chunk_sectors = 0; 5007 mddev->curr_resync = 0; 5008 mddev->resync_mismatches = 0; 5009 mddev->suspend_lo = mddev->suspend_hi = 0; 5010 mddev->sync_speed_min = mddev->sync_speed_max = 0; 5011 mddev->recovery = 0; 5012 mddev->in_sync = 0; 5013 mddev->changed = 0; 5014 mddev->degraded = 0; 5015 mddev->safemode = 0; 5016 mddev->bitmap_info.offset = 0; 5017 mddev->bitmap_info.default_offset = 0; 5018 mddev->bitmap_info.chunksize = 0; 5019 mddev->bitmap_info.daemon_sleep = 0; 5020 mddev->bitmap_info.max_write_behind = 0; 5021 } 5022 5023 static void __md_stop_writes(struct mddev *mddev) 5024 { 5025 if (mddev->sync_thread) { 5026 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5027 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 5028 reap_sync_thread(mddev); 5029 } 5030 5031 del_timer_sync(&mddev->safemode_timer); 5032 5033 bitmap_flush(mddev); 5034 md_super_wait(mddev); 5035 5036 if 
(!mddev->in_sync || mddev->flags) { 5037 /* mark array as shutdown cleanly */ 5038 mddev->in_sync = 1; 5039 md_update_sb(mddev, 1); 5040 } 5041 } 5042 5043 void md_stop_writes(struct mddev *mddev) 5044 { 5045 mddev_lock(mddev); 5046 __md_stop_writes(mddev); 5047 mddev_unlock(mddev); 5048 } 5049 EXPORT_SYMBOL_GPL(md_stop_writes); 5050 5051 void md_stop(struct mddev *mddev) 5052 { 5053 mddev->ready = 0; 5054 mddev->pers->stop(mddev); 5055 if (mddev->pers->sync_request && mddev->to_remove == NULL) 5056 mddev->to_remove = &md_redundancy_group; 5057 module_put(mddev->pers->owner); 5058 mddev->pers = NULL; 5059 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5060 } 5061 EXPORT_SYMBOL_GPL(md_stop); 5062 5063 static int md_set_readonly(struct mddev *mddev, int is_open) 5064 { 5065 int err = 0; 5066 mutex_lock(&mddev->open_mutex); 5067 if (atomic_read(&mddev->openers) > is_open) { 5068 printk("md: %s still in use.\n",mdname(mddev)); 5069 err = -EBUSY; 5070 goto out; 5071 } 5072 if (mddev->pers) { 5073 __md_stop_writes(mddev); 5074 5075 err = -ENXIO; 5076 if (mddev->ro==1) 5077 goto out; 5078 mddev->ro = 1; 5079 set_disk_ro(mddev->gendisk, 1); 5080 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5081 sysfs_notify_dirent_safe(mddev->sysfs_state); 5082 err = 0; 5083 } 5084 out: 5085 mutex_unlock(&mddev->open_mutex); 5086 return err; 5087 } 5088 5089 /* mode: 5090 * 0 - completely stop and dis-assemble array 5091 * 2 - stop but do not disassemble array 5092 */ 5093 static int do_md_stop(struct mddev * mddev, int mode, int is_open) 5094 { 5095 struct gendisk *disk = mddev->gendisk; 5096 struct md_rdev *rdev; 5097 5098 mutex_lock(&mddev->open_mutex); 5099 if (atomic_read(&mddev->openers) > is_open || 5100 mddev->sysfs_active) { 5101 printk("md: %s still in use.\n",mdname(mddev)); 5102 mutex_unlock(&mddev->open_mutex); 5103 return -EBUSY; 5104 } 5105 5106 if (mddev->pers) { 5107 if (mddev->ro) 5108 set_disk_ro(disk, 0); 5109 5110 __md_stop_writes(mddev); 5111 md_stop(mddev); 5112 mddev->queue->merge_bvec_fn = NULL; 5113 mddev->queue->backing_dev_info.congested_fn = NULL; 5114 5115 /* tell userspace to handle 'inactive' */ 5116 sysfs_notify_dirent_safe(mddev->sysfs_state); 5117 5118 list_for_each_entry(rdev, &mddev->disks, same_set) 5119 if (rdev->raid_disk >= 0) 5120 sysfs_unlink_rdev(mddev, rdev); 5121 5122 set_capacity(disk, 0); 5123 mutex_unlock(&mddev->open_mutex); 5124 mddev->changed = 1; 5125 revalidate_disk(disk); 5126 5127 if (mddev->ro) 5128 mddev->ro = 0; 5129 } else 5130 mutex_unlock(&mddev->open_mutex); 5131 /* 5132 * Free resources if final stop 5133 */ 5134 if (mode == 0) { 5135 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); 5136 5137 bitmap_destroy(mddev); 5138 if (mddev->bitmap_info.file) { 5139 restore_bitmap_write_access(mddev->bitmap_info.file); 5140 fput(mddev->bitmap_info.file); 5141 mddev->bitmap_info.file = NULL; 5142 } 5143 mddev->bitmap_info.offset = 0; 5144 5145 export_array(mddev); 5146 5147 md_clean(mddev); 5148 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 5149 if (mddev->hold_active == UNTIL_STOP) 5150 mddev->hold_active = 0; 5151 } 5152 blk_integrity_unregister(disk); 5153 md_new_event(mddev); 5154 sysfs_notify_dirent_safe(mddev->sysfs_state); 5155 return 0; 5156 } 5157 5158 #ifndef MODULE 5159 static void autorun_array(struct mddev *mddev) 5160 { 5161 struct md_rdev *rdev; 5162 int err; 5163 5164 if (list_empty(&mddev->disks)) 5165 return; 5166 5167 printk(KERN_INFO "md: running: "); 5168 5169 list_for_each_entry(rdev, &mddev->disks, same_set) { 
5170 char b[BDEVNAME_SIZE]; 5171 printk("<%s>", bdevname(rdev->bdev,b)); 5172 } 5173 printk("\n"); 5174 5175 err = do_md_run(mddev); 5176 if (err) { 5177 printk(KERN_WARNING "md: do_md_run() returned %d\n", err); 5178 do_md_stop(mddev, 0, 0); 5179 } 5180 } 5181 5182 /* 5183 * let's try to run arrays based on all disks that have arrived 5184 * until now. (those are in pending_raid_disks) 5185 * 5186 * the method: pick the first pending disk, collect all disks with 5187 * the same UUID, remove all from the pending list and put them into 5188 * the 'same_array' list. Then order this list based on superblock 5189 * update time (freshest comes first), kick out 'old' disks and 5190 * compare superblocks. If everything's fine then run it. 5191 * 5192 * If "unit" is allocated, then bump its reference count 5193 */ 5194 static void autorun_devices(int part) 5195 { 5196 struct md_rdev *rdev0, *rdev, *tmp; 5197 struct mddev *mddev; 5198 char b[BDEVNAME_SIZE]; 5199 5200 printk(KERN_INFO "md: autorun ...\n"); 5201 while (!list_empty(&pending_raid_disks)) { 5202 int unit; 5203 dev_t dev; 5204 LIST_HEAD(candidates); 5205 rdev0 = list_entry(pending_raid_disks.next, 5206 struct md_rdev, same_set); 5207 5208 printk(KERN_INFO "md: considering %s ...\n", 5209 bdevname(rdev0->bdev,b)); 5210 INIT_LIST_HEAD(&candidates); 5211 rdev_for_each_list(rdev, tmp, &pending_raid_disks) 5212 if (super_90_load(rdev, rdev0, 0) >= 0) { 5213 printk(KERN_INFO "md: adding %s ...\n", 5214 bdevname(rdev->bdev,b)); 5215 list_move(&rdev->same_set, &candidates); 5216 } 5217 /* 5218 * now we have a set of devices, with all of them having 5219 * mostly sane superblocks. It's time to allocate the 5220 * mddev. 5221 */ 5222 if (part) { 5223 dev = MKDEV(mdp_major, 5224 rdev0->preferred_minor << MdpMinorShift); 5225 unit = MINOR(dev) >> MdpMinorShift; 5226 } else { 5227 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor); 5228 unit = MINOR(dev); 5229 } 5230 if (rdev0->preferred_minor != unit) { 5231 printk(KERN_INFO "md: unit number in %s is bad: %d\n", 5232 bdevname(rdev0->bdev, b), rdev0->preferred_minor); 5233 break; 5234 } 5235 5236 md_probe(dev, NULL, NULL); 5237 mddev = mddev_find(dev); 5238 if (!mddev || !mddev->gendisk) { 5239 if (mddev) 5240 mddev_put(mddev); 5241 printk(KERN_ERR 5242 "md: cannot allocate memory for md drive.\n"); 5243 break; 5244 } 5245 if (mddev_lock(mddev)) 5246 printk(KERN_WARNING "md: %s locked, cannot run\n", 5247 mdname(mddev)); 5248 else if (mddev->raid_disks || mddev->major_version 5249 || !list_empty(&mddev->disks)) { 5250 printk(KERN_WARNING 5251 "md: %s already running, cannot run %s\n", 5252 mdname(mddev), bdevname(rdev0->bdev,b)); 5253 mddev_unlock(mddev); 5254 } else { 5255 printk(KERN_INFO "md: created %s\n", mdname(mddev)); 5256 mddev->persistent = 1; 5257 rdev_for_each_list(rdev, tmp, &candidates) { 5258 list_del_init(&rdev->same_set); 5259 if (bind_rdev_to_array(rdev, mddev)) 5260 export_rdev(rdev); 5261 } 5262 autorun_array(mddev); 5263 mddev_unlock(mddev); 5264 } 5265 /* on success, candidates will be empty, on error 5266 * they won't... 5267 */ 5268 rdev_for_each_list(rdev, tmp, &candidates) { 5269 list_del_init(&rdev->same_set); 5270 export_rdev(rdev); 5271 } 5272 mddev_put(mddev); 5273 } 5274 printk(KERN_INFO "md: ... 
autorun DONE.\n"); 5275 } 5276 #endif /* !MODULE */ 5277 5278 static int get_version(void __user * arg) 5279 { 5280 mdu_version_t ver; 5281 5282 ver.major = MD_MAJOR_VERSION; 5283 ver.minor = MD_MINOR_VERSION; 5284 ver.patchlevel = MD_PATCHLEVEL_VERSION; 5285 5286 if (copy_to_user(arg, &ver, sizeof(ver))) 5287 return -EFAULT; 5288 5289 return 0; 5290 } 5291 5292 static int get_array_info(struct mddev * mddev, void __user * arg) 5293 { 5294 mdu_array_info_t info; 5295 int nr,working,insync,failed,spare; 5296 struct md_rdev *rdev; 5297 5298 nr=working=insync=failed=spare=0; 5299 list_for_each_entry(rdev, &mddev->disks, same_set) { 5300 nr++; 5301 if (test_bit(Faulty, &rdev->flags)) 5302 failed++; 5303 else { 5304 working++; 5305 if (test_bit(In_sync, &rdev->flags)) 5306 insync++; 5307 else 5308 spare++; 5309 } 5310 } 5311 5312 info.major_version = mddev->major_version; 5313 info.minor_version = mddev->minor_version; 5314 info.patch_version = MD_PATCHLEVEL_VERSION; 5315 info.ctime = mddev->ctime; 5316 info.level = mddev->level; 5317 info.size = mddev->dev_sectors / 2; 5318 if (info.size != mddev->dev_sectors / 2) /* overflow */ 5319 info.size = -1; 5320 info.nr_disks = nr; 5321 info.raid_disks = mddev->raid_disks; 5322 info.md_minor = mddev->md_minor; 5323 info.not_persistent= !mddev->persistent; 5324 5325 info.utime = mddev->utime; 5326 info.state = 0; 5327 if (mddev->in_sync) 5328 info.state = (1<<MD_SB_CLEAN); 5329 if (mddev->bitmap && mddev->bitmap_info.offset) 5330 info.state = (1<<MD_SB_BITMAP_PRESENT); 5331 info.active_disks = insync; 5332 info.working_disks = working; 5333 info.failed_disks = failed; 5334 info.spare_disks = spare; 5335 5336 info.layout = mddev->layout; 5337 info.chunk_size = mddev->chunk_sectors << 9; 5338 5339 if (copy_to_user(arg, &info, sizeof(info))) 5340 return -EFAULT; 5341 5342 return 0; 5343 } 5344 5345 static int get_bitmap_file(struct mddev * mddev, void __user * arg) 5346 { 5347 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ 5348 char *ptr, *buf = NULL; 5349 int err = -ENOMEM; 5350 5351 if (md_allow_write(mddev)) 5352 file = kmalloc(sizeof(*file), GFP_NOIO); 5353 else 5354 file = kmalloc(sizeof(*file), GFP_KERNEL); 5355 5356 if (!file) 5357 goto out; 5358 5359 /* bitmap disabled, zero the first byte and copy out */ 5360 if (!mddev->bitmap || !mddev->bitmap->file) { 5361 file->pathname[0] = '\0'; 5362 goto copy_out; 5363 } 5364 5365 buf = kmalloc(sizeof(file->pathname), GFP_KERNEL); 5366 if (!buf) 5367 goto out; 5368 5369 ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname)); 5370 if (IS_ERR(ptr)) 5371 goto out; 5372 5373 strcpy(file->pathname, ptr); 5374 5375 copy_out: 5376 err = 0; 5377 if (copy_to_user(arg, file, sizeof(*file))) 5378 err = -EFAULT; 5379 out: 5380 kfree(buf); 5381 kfree(file); 5382 return err; 5383 } 5384 5385 static int get_disk_info(struct mddev * mddev, void __user * arg) 5386 { 5387 mdu_disk_info_t info; 5388 struct md_rdev *rdev; 5389 5390 if (copy_from_user(&info, arg, sizeof(info))) 5391 return -EFAULT; 5392 5393 rdev = find_rdev_nr(mddev, info.number); 5394 if (rdev) { 5395 info.major = MAJOR(rdev->bdev->bd_dev); 5396 info.minor = MINOR(rdev->bdev->bd_dev); 5397 info.raid_disk = rdev->raid_disk; 5398 info.state = 0; 5399 if (test_bit(Faulty, &rdev->flags)) 5400 info.state |= (1<<MD_DISK_FAULTY); 5401 else if (test_bit(In_sync, &rdev->flags)) { 5402 info.state |= (1<<MD_DISK_ACTIVE); 5403 info.state |= (1<<MD_DISK_SYNC); 5404 } 5405 if (test_bit(WriteMostly, &rdev->flags)) 5406 info.state |= 
(1<<MD_DISK_WRITEMOSTLY); 5407 } else { 5408 info.major = info.minor = 0; 5409 info.raid_disk = -1; 5410 info.state = (1<<MD_DISK_REMOVED); 5411 } 5412 5413 if (copy_to_user(arg, &info, sizeof(info))) 5414 return -EFAULT; 5415 5416 return 0; 5417 } 5418 5419 static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info) 5420 { 5421 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 5422 struct md_rdev *rdev; 5423 dev_t dev = MKDEV(info->major,info->minor); 5424 5425 if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) 5426 return -EOVERFLOW; 5427 5428 if (!mddev->raid_disks) { 5429 int err; 5430 /* expecting a device which has a superblock */ 5431 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); 5432 if (IS_ERR(rdev)) { 5433 printk(KERN_WARNING 5434 "md: md_import_device returned %ld\n", 5435 PTR_ERR(rdev)); 5436 return PTR_ERR(rdev); 5437 } 5438 if (!list_empty(&mddev->disks)) { 5439 struct md_rdev *rdev0 5440 = list_entry(mddev->disks.next, 5441 struct md_rdev, same_set); 5442 err = super_types[mddev->major_version] 5443 .load_super(rdev, rdev0, mddev->minor_version); 5444 if (err < 0) { 5445 printk(KERN_WARNING 5446 "md: %s has different UUID to %s\n", 5447 bdevname(rdev->bdev,b), 5448 bdevname(rdev0->bdev,b2)); 5449 export_rdev(rdev); 5450 return -EINVAL; 5451 } 5452 } 5453 err = bind_rdev_to_array(rdev, mddev); 5454 if (err) 5455 export_rdev(rdev); 5456 return err; 5457 } 5458 5459 /* 5460 * add_new_disk can be used once the array is assembled 5461 * to add "hot spares". They must already have a superblock 5462 * written 5463 */ 5464 if (mddev->pers) { 5465 int err; 5466 if (!mddev->pers->hot_add_disk) { 5467 printk(KERN_WARNING 5468 "%s: personality does not support diskops!\n", 5469 mdname(mddev)); 5470 return -EINVAL; 5471 } 5472 if (mddev->persistent) 5473 rdev = md_import_device(dev, mddev->major_version, 5474 mddev->minor_version); 5475 else 5476 rdev = md_import_device(dev, -1, -1); 5477 if (IS_ERR(rdev)) { 5478 printk(KERN_WARNING 5479 "md: md_import_device returned %ld\n", 5480 PTR_ERR(rdev)); 5481 return PTR_ERR(rdev); 5482 } 5483 /* set saved_raid_disk if appropriate */ 5484 if (!mddev->persistent) { 5485 if (info->state & (1<<MD_DISK_SYNC) && 5486 info->raid_disk < mddev->raid_disks) { 5487 rdev->raid_disk = info->raid_disk; 5488 set_bit(In_sync, &rdev->flags); 5489 } else 5490 rdev->raid_disk = -1; 5491 } else 5492 super_types[mddev->major_version]. 5493 validate_super(mddev, rdev); 5494 if ((info->state & (1<<MD_DISK_SYNC)) && 5495 (!test_bit(In_sync, &rdev->flags) || 5496 rdev->raid_disk != info->raid_disk)) { 5497 /* This was a hot-add request, but events doesn't 5498 * match, so reject it. 5499 */ 5500 export_rdev(rdev); 5501 return -EINVAL; 5502 } 5503 5504 if (test_bit(In_sync, &rdev->flags)) 5505 rdev->saved_raid_disk = rdev->raid_disk; 5506 else 5507 rdev->saved_raid_disk = -1; 5508 5509 clear_bit(In_sync, &rdev->flags); /* just to be sure */ 5510 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 5511 set_bit(WriteMostly, &rdev->flags); 5512 else 5513 clear_bit(WriteMostly, &rdev->flags); 5514 5515 rdev->raid_disk = -1; 5516 err = bind_rdev_to_array(rdev, mddev); 5517 if (!err && !mddev->pers->hot_remove_disk) { 5518 /* If there is hot_add_disk but no hot_remove_disk 5519 * then added disks for geometry changes, 5520 * and should be added immediately. 5521 */ 5522 super_types[mddev->major_version]. 
5523 validate_super(mddev, rdev); 5524 err = mddev->pers->hot_add_disk(mddev, rdev); 5525 if (err) 5526 unbind_rdev_from_array(rdev); 5527 } 5528 if (err) 5529 export_rdev(rdev); 5530 else 5531 sysfs_notify_dirent_safe(rdev->sysfs_state); 5532 5533 md_update_sb(mddev, 1); 5534 if (mddev->degraded) 5535 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 5536 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5537 if (!err) 5538 md_new_event(mddev); 5539 md_wakeup_thread(mddev->thread); 5540 return err; 5541 } 5542 5543 /* otherwise, add_new_disk is only allowed 5544 * for major_version==0 superblocks 5545 */ 5546 if (mddev->major_version != 0) { 5547 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n", 5548 mdname(mddev)); 5549 return -EINVAL; 5550 } 5551 5552 if (!(info->state & (1<<MD_DISK_FAULTY))) { 5553 int err; 5554 rdev = md_import_device(dev, -1, 0); 5555 if (IS_ERR(rdev)) { 5556 printk(KERN_WARNING 5557 "md: error, md_import_device() returned %ld\n", 5558 PTR_ERR(rdev)); 5559 return PTR_ERR(rdev); 5560 } 5561 rdev->desc_nr = info->number; 5562 if (info->raid_disk < mddev->raid_disks) 5563 rdev->raid_disk = info->raid_disk; 5564 else 5565 rdev->raid_disk = -1; 5566 5567 if (rdev->raid_disk < mddev->raid_disks) 5568 if (info->state & (1<<MD_DISK_SYNC)) 5569 set_bit(In_sync, &rdev->flags); 5570 5571 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 5572 set_bit(WriteMostly, &rdev->flags); 5573 5574 if (!mddev->persistent) { 5575 printk(KERN_INFO "md: nonpersistent superblock ...\n"); 5576 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; 5577 } else 5578 rdev->sb_start = calc_dev_sboffset(rdev); 5579 rdev->sectors = rdev->sb_start; 5580 5581 err = bind_rdev_to_array(rdev, mddev); 5582 if (err) { 5583 export_rdev(rdev); 5584 return err; 5585 } 5586 } 5587 5588 return 0; 5589 } 5590 5591 static int hot_remove_disk(struct mddev * mddev, dev_t dev) 5592 { 5593 char b[BDEVNAME_SIZE]; 5594 struct md_rdev *rdev; 5595 5596 rdev = find_rdev(mddev, dev); 5597 if (!rdev) 5598 return -ENXIO; 5599 5600 if (rdev->raid_disk >= 0) 5601 goto busy; 5602 5603 kick_rdev_from_array(rdev); 5604 md_update_sb(mddev, 1); 5605 md_new_event(mddev); 5606 5607 return 0; 5608 busy: 5609 printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n", 5610 bdevname(rdev->bdev,b), mdname(mddev)); 5611 return -EBUSY; 5612 } 5613 5614 static int hot_add_disk(struct mddev * mddev, dev_t dev) 5615 { 5616 char b[BDEVNAME_SIZE]; 5617 int err; 5618 struct md_rdev *rdev; 5619 5620 if (!mddev->pers) 5621 return -ENODEV; 5622 5623 if (mddev->major_version != 0) { 5624 printk(KERN_WARNING "%s: HOT_ADD may only be used with" 5625 " version-0 superblocks.\n", 5626 mdname(mddev)); 5627 return -EINVAL; 5628 } 5629 if (!mddev->pers->hot_add_disk) { 5630 printk(KERN_WARNING 5631 "%s: personality does not support diskops!\n", 5632 mdname(mddev)); 5633 return -EINVAL; 5634 } 5635 5636 rdev = md_import_device(dev, -1, 0); 5637 if (IS_ERR(rdev)) { 5638 printk(KERN_WARNING 5639 "md: error, md_import_device() returned %ld\n", 5640 PTR_ERR(rdev)); 5641 return -EINVAL; 5642 } 5643 5644 if (mddev->persistent) 5645 rdev->sb_start = calc_dev_sboffset(rdev); 5646 else 5647 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; 5648 5649 rdev->sectors = rdev->sb_start; 5650 5651 if (test_bit(Faulty, &rdev->flags)) { 5652 printk(KERN_WARNING 5653 "md: can not hot-add faulty %s disk to %s!\n", 5654 bdevname(rdev->bdev,b), mdname(mddev)); 5655 err = -EINVAL; 5656 goto abort_export; 5657 } 5658 clear_bit(In_sync, &rdev->flags); 5659 
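/* Added note: from here on the imported device is treated as a bare
 * spare; forget any previous slot and identity so the recovery
 * thread kicked below can decide where to place it.
 */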
rdev->desc_nr = -1; 5660 rdev->saved_raid_disk = -1; 5661 err = bind_rdev_to_array(rdev, mddev); 5662 if (err) 5663 goto abort_export; 5664 5665 /* 5666 * The rest should better be atomic, we can have disk failures 5667 * noticed in interrupt contexts ... 5668 */ 5669 5670 rdev->raid_disk = -1; 5671 5672 md_update_sb(mddev, 1); 5673 5674 /* 5675 * Kick recovery, maybe this spare has to be added to the 5676 * array immediately. 5677 */ 5678 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5679 md_wakeup_thread(mddev->thread); 5680 md_new_event(mddev); 5681 return 0; 5682 5683 abort_export: 5684 export_rdev(rdev); 5685 return err; 5686 } 5687 5688 static int set_bitmap_file(struct mddev *mddev, int fd) 5689 { 5690 int err; 5691 5692 if (mddev->pers) { 5693 if (!mddev->pers->quiesce) 5694 return -EBUSY; 5695 if (mddev->recovery || mddev->sync_thread) 5696 return -EBUSY; 5697 /* we should be able to change the bitmap.. */ 5698 } 5699 5700 5701 if (fd >= 0) { 5702 if (mddev->bitmap) 5703 return -EEXIST; /* cannot add when bitmap is present */ 5704 mddev->bitmap_info.file = fget(fd); 5705 5706 if (mddev->bitmap_info.file == NULL) { 5707 printk(KERN_ERR "%s: error: failed to get bitmap file\n", 5708 mdname(mddev)); 5709 return -EBADF; 5710 } 5711 5712 err = deny_bitmap_write_access(mddev->bitmap_info.file); 5713 if (err) { 5714 printk(KERN_ERR "%s: error: bitmap file is already in use\n", 5715 mdname(mddev)); 5716 fput(mddev->bitmap_info.file); 5717 mddev->bitmap_info.file = NULL; 5718 return err; 5719 } 5720 mddev->bitmap_info.offset = 0; /* file overrides offset */ 5721 } else if (mddev->bitmap == NULL) 5722 return -ENOENT; /* cannot remove what isn't there */ 5723 err = 0; 5724 if (mddev->pers) { 5725 mddev->pers->quiesce(mddev, 1); 5726 if (fd >= 0) { 5727 err = bitmap_create(mddev); 5728 if (!err) 5729 err = bitmap_load(mddev); 5730 } 5731 if (fd < 0 || err) { 5732 bitmap_destroy(mddev); 5733 fd = -1; /* make sure to put the file */ 5734 } 5735 mddev->pers->quiesce(mddev, 0); 5736 } 5737 if (fd < 0) { 5738 if (mddev->bitmap_info.file) { 5739 restore_bitmap_write_access(mddev->bitmap_info.file); 5740 fput(mddev->bitmap_info.file); 5741 } 5742 mddev->bitmap_info.file = NULL; 5743 } 5744 5745 return err; 5746 } 5747 5748 /* 5749 * set_array_info is used two different ways 5750 * The original usage is when creating a new array. 5751 * In this usage, raid_disks is > 0 and it together with 5752 * level, size, not_persistent,layout,chunksize determine the 5753 * shape of the array. 5754 * This will always create an array with a type-0.90.0 superblock. 5755 * The newer usage is when assembling an array. 5756 * In this case raid_disks will be 0, and the major_version field is 5757 * use to determine which style super-blocks are to be found on the devices. 5758 * The minor and patch _version numbers are also kept incase the 5759 * super_block handler wishes to interpret them. 5760 */ 5761 static int set_array_info(struct mddev * mddev, mdu_array_info_t *info) 5762 { 5763 5764 if (info->raid_disks == 0) { 5765 /* just setting version number for superblock loading */ 5766 if (info->major_version < 0 || 5767 info->major_version >= ARRAY_SIZE(super_types) || 5768 super_types[info->major_version].name == NULL) { 5769 /* maybe try to auto-load a module? 
*/ 5770 printk(KERN_INFO 5771 "md: superblock version %d not known\n", 5772 info->major_version); 5773 return -EINVAL; 5774 } 5775 mddev->major_version = info->major_version; 5776 mddev->minor_version = info->minor_version; 5777 mddev->patch_version = info->patch_version; 5778 mddev->persistent = !info->not_persistent; 5779 /* ensure mddev_put doesn't delete this now that there 5780 * is some minimal configuration. 5781 */ 5782 mddev->ctime = get_seconds(); 5783 return 0; 5784 } 5785 mddev->major_version = MD_MAJOR_VERSION; 5786 mddev->minor_version = MD_MINOR_VERSION; 5787 mddev->patch_version = MD_PATCHLEVEL_VERSION; 5788 mddev->ctime = get_seconds(); 5789 5790 mddev->level = info->level; 5791 mddev->clevel[0] = 0; 5792 mddev->dev_sectors = 2 * (sector_t)info->size; 5793 mddev->raid_disks = info->raid_disks; 5794 /* don't set md_minor, it is determined by which /dev/md* was 5795 * opened 5796 */ 5797 if (info->state & (1<<MD_SB_CLEAN)) 5798 mddev->recovery_cp = MaxSector; 5799 else 5800 mddev->recovery_cp = 0; 5801 mddev->persistent = ! info->not_persistent; 5802 mddev->external = 0; 5803 5804 mddev->layout = info->layout; 5805 mddev->chunk_sectors = info->chunk_size >> 9; 5806 5807 mddev->max_disks = MD_SB_DISKS; 5808 5809 if (mddev->persistent) 5810 mddev->flags = 0; 5811 set_bit(MD_CHANGE_DEVS, &mddev->flags); 5812 5813 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; 5814 mddev->bitmap_info.offset = 0; 5815 5816 mddev->reshape_position = MaxSector; 5817 5818 /* 5819 * Generate a 128 bit UUID 5820 */ 5821 get_random_bytes(mddev->uuid, 16); 5822 5823 mddev->new_level = mddev->level; 5824 mddev->new_chunk_sectors = mddev->chunk_sectors; 5825 mddev->new_layout = mddev->layout; 5826 mddev->delta_disks = 0; 5827 5828 return 0; 5829 } 5830 5831 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors) 5832 { 5833 WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__); 5834 5835 if (mddev->external_size) 5836 return; 5837 5838 mddev->array_sectors = array_sectors; 5839 } 5840 EXPORT_SYMBOL(md_set_array_sectors); 5841 5842 static int update_size(struct mddev *mddev, sector_t num_sectors) 5843 { 5844 struct md_rdev *rdev; 5845 int rv; 5846 int fit = (num_sectors == 0); 5847 5848 if (mddev->pers->resize == NULL) 5849 return -EINVAL; 5850 /* The "num_sectors" is the number of sectors of each device that 5851 * is used. This can only make sense for arrays with redundancy. 5852 * linear and raid0 always use whatever space is available. We can only 5853 * consider changing this number if no resync or reconstruction is 5854 * happening, and if the new size is acceptable. It must fit before the 5855 * sb_start or, if that is <data_offset, it must fit before the size 5856 * of each device. If num_sectors is zero, we find the largest size 5857 * that fits. 5858 */ 5859 if (mddev->sync_thread) 5860 return -EBUSY; 5861 if (mddev->bitmap) 5862 /* Sorry, cannot grow a bitmap yet, just remove it, 5863 * grow, and re-add.
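 * Illustrative mdadm sequence (not part of this driver):
 *   mdadm --grow /dev/md0 --bitmap=none
 *   mdadm --grow /dev/md0 --size=max
 *   mdadm --grow /dev/md0 --bitmap=internal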
5864 */ 5865 return -EBUSY; 5866 list_for_each_entry(rdev, &mddev->disks, same_set) { 5867 sector_t avail = rdev->sectors; 5868 5869 if (fit && (num_sectors == 0 || num_sectors > avail)) 5870 num_sectors = avail; 5871 if (avail < num_sectors) 5872 return -ENOSPC; 5873 } 5874 rv = mddev->pers->resize(mddev, num_sectors); 5875 if (!rv) 5876 revalidate_disk(mddev->gendisk); 5877 return rv; 5878 } 5879 5880 static int update_raid_disks(struct mddev *mddev, int raid_disks) 5881 { 5882 int rv; 5883 /* change the number of raid disks */ 5884 if (mddev->pers->check_reshape == NULL) 5885 return -EINVAL; 5886 if (raid_disks <= 0 || 5887 (mddev->max_disks && raid_disks >= mddev->max_disks)) 5888 return -EINVAL; 5889 if (mddev->sync_thread || mddev->reshape_position != MaxSector) 5890 return -EBUSY; 5891 mddev->delta_disks = raid_disks - mddev->raid_disks; 5892 5893 rv = mddev->pers->check_reshape(mddev); 5894 if (rv < 0) 5895 mddev->delta_disks = 0; 5896 return rv; 5897 } 5898 5899 5900 /* 5901 * update_array_info is used to change the configuration of an 5902 * on-line array. 5903 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size 5904 * fields in the info are checked against the array. 5905 * Any differences that cannot be handled will cause an error. 5906 * Normally, only one change can be managed at a time. 5907 */ 5908 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) 5909 { 5910 int rv = 0; 5911 int cnt = 0; 5912 int state = 0; 5913 5914 /* calculate expected state,ignoring low bits */ 5915 if (mddev->bitmap && mddev->bitmap_info.offset) 5916 state |= (1 << MD_SB_BITMAP_PRESENT); 5917 5918 if (mddev->major_version != info->major_version || 5919 mddev->minor_version != info->minor_version || 5920 /* mddev->patch_version != info->patch_version || */ 5921 mddev->ctime != info->ctime || 5922 mddev->level != info->level || 5923 /* mddev->layout != info->layout || */ 5924 !mddev->persistent != info->not_persistent|| 5925 mddev->chunk_sectors != info->chunk_size >> 9 || 5926 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ 5927 ((state^info->state) & 0xfffffe00) 5928 ) 5929 return -EINVAL; 5930 /* Check there is only one change */ 5931 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 5932 cnt++; 5933 if (mddev->raid_disks != info->raid_disks) 5934 cnt++; 5935 if (mddev->layout != info->layout) 5936 cnt++; 5937 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) 5938 cnt++; 5939 if (cnt == 0) 5940 return 0; 5941 if (cnt > 1) 5942 return -EINVAL; 5943 5944 if (mddev->layout != info->layout) { 5945 /* Change layout 5946 * we don't need to do anything at the md level, the 5947 * personality will take care of it all. 
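 * (For instance, a RAID5 layout change: new_layout is set just below,
 * and the personality's check_reshape() either accepts it or returns
 * an error, in which case new_layout is rolled back to the current
 * layout.)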
5948 */ 5949 if (mddev->pers->check_reshape == NULL) 5950 return -EINVAL; 5951 else { 5952 mddev->new_layout = info->layout; 5953 rv = mddev->pers->check_reshape(mddev); 5954 if (rv) 5955 mddev->new_layout = mddev->layout; 5956 return rv; 5957 } 5958 } 5959 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 5960 rv = update_size(mddev, (sector_t)info->size * 2); 5961 5962 if (mddev->raid_disks != info->raid_disks) 5963 rv = update_raid_disks(mddev, info->raid_disks); 5964 5965 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { 5966 if (mddev->pers->quiesce == NULL) 5967 return -EINVAL; 5968 if (mddev->recovery || mddev->sync_thread) 5969 return -EBUSY; 5970 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) { 5971 /* add the bitmap */ 5972 if (mddev->bitmap) 5973 return -EEXIST; 5974 if (mddev->bitmap_info.default_offset == 0) 5975 return -EINVAL; 5976 mddev->bitmap_info.offset = 5977 mddev->bitmap_info.default_offset; 5978 mddev->pers->quiesce(mddev, 1); 5979 rv = bitmap_create(mddev); 5980 if (!rv) 5981 rv = bitmap_load(mddev); 5982 if (rv) 5983 bitmap_destroy(mddev); 5984 mddev->pers->quiesce(mddev, 0); 5985 } else { 5986 /* remove the bitmap */ 5987 if (!mddev->bitmap) 5988 return -ENOENT; 5989 if (mddev->bitmap->file) 5990 return -EINVAL; 5991 mddev->pers->quiesce(mddev, 1); 5992 bitmap_destroy(mddev); 5993 mddev->pers->quiesce(mddev, 0); 5994 mddev->bitmap_info.offset = 0; 5995 } 5996 } 5997 md_update_sb(mddev, 1); 5998 return rv; 5999 } 6000 6001 static int set_disk_faulty(struct mddev *mddev, dev_t dev) 6002 { 6003 struct md_rdev *rdev; 6004 6005 if (mddev->pers == NULL) 6006 return -ENODEV; 6007 6008 rdev = find_rdev(mddev, dev); 6009 if (!rdev) 6010 return -ENODEV; 6011 6012 md_error(mddev, rdev); 6013 if (!test_bit(Faulty, &rdev->flags)) 6014 return -EBUSY; 6015 return 0; 6016 } 6017 6018 /* 6019 * We have a problem here : there is no easy way to give a CHS 6020 * virtual geometry. We currently pretend that we have a 2 heads 6021 * 4 sectors (with a BIG number of cylinders...). This drives 6022 * dosfs just mad... 
;-) 6023 */ 6024 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) 6025 { 6026 struct mddev *mddev = bdev->bd_disk->private_data; 6027 6028 geo->heads = 2; 6029 geo->sectors = 4; 6030 geo->cylinders = mddev->array_sectors / 8; 6031 return 0; 6032 } 6033 6034 static int md_ioctl(struct block_device *bdev, fmode_t mode, 6035 unsigned int cmd, unsigned long arg) 6036 { 6037 int err = 0; 6038 void __user *argp = (void __user *)arg; 6039 struct mddev *mddev = NULL; 6040 int ro; 6041 6042 if (!capable(CAP_SYS_ADMIN)) 6043 return -EACCES; 6044 6045 /* 6046 * Commands dealing with the RAID driver but not any 6047 * particular array: 6048 */ 6049 switch (cmd) 6050 { 6051 case RAID_VERSION: 6052 err = get_version(argp); 6053 goto done; 6054 6055 case PRINT_RAID_DEBUG: 6056 err = 0; 6057 md_print_devices(); 6058 goto done; 6059 6060 #ifndef MODULE 6061 case RAID_AUTORUN: 6062 err = 0; 6063 autostart_arrays(arg); 6064 goto done; 6065 #endif 6066 default:; 6067 } 6068 6069 /* 6070 * Commands creating/starting a new array: 6071 */ 6072 6073 mddev = bdev->bd_disk->private_data; 6074 6075 if (!mddev) { 6076 BUG(); 6077 goto abort; 6078 } 6079 6080 err = mddev_lock(mddev); 6081 if (err) { 6082 printk(KERN_INFO 6083 "md: ioctl lock interrupted, reason %d, cmd %d\n", 6084 err, cmd); 6085 goto abort; 6086 } 6087 6088 switch (cmd) 6089 { 6090 case SET_ARRAY_INFO: 6091 { 6092 mdu_array_info_t info; 6093 if (!arg) 6094 memset(&info, 0, sizeof(info)); 6095 else if (copy_from_user(&info, argp, sizeof(info))) { 6096 err = -EFAULT; 6097 goto abort_unlock; 6098 } 6099 if (mddev->pers) { 6100 err = update_array_info(mddev, &info); 6101 if (err) { 6102 printk(KERN_WARNING "md: couldn't update" 6103 " array info. %d\n", err); 6104 goto abort_unlock; 6105 } 6106 goto done_unlock; 6107 } 6108 if (!list_empty(&mddev->disks)) { 6109 printk(KERN_WARNING 6110 "md: array %s already has disks!\n", 6111 mdname(mddev)); 6112 err = -EBUSY; 6113 goto abort_unlock; 6114 } 6115 if (mddev->raid_disks) { 6116 printk(KERN_WARNING 6117 "md: array %s already initialised!\n", 6118 mdname(mddev)); 6119 err = -EBUSY; 6120 goto abort_unlock; 6121 } 6122 err = set_array_info(mddev, &info); 6123 if (err) { 6124 printk(KERN_WARNING "md: couldn't set" 6125 " array info. 
%d\n", err); 6126 goto abort_unlock; 6127 } 6128 } 6129 goto done_unlock; 6130 6131 default:; 6132 } 6133 6134 /* 6135 * Commands querying/configuring an existing array: 6136 */ 6137 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY, 6138 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */ 6139 if ((!mddev->raid_disks && !mddev->external) 6140 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY 6141 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE 6142 && cmd != GET_BITMAP_FILE) { 6143 err = -ENODEV; 6144 goto abort_unlock; 6145 } 6146 6147 /* 6148 * Commands even a read-only array can execute: 6149 */ 6150 switch (cmd) 6151 { 6152 case GET_ARRAY_INFO: 6153 err = get_array_info(mddev, argp); 6154 goto done_unlock; 6155 6156 case GET_BITMAP_FILE: 6157 err = get_bitmap_file(mddev, argp); 6158 goto done_unlock; 6159 6160 case GET_DISK_INFO: 6161 err = get_disk_info(mddev, argp); 6162 goto done_unlock; 6163 6164 case RESTART_ARRAY_RW: 6165 err = restart_array(mddev); 6166 goto done_unlock; 6167 6168 case STOP_ARRAY: 6169 err = do_md_stop(mddev, 0, 1); 6170 goto done_unlock; 6171 6172 case STOP_ARRAY_RO: 6173 err = md_set_readonly(mddev, 1); 6174 goto done_unlock; 6175 6176 case BLKROSET: 6177 if (get_user(ro, (int __user *)(arg))) { 6178 err = -EFAULT; 6179 goto done_unlock; 6180 } 6181 err = -EINVAL; 6182 6183 /* if the bdev is going readonly the value of mddev->ro 6184 * does not matter, no writes are coming 6185 */ 6186 if (ro) 6187 goto done_unlock; 6188 6189 /* are we already prepared for writes? */ 6190 if (mddev->ro != 1) 6191 goto done_unlock; 6192 6193 /* transitioning to readauto need only happen for 6194 * arrays that call md_write_start 6195 */ 6196 if (mddev->pers) { 6197 err = restart_array(mddev); 6198 if (err == 0) { 6199 mddev->ro = 2; 6200 set_disk_ro(mddev->gendisk, 0); 6201 } 6202 } 6203 goto done_unlock; 6204 } 6205 6206 /* 6207 * The remaining ioctls are changing the state of the 6208 * superblock, so we do not allow them on read-only arrays. 6209 * However non-MD ioctls (e.g. get-size) will still come through 6210 * here and hit the 'default' below, so only disallow 6211 * 'md' ioctls, and switch to rw mode if started auto-readonly.
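 * (Illustrative note: md ioctls carry MD_MAJOR (9) as their _IOC_TYPE;
 * md_u.h defines e.g. SET_ARRAY_INFO as _IOW(MD_MAJOR, 0x23,
 * mdu_array_info_t), so the check below matches them, while block-layer
 * ioctls such as BLKGETSIZE64 (type 0x12) fall through to 'default'.)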
6212 */ 6213 if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) { 6214 if (mddev->ro == 2) { 6215 mddev->ro = 0; 6216 sysfs_notify_dirent_safe(mddev->sysfs_state); 6217 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6218 md_wakeup_thread(mddev->thread); 6219 } else { 6220 err = -EROFS; 6221 goto abort_unlock; 6222 } 6223 } 6224 6225 switch (cmd) 6226 { 6227 case ADD_NEW_DISK: 6228 { 6229 mdu_disk_info_t info; 6230 if (copy_from_user(&info, argp, sizeof(info))) 6231 err = -EFAULT; 6232 else 6233 err = add_new_disk(mddev, &info); 6234 goto done_unlock; 6235 } 6236 6237 case HOT_REMOVE_DISK: 6238 err = hot_remove_disk(mddev, new_decode_dev(arg)); 6239 goto done_unlock; 6240 6241 case HOT_ADD_DISK: 6242 err = hot_add_disk(mddev, new_decode_dev(arg)); 6243 goto done_unlock; 6244 6245 case SET_DISK_FAULTY: 6246 err = set_disk_faulty(mddev, new_decode_dev(arg)); 6247 goto done_unlock; 6248 6249 case RUN_ARRAY: 6250 err = do_md_run(mddev); 6251 goto done_unlock; 6252 6253 case SET_BITMAP_FILE: 6254 err = set_bitmap_file(mddev, (int)arg); 6255 goto done_unlock; 6256 6257 default: 6258 err = -EINVAL; 6259 goto abort_unlock; 6260 } 6261 6262 done_unlock: 6263 abort_unlock: 6264 if (mddev->hold_active == UNTIL_IOCTL && 6265 err != -EINVAL) 6266 mddev->hold_active = 0; 6267 mddev_unlock(mddev); 6268 6269 return err; 6270 done: 6271 if (err) 6272 MD_BUG(); 6273 abort: 6274 return err; 6275 } 6276 #ifdef CONFIG_COMPAT 6277 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode, 6278 unsigned int cmd, unsigned long arg) 6279 { 6280 switch (cmd) { 6281 case HOT_REMOVE_DISK: 6282 case HOT_ADD_DISK: 6283 case SET_DISK_FAULTY: 6284 case SET_BITMAP_FILE: 6285 /* These take in integer arg, do not convert */ 6286 break; 6287 default: 6288 arg = (unsigned long)compat_ptr(arg); 6289 break; 6290 } 6291 6292 return md_ioctl(bdev, mode, cmd, arg); 6293 } 6294 #endif /* CONFIG_COMPAT */ 6295 6296 static int md_open(struct block_device *bdev, fmode_t mode) 6297 { 6298 /* 6299 * Succeed if we can lock the mddev, which confirms that 6300 * it isn't being stopped right now. 6301 */ 6302 struct mddev *mddev = mddev_find(bdev->bd_dev); 6303 int err; 6304 6305 if (mddev->gendisk != bdev->bd_disk) { 6306 /* we are racing with mddev_put which is discarding this 6307 * bd_disk. 
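 * (Hedged gloss on the lines below: flush_workqueue() lets the deferred
 * mddev_put work release the stale gendisk first, and -ERESTARTSYS then
 * causes the open to be retried from the top against a fresh device.)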
6308 */ 6309 mddev_put(mddev); 6310 /* Wait until bdev->bd_disk is definitely gone */ 6311 flush_workqueue(md_misc_wq); 6312 /* Then retry the open from the top */ 6313 return -ERESTARTSYS; 6314 } 6315 BUG_ON(mddev != bdev->bd_disk->private_data); 6316 6317 if ((err = mutex_lock_interruptible(&mddev->open_mutex))) 6318 goto out; 6319 6320 err = 0; 6321 atomic_inc(&mddev->openers); 6322 mutex_unlock(&mddev->open_mutex); 6323 6324 check_disk_change(bdev); 6325 out: 6326 return err; 6327 } 6328 6329 static int md_release(struct gendisk *disk, fmode_t mode) 6330 { 6331 struct mddev *mddev = disk->private_data; 6332 6333 BUG_ON(!mddev); 6334 atomic_dec(&mddev->openers); 6335 mddev_put(mddev); 6336 6337 return 0; 6338 } 6339 6340 static int md_media_changed(struct gendisk *disk) 6341 { 6342 struct mddev *mddev = disk->private_data; 6343 6344 return mddev->changed; 6345 } 6346 6347 static int md_revalidate(struct gendisk *disk) 6348 { 6349 struct mddev *mddev = disk->private_data; 6350 6351 mddev->changed = 0; 6352 return 0; 6353 } 6354 static const struct block_device_operations md_fops = 6355 { 6356 .owner = THIS_MODULE, 6357 .open = md_open, 6358 .release = md_release, 6359 .ioctl = md_ioctl, 6360 #ifdef CONFIG_COMPAT 6361 .compat_ioctl = md_compat_ioctl, 6362 #endif 6363 .getgeo = md_getgeo, 6364 .media_changed = md_media_changed, 6365 .revalidate_disk= md_revalidate, 6366 }; 6367 6368 static int md_thread(void * arg) 6369 { 6370 struct md_thread *thread = arg; 6371 6372 /* 6373 * md_thread is a 'system-thread', its priority should be very 6374 * high. We avoid resource deadlocks individually in each 6375 * raid personality. (RAID5 does preallocation) We also use RR and 6376 * the very same RT priority as kswapd, thus we will never get 6377 * into a priority inversion deadlock. 6378 * 6379 * we definitely have to have equal or higher priority than 6380 * bdflush, otherwise bdflush will deadlock if there are too 6381 * many dirty RAID5 blocks. 6382 */ 6383 6384 allow_signal(SIGKILL); 6385 while (!kthread_should_stop()) { 6386 6387 /* We need to wait INTERRUPTIBLE so that 6388 * we don't add to the load-average.
6389 * That means we need to be sure no signals are 6390 * pending 6391 */ 6392 if (signal_pending(current)) 6393 flush_signals(current); 6394 6395 wait_event_interruptible_timeout 6396 (thread->wqueue, 6397 test_bit(THREAD_WAKEUP, &thread->flags) 6398 || kthread_should_stop(), 6399 thread->timeout); 6400 6401 clear_bit(THREAD_WAKEUP, &thread->flags); 6402 if (!kthread_should_stop()) 6403 thread->run(thread->mddev); 6404 } 6405 6406 return 0; 6407 } 6408 6409 void md_wakeup_thread(struct md_thread *thread) 6410 { 6411 if (thread) { 6412 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm); 6413 set_bit(THREAD_WAKEUP, &thread->flags); 6414 wake_up(&thread->wqueue); 6415 } 6416 } 6417 6418 struct md_thread *md_register_thread(void (*run) (struct mddev *), struct mddev *mddev, 6419 const char *name) 6420 { 6421 struct md_thread *thread; 6422 6423 thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL); 6424 if (!thread) 6425 return NULL; 6426 6427 init_waitqueue_head(&thread->wqueue); 6428 6429 thread->run = run; 6430 thread->mddev = mddev; 6431 thread->timeout = MAX_SCHEDULE_TIMEOUT; 6432 thread->tsk = kthread_run(md_thread, thread, 6433 "%s_%s", 6434 mdname(thread->mddev), 6435 name ?: mddev->pers->name); 6436 if (IS_ERR(thread->tsk)) { 6437 kfree(thread); 6438 return NULL; 6439 } 6440 return thread; 6441 } 6442 6443 void md_unregister_thread(struct md_thread **threadp) 6444 { 6445 struct md_thread *thread = *threadp; 6446 if (!thread) 6447 return; 6448 pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); 6449 /* Locking ensures that mddev_unlock does not wake_up a 6450 * non-existent thread 6451 */ 6452 spin_lock(&pers_lock); 6453 *threadp = NULL; 6454 spin_unlock(&pers_lock); 6455 6456 kthread_stop(thread->tsk); 6457 kfree(thread); 6458 } 6459 6460 void md_error(struct mddev *mddev, struct md_rdev *rdev) 6461 { 6462 if (!mddev) { 6463 MD_BUG(); 6464 return; 6465 } 6466 6467 if (!rdev || test_bit(Faulty, &rdev->flags)) 6468 return; 6469 6470 if (!mddev->pers || !mddev->pers->error_handler) 6471 return; 6472 mddev->pers->error_handler(mddev,rdev); 6473 if (mddev->degraded) 6474 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 6475 sysfs_notify_dirent_safe(rdev->sysfs_state); 6476 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6477 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6478 md_wakeup_thread(mddev->thread); 6479 if (mddev->event_work.func) 6480 queue_work(md_misc_wq, &mddev->event_work); 6481 md_new_event_inintr(mddev); 6482 } 6483 6484 /* seq_file implementation /proc/mdstat */ 6485 6486 static void status_unused(struct seq_file *seq) 6487 { 6488 int i = 0; 6489 struct md_rdev *rdev; 6490 6491 seq_printf(seq, "unused devices: "); 6492 6493 list_for_each_entry(rdev, &pending_raid_disks, same_set) { 6494 char b[BDEVNAME_SIZE]; 6495 i++; 6496 seq_printf(seq, "%s ", 6497 bdevname(rdev->bdev,b)); 6498 } 6499 if (!i) 6500 seq_printf(seq, "<none>"); 6501 6502 seq_printf(seq, "\n"); 6503 } 6504 6505 6506 static void status_resync(struct seq_file *seq, struct mddev * mddev) 6507 { 6508 sector_t max_sectors, resync, res; 6509 unsigned long dt, db; 6510 sector_t rt; 6511 int scale; 6512 unsigned int per_milli; 6513 6514 resync = mddev->curr_resync - atomic_read(&mddev->recovery_active); 6515 6516 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 6517 max_sectors = mddev->resync_max_sectors; 6518 else 6519 max_sectors = mddev->dev_sectors; 6520 6521 /* 6522 * Should not happen. 
6523 */ 6524 if (!max_sectors) { 6525 MD_BUG(); 6526 return; 6527 } 6528 /* Pick 'scale' such that (resync>>scale)*1000 will fit 6529 * in a sector_t, and (max_sectors>>scale) will fit in a 6530 * u32, as those are the requirements for sector_div. 6531 * Thus 'scale' must be at least 10 6532 */ 6533 scale = 10; 6534 if (sizeof(sector_t) > sizeof(unsigned long)) { 6535 while ( max_sectors/2 > (1ULL<<(scale+32))) 6536 scale++; 6537 } 6538 res = (resync>>scale)*1000; 6539 sector_div(res, (u32)((max_sectors>>scale)+1)); 6540 6541 per_milli = res; 6542 { 6543 int i, x = per_milli/50, y = 20-x; 6544 seq_printf(seq, "["); 6545 for (i = 0; i < x; i++) 6546 seq_printf(seq, "="); 6547 seq_printf(seq, ">"); 6548 for (i = 0; i < y; i++) 6549 seq_printf(seq, "."); 6550 seq_printf(seq, "] "); 6551 } 6552 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)", 6553 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? 6554 "reshape" : 6555 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)? 6556 "check" : 6557 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? 6558 "resync" : "recovery"))), 6559 per_milli/10, per_milli % 10, 6560 (unsigned long long) resync/2, 6561 (unsigned long long) max_sectors/2); 6562 6563 /* 6564 * dt: time from mark until now 6565 * db: blocks written from mark until now 6566 * rt: remaining time 6567 * 6568 * rt is a sector_t, so could be 32bit or 64bit. 6569 * So we divide before multiply in case it is 32bit and close 6570 * to the limit. 6571 * We scale the divisor (db) by 32 to avoid losing precision 6572 * near the end of resync when the number of remaining sectors 6573 * is close to 'db'. 6574 * We then divide rt by 32 after multiplying by db to compensate. 6575 * The '+1' avoids division by zero if db is very small. 6576 */ 6577 dt = ((jiffies - mddev->resync_mark) / HZ); 6578 if (!dt) dt++; 6579 db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active)) 6580 - mddev->resync_mark_cnt; 6581 6582 rt = max_sectors - resync; /* number of remaining sectors */ 6583 sector_div(rt, db/32+1); 6584 rt *= dt; 6585 rt >>= 5; 6586 6587 seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60, 6588 ((unsigned long)rt % 60)/6); 6589 6590 seq_printf(seq, " speed=%ldK/sec", db/2/dt); 6591 } 6592 6593 static void *md_seq_start(struct seq_file *seq, loff_t *pos) 6594 { 6595 struct list_head *tmp; 6596 loff_t l = *pos; 6597 struct mddev *mddev; 6598 6599 if (l >= 0x10000) 6600 return NULL; 6601 if (!l--) 6602 /* header */ 6603 return (void*)1; 6604 6605 spin_lock(&all_mddevs_lock); 6606 list_for_each(tmp,&all_mddevs) 6607 if (!l--) { 6608 mddev = list_entry(tmp, struct mddev, all_mddevs); 6609 mddev_get(mddev); 6610 spin_unlock(&all_mddevs_lock); 6611 return mddev; 6612 } 6613 spin_unlock(&all_mddevs_lock); 6614 if (!l--) 6615 return (void*)2;/* tail */ 6616 return NULL; 6617 } 6618 6619 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) 6620 { 6621 struct list_head *tmp; 6622 struct mddev *next_mddev, *mddev = v; 6623 6624 ++*pos; 6625 if (v == (void*)2) 6626 return NULL; 6627 6628 spin_lock(&all_mddevs_lock); 6629 if (v == (void*)1) 6630 tmp = all_mddevs.next; 6631 else 6632 tmp = mddev->all_mddevs.next; 6633 if (tmp != &all_mddevs) 6634 next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs)); 6635 else { 6636 next_mddev = (void*)2; 6637 *pos = 0x10000; 6638 } 6639 spin_unlock(&all_mddevs_lock); 6640 6641 if (v != (void*)1) 6642 mddev_put(mddev); 6643 return next_mddev; 6644 6645 } 6646 6647 static void md_seq_stop(struct seq_file *seq, void *v) 6648 { 6649 struct mddev 
*mddev = v; 6650 6651 if (mddev && v != (void*)1 && v != (void*)2) 6652 mddev_put(mddev); 6653 } 6654 6655 static int md_seq_show(struct seq_file *seq, void *v) 6656 { 6657 struct mddev *mddev = v; 6658 sector_t sectors; 6659 struct md_rdev *rdev; 6660 struct bitmap *bitmap; 6661 6662 if (v == (void*)1) { 6663 struct md_personality *pers; 6664 seq_printf(seq, "Personalities : "); 6665 spin_lock(&pers_lock); 6666 list_for_each_entry(pers, &pers_list, list) 6667 seq_printf(seq, "[%s] ", pers->name); 6668 6669 spin_unlock(&pers_lock); 6670 seq_printf(seq, "\n"); 6671 seq->poll_event = atomic_read(&md_event_count); 6672 return 0; 6673 } 6674 if (v == (void*)2) { 6675 status_unused(seq); 6676 return 0; 6677 } 6678 6679 if (mddev_lock(mddev) < 0) 6680 return -EINTR; 6681 6682 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { 6683 seq_printf(seq, "%s : %sactive", mdname(mddev), 6684 mddev->pers ? "" : "in"); 6685 if (mddev->pers) { 6686 if (mddev->ro==1) 6687 seq_printf(seq, " (read-only)"); 6688 if (mddev->ro==2) 6689 seq_printf(seq, " (auto-read-only)"); 6690 seq_printf(seq, " %s", mddev->pers->name); 6691 } 6692 6693 sectors = 0; 6694 list_for_each_entry(rdev, &mddev->disks, same_set) { 6695 char b[BDEVNAME_SIZE]; 6696 seq_printf(seq, " %s[%d]", 6697 bdevname(rdev->bdev,b), rdev->desc_nr); 6698 if (test_bit(WriteMostly, &rdev->flags)) 6699 seq_printf(seq, "(W)"); 6700 if (test_bit(Faulty, &rdev->flags)) { 6701 seq_printf(seq, "(F)"); 6702 continue; 6703 } else if (rdev->raid_disk < 0) 6704 seq_printf(seq, "(S)"); /* spare */ 6705 sectors += rdev->sectors; 6706 } 6707 6708 if (!list_empty(&mddev->disks)) { 6709 if (mddev->pers) 6710 seq_printf(seq, "\n %llu blocks", 6711 (unsigned long long) 6712 mddev->array_sectors / 2); 6713 else 6714 seq_printf(seq, "\n %llu blocks", 6715 (unsigned long long)sectors / 2); 6716 } 6717 if (mddev->persistent) { 6718 if (mddev->major_version != 0 || 6719 mddev->minor_version != 90) { 6720 seq_printf(seq," super %d.%d", 6721 mddev->major_version, 6722 mddev->minor_version); 6723 } 6724 } else if (mddev->external) 6725 seq_printf(seq, " super external:%s", 6726 mddev->metadata_type); 6727 else 6728 seq_printf(seq, " super non-persistent"); 6729 6730 if (mddev->pers) { 6731 mddev->pers->status(seq, mddev); 6732 seq_printf(seq, "\n "); 6733 if (mddev->pers->sync_request) { 6734 if (mddev->curr_resync > 2) { 6735 status_resync(seq, mddev); 6736 seq_printf(seq, "\n "); 6737 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2) 6738 seq_printf(seq, "\tresync=DELAYED\n "); 6739 else if (mddev->recovery_cp < MaxSector) 6740 seq_printf(seq, "\tresync=PENDING\n "); 6741 } 6742 } else 6743 seq_printf(seq, "\n "); 6744 6745 if ((bitmap = mddev->bitmap)) { 6746 unsigned long chunk_kb; 6747 unsigned long flags; 6748 spin_lock_irqsave(&bitmap->lock, flags); 6749 chunk_kb = mddev->bitmap_info.chunksize >> 10; 6750 seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], " 6751 "%lu%s chunk", 6752 bitmap->pages - bitmap->missing_pages, 6753 bitmap->pages, 6754 (bitmap->pages - bitmap->missing_pages) 6755 << (PAGE_SHIFT - 10), 6756 chunk_kb ? chunk_kb : mddev->bitmap_info.chunksize, 6757 chunk_kb ? 
"KB" : "B"); 6758 if (bitmap->file) { 6759 seq_printf(seq, ", file: "); 6760 seq_path(seq, &bitmap->file->f_path, " \t\n"); 6761 } 6762 6763 seq_printf(seq, "\n"); 6764 spin_unlock_irqrestore(&bitmap->lock, flags); 6765 } 6766 6767 seq_printf(seq, "\n"); 6768 } 6769 mddev_unlock(mddev); 6770 6771 return 0; 6772 } 6773 6774 static const struct seq_operations md_seq_ops = { 6775 .start = md_seq_start, 6776 .next = md_seq_next, 6777 .stop = md_seq_stop, 6778 .show = md_seq_show, 6779 }; 6780 6781 static int md_seq_open(struct inode *inode, struct file *file) 6782 { 6783 struct seq_file *seq; 6784 int error; 6785 6786 error = seq_open(file, &md_seq_ops); 6787 if (error) 6788 return error; 6789 6790 seq = file->private_data; 6791 seq->poll_event = atomic_read(&md_event_count); 6792 return error; 6793 } 6794 6795 static unsigned int mdstat_poll(struct file *filp, poll_table *wait) 6796 { 6797 struct seq_file *seq = filp->private_data; 6798 int mask; 6799 6800 poll_wait(filp, &md_event_waiters, wait); 6801 6802 /* always allow read */ 6803 mask = POLLIN | POLLRDNORM; 6804 6805 if (seq->poll_event != atomic_read(&md_event_count)) 6806 mask |= POLLERR | POLLPRI; 6807 return mask; 6808 } 6809 6810 static const struct file_operations md_seq_fops = { 6811 .owner = THIS_MODULE, 6812 .open = md_seq_open, 6813 .read = seq_read, 6814 .llseek = seq_lseek, 6815 .release = seq_release_private, 6816 .poll = mdstat_poll, 6817 }; 6818 6819 int register_md_personality(struct md_personality *p) 6820 { 6821 spin_lock(&pers_lock); 6822 list_add_tail(&p->list, &pers_list); 6823 printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level); 6824 spin_unlock(&pers_lock); 6825 return 0; 6826 } 6827 6828 int unregister_md_personality(struct md_personality *p) 6829 { 6830 printk(KERN_INFO "md: %s personality unregistered\n", p->name); 6831 spin_lock(&pers_lock); 6832 list_del_init(&p->list); 6833 spin_unlock(&pers_lock); 6834 return 0; 6835 } 6836 6837 static int is_mddev_idle(struct mddev *mddev, int init) 6838 { 6839 struct md_rdev * rdev; 6840 int idle; 6841 int curr_events; 6842 6843 idle = 1; 6844 rcu_read_lock(); 6845 rdev_for_each_rcu(rdev, mddev) { 6846 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; 6847 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + 6848 (int)part_stat_read(&disk->part0, sectors[1]) - 6849 atomic_read(&disk->sync_io); 6850 /* sync IO will cause sync_io to increase before the disk_stats 6851 * as sync_io is counted when a request starts, and 6852 * disk_stats is counted when it completes. 6853 * So resync activity will cause curr_events to be smaller than 6854 * when there was no such activity. 6855 * non-sync IO will cause disk_stat to increase without 6856 * increasing sync_io so curr_events will (eventually) 6857 * be larger than it was before. Once it becomes 6858 * substantially larger, the test below will cause 6859 * the array to appear non-idle, and resync will slow 6860 * down. 6861 * If there is a lot of outstanding resync activity when 6862 * we set last_event to curr_events, then all that activity 6863 * completing might cause the array to appear non-idle 6864 * and resync will be slowed down even though there might 6865 * not have been non-resync activity. This will only 6866 * happen once though. 'last_events' will soon reflect 6867 * the state where there is little or no outstanding 6868 * resync requests, and further resync activity will 6869 * always make curr_events less than last_events. 
6870 * 6871 */ 6872 if (init || curr_events - rdev->last_events > 64) { 6873 rdev->last_events = curr_events; 6874 idle = 0; 6875 } 6876 } 6877 rcu_read_unlock(); 6878 return idle; 6879 } 6880 6881 void md_done_sync(struct mddev *mddev, int blocks, int ok) 6882 { 6883 /* another "blocks" (512byte) blocks have been synced */ 6884 atomic_sub(blocks, &mddev->recovery_active); 6885 wake_up(&mddev->recovery_wait); 6886 if (!ok) { 6887 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6888 md_wakeup_thread(mddev->thread); 6889 // stop recovery, signal do_sync .... 6890 } 6891 } 6892 6893 6894 /* md_write_start(mddev, bi) 6895 * If we need to update some array metadata (e.g. 'active' flag 6896 * in superblock) before writing, schedule a superblock update 6897 * and wait for it to complete. 6898 */ 6899 void md_write_start(struct mddev *mddev, struct bio *bi) 6900 { 6901 int did_change = 0; 6902 if (bio_data_dir(bi) != WRITE) 6903 return; 6904 6905 BUG_ON(mddev->ro == 1); 6906 if (mddev->ro == 2) { 6907 /* need to switch to read/write */ 6908 mddev->ro = 0; 6909 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6910 md_wakeup_thread(mddev->thread); 6911 md_wakeup_thread(mddev->sync_thread); 6912 did_change = 1; 6913 } 6914 atomic_inc(&mddev->writes_pending); 6915 if (mddev->safemode == 1) 6916 mddev->safemode = 0; 6917 if (mddev->in_sync) { 6918 spin_lock_irq(&mddev->write_lock); 6919 if (mddev->in_sync) { 6920 mddev->in_sync = 0; 6921 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 6922 set_bit(MD_CHANGE_PENDING, &mddev->flags); 6923 md_wakeup_thread(mddev->thread); 6924 did_change = 1; 6925 } 6926 spin_unlock_irq(&mddev->write_lock); 6927 } 6928 if (did_change) 6929 sysfs_notify_dirent_safe(mddev->sysfs_state); 6930 wait_event(mddev->sb_wait, 6931 !test_bit(MD_CHANGE_PENDING, &mddev->flags)); 6932 } 6933 6934 void md_write_end(struct mddev *mddev) 6935 { 6936 if (atomic_dec_and_test(&mddev->writes_pending)) { 6937 if (mddev->safemode == 2) 6938 md_wakeup_thread(mddev->thread); 6939 else if (mddev->safemode_delay) 6940 mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay); 6941 } 6942 } 6943 6944 /* md_allow_write(mddev) 6945 * Calling this ensures that the array is marked 'active' so that writes 6946 * may proceed without blocking. It is important to call this before 6947 * attempting a GFP_KERNEL allocation while holding the mddev lock. 6948 * Must be called with mddev_lock held. 6949 * 6950 * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock 6951 * is dropped, so return -EAGAIN after notifying userspace. 
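 *
 * A hedged caller sketch (hypothetical helper, not part of md.c),
 * showing the intended pattern around a GFP_KERNEL allocation made
 * while holding the mddev lock:
 */
static inline void *md_example_alloc_under_lock(struct mddev *mddev,
						size_t len)
{
	if (md_allow_write(mddev) == -EAGAIN) {
		/* External metadata: userspace has been notified but has
		 * not yet marked the array active; back off and let the
		 * caller retry instead of risking a blocking write.
		 */
		return NULL;
	}
	return kmalloc(len, GFP_KERNEL);	/* array is marked active */
}
/*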
*/ 6953 int md_allow_write(struct mddev *mddev) 6954 { 6955 if (!mddev->pers) 6956 return 0; 6957 if (mddev->ro) 6958 return 0; 6959 if (!mddev->pers->sync_request) 6960 return 0; 6961 6962 spin_lock_irq(&mddev->write_lock); 6963 if (mddev->in_sync) { 6964 mddev->in_sync = 0; 6965 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 6966 set_bit(MD_CHANGE_PENDING, &mddev->flags); 6967 if (mddev->safemode_delay && 6968 mddev->safemode == 0) 6969 mddev->safemode = 1; 6970 spin_unlock_irq(&mddev->write_lock); 6971 md_update_sb(mddev, 0); 6972 sysfs_notify_dirent_safe(mddev->sysfs_state); 6973 } else 6974 spin_unlock_irq(&mddev->write_lock); 6975 6976 if (test_bit(MD_CHANGE_PENDING, &mddev->flags)) 6977 return -EAGAIN; 6978 else 6979 return 0; 6980 } 6981 EXPORT_SYMBOL_GPL(md_allow_write); 6982 6983 #define SYNC_MARKS 10 6984 #define SYNC_MARK_STEP (3*HZ) 6985 void md_do_sync(struct mddev *mddev) 6986 { 6987 struct mddev *mddev2; 6988 unsigned int currspeed = 0, 6989 window; 6990 sector_t max_sectors,j, io_sectors; 6991 unsigned long mark[SYNC_MARKS]; 6992 sector_t mark_cnt[SYNC_MARKS]; 6993 int last_mark,m; 6994 struct list_head *tmp; 6995 sector_t last_check; 6996 int skipped = 0; 6997 struct md_rdev *rdev; 6998 char *desc; 6999 7000 /* just in case thread restarts... */ 7001 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) 7002 return; 7003 if (mddev->ro) /* never try to sync a read-only array */ 7004 return; 7005 7006 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 7007 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) 7008 desc = "data-check"; 7009 else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 7010 desc = "requested-resync"; 7011 else 7012 desc = "resync"; 7013 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 7014 desc = "reshape"; 7015 else 7016 desc = "recovery"; 7017 7018 /* we overload curr_resync somewhat here. 7019 * 0 == not engaged in resync at all 7020 * 2 == checking that there is no conflict with another sync 7021 * 1 == like 2, but have yielded to allow conflicting resync to 7022 * commence 7023 * other == active in resync - this many blocks 7024 * 7025 * Before starting a resync we must have set curr_resync to 7026 * 2, and then checked that every "conflicting" array has curr_resync 7027 * less than ours. When we find one that is the same or higher 7028 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync 7029 * to 1 if we choose to yield (based arbitrarily on address of mddev structure). 7030 * This will mean we have to start checking from the beginning again.
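 * (Concrete illustration: if md0 and md1 are resyncing arrays that
 * share a physical disk, both first set curr_resync to 2; the mddev
 * with the lower address yields by dropping to 1 and waits on
 * resync_wait until the other resync finishes, then starts over.)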
7031 * 7032 */ 7033 7034 do { 7035 mddev->curr_resync = 2; 7036 7037 try_again: 7038 if (kthread_should_stop()) 7039 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 7040 7041 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 7042 goto skip; 7043 for_each_mddev(mddev2, tmp) { 7044 if (mddev2 == mddev) 7045 continue; 7046 if (!mddev->parallel_resync 7047 && mddev2->curr_resync 7048 && match_mddev_units(mddev, mddev2)) { 7049 DEFINE_WAIT(wq); 7050 if (mddev < mddev2 && mddev->curr_resync == 2) { 7051 /* arbitrarily yield */ 7052 mddev->curr_resync = 1; 7053 wake_up(&resync_wait); 7054 } 7055 if (mddev > mddev2 && mddev->curr_resync == 1) 7056 /* no need to wait here, we can wait the next 7057 * time 'round when curr_resync == 2 7058 */ 7059 continue; 7060 /* We need to wait 'interruptible' so as not to 7061 * contribute to the load average, and not to 7062 * be caught by 'softlockup' 7063 */ 7064 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE); 7065 if (!kthread_should_stop() && 7066 mddev2->curr_resync >= mddev->curr_resync) { 7067 printk(KERN_INFO "md: delaying %s of %s" 7068 " until %s has finished (they" 7069 " share one or more physical units)\n", 7070 desc, mdname(mddev), mdname(mddev2)); 7071 mddev_put(mddev2); 7072 if (signal_pending(current)) 7073 flush_signals(current); 7074 schedule(); 7075 finish_wait(&resync_wait, &wq); 7076 goto try_again; 7077 } 7078 finish_wait(&resync_wait, &wq); 7079 } 7080 } 7081 } while (mddev->curr_resync < 2); 7082 7083 j = 0; 7084 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 7085 /* resync follows the size requested by the personality, 7086 * which defaults to physical size, but can be virtual size 7087 */ 7088 max_sectors = mddev->resync_max_sectors; 7089 mddev->resync_mismatches = 0; 7090 /* we don't use the checkpoint if there's a bitmap */ 7091 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 7092 j = mddev->resync_min; 7093 else if (!mddev->bitmap) 7094 j = mddev->recovery_cp; 7095 7096 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 7097 max_sectors = mddev->dev_sectors; 7098 else { 7099 /* recovery follows the physical size of devices */ 7100 max_sectors = mddev->dev_sectors; 7101 j = MaxSector; 7102 rcu_read_lock(); 7103 list_for_each_entry_rcu(rdev, &mddev->disks, same_set) 7104 if (rdev->raid_disk >= 0 && 7105 !test_bit(Faulty, &rdev->flags) && 7106 !test_bit(In_sync, &rdev->flags) && 7107 rdev->recovery_offset < j) 7108 j = rdev->recovery_offset; 7109 rcu_read_unlock(); 7110 } 7111 7112 printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev)); 7113 printk(KERN_INFO "md: minimum _guaranteed_ speed:" 7114 " %d KB/sec/disk.\n", speed_min(mddev)); 7115 printk(KERN_INFO "md: using maximum available idle IO bandwidth " 7116 "(but not more than %d KB/sec) for %s.\n", 7117 speed_max(mddev), desc); 7118 7119 is_mddev_idle(mddev, 1); /* this initializes IO event counters */ 7120 7121 io_sectors = 0; 7122 for (m = 0; m < SYNC_MARKS; m++) { 7123 mark[m] = jiffies; 7124 mark_cnt[m] = io_sectors; 7125 } 7126 last_mark = 0; 7127 mddev->resync_mark = mark[last_mark]; 7128 mddev->resync_mark_cnt = mark_cnt[last_mark]; 7129 7130 /* 7131 * Tune reconstruction: 7132 */ 7133 window = 32*(PAGE_SIZE/512); 7134 printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n", 7135 window/2, (unsigned long long)max_sectors/2); 7136 7137 atomic_set(&mddev->recovery_active, 0); 7138 last_check = 0; 7139 7140 if (j>2) { 7141 printk(KERN_INFO 7142 "md: resuming %s of %s from checkpoint.\n", 7143 desc, mdname(mddev)); 7144 
mddev->curr_resync = j; 7145 } 7146 mddev->curr_resync_completed = j; 7147 7148 while (j < max_sectors) { 7149 sector_t sectors; 7150 7151 skipped = 0; 7152 7153 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 7154 ((mddev->curr_resync > mddev->curr_resync_completed && 7155 (mddev->curr_resync - mddev->curr_resync_completed) 7156 > (max_sectors >> 4)) || 7157 (j - mddev->curr_resync_completed)*2 7158 >= mddev->resync_max - mddev->curr_resync_completed 7159 )) { 7160 /* time to update curr_resync_completed */ 7161 wait_event(mddev->recovery_wait, 7162 atomic_read(&mddev->recovery_active) == 0); 7163 mddev->curr_resync_completed = j; 7164 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 7165 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 7166 } 7167 7168 while (j >= mddev->resync_max && !kthread_should_stop()) { 7169 /* As this condition is controlled by user-space, 7170 * we can block indefinitely, so use '_interruptible' 7171 * to avoid triggering warnings. 7172 */ 7173 flush_signals(current); /* just in case */ 7174 wait_event_interruptible(mddev->recovery_wait, 7175 mddev->resync_max > j 7176 || kthread_should_stop()); 7177 } 7178 7179 if (kthread_should_stop()) 7180 goto interrupted; 7181 7182 sectors = mddev->pers->sync_request(mddev, j, &skipped, 7183 currspeed < speed_min(mddev)); 7184 if (sectors == 0) { 7185 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 7186 goto out; 7187 } 7188 7189 if (!skipped) { /* actual IO requested */ 7190 io_sectors += sectors; 7191 atomic_add(sectors, &mddev->recovery_active); 7192 } 7193 7194 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 7195 break; 7196 7197 j += sectors; 7198 if (j>1) mddev->curr_resync = j; 7199 mddev->curr_mark_cnt = io_sectors; 7200 if (last_check == 0) 7201 /* this is the earliest that rebuild will be 7202 * visible in /proc/mdstat 7203 */ 7204 md_new_event(mddev); 7205 7206 if (last_check + window > io_sectors || j == max_sectors) 7207 continue; 7208 7209 last_check = io_sectors; 7210 repeat: 7211 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) { 7212 /* step marks */ 7213 int next = (last_mark+1) % SYNC_MARKS; 7214 7215 mddev->resync_mark = mark[next]; 7216 mddev->resync_mark_cnt = mark_cnt[next]; 7217 mark[next] = jiffies; 7218 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active); 7219 last_mark = next; 7220 } 7221 7222 7223 if (kthread_should_stop()) 7224 goto interrupted; 7225 7226 7227 /* 7228 * this loop exits only if we are slower than 7229 * the 'hard' speed limit, or the system was IO-idle for 7230 * a jiffy. 7231 * the system might be non-idle CPU-wise, but we only care 7232 * about not overloading the IO subsystem.
(things like an 7233 * e2fsck being done on the RAID array should execute fast) 7234 */ 7235 cond_resched(); 7236 7237 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2 7238 /((jiffies-mddev->resync_mark)/HZ +1) +1; 7239 7240 if (currspeed > speed_min(mddev)) { 7241 if ((currspeed > speed_max(mddev)) || 7242 !is_mddev_idle(mddev, 0)) { 7243 msleep(500); 7244 goto repeat; 7245 } 7246 } 7247 } 7248 printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc); 7249 /* 7250 * this also signals 'finished resyncing' to md_stop 7251 */ 7252 out: 7253 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); 7254 7255 /* tell personality that we are finished */ 7256 mddev->pers->sync_request(mddev, max_sectors, &skipped, 1); 7257 7258 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && 7259 mddev->curr_resync > 2) { 7260 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 7261 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 7262 if (mddev->curr_resync >= mddev->recovery_cp) { 7263 printk(KERN_INFO 7264 "md: checkpointing %s of %s.\n", 7265 desc, mdname(mddev)); 7266 mddev->recovery_cp = mddev->curr_resync; 7267 } 7268 } else 7269 mddev->recovery_cp = MaxSector; 7270 } else { 7271 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 7272 mddev->curr_resync = MaxSector; 7273 rcu_read_lock(); 7274 list_for_each_entry_rcu(rdev, &mddev->disks, same_set) 7275 if (rdev->raid_disk >= 0 && 7276 mddev->delta_disks >= 0 && 7277 !test_bit(Faulty, &rdev->flags) && 7278 !test_bit(In_sync, &rdev->flags) && 7279 rdev->recovery_offset < mddev->curr_resync) 7280 rdev->recovery_offset = mddev->curr_resync; 7281 rcu_read_unlock(); 7282 } 7283 } 7284 set_bit(MD_CHANGE_DEVS, &mddev->flags); 7285 7286 skip: 7287 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 7288 /* We completed so min/max setting can be forgotten if used. */ 7289 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 7290 mddev->resync_min = 0; 7291 mddev->resync_max = MaxSector; 7292 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 7293 mddev->resync_min = mddev->curr_resync_completed; 7294 mddev->curr_resync = 0; 7295 wake_up(&resync_wait); 7296 set_bit(MD_RECOVERY_DONE, &mddev->recovery); 7297 md_wakeup_thread(mddev->thread); 7298 return; 7299 7300 interrupted: 7301 /* 7302 * got a signal, exit. 7303 */ 7304 printk(KERN_INFO 7305 "md: md_do_sync() got signal ... exiting\n"); 7306 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 7307 goto out; 7308 7309 } 7310 EXPORT_SYMBOL_GPL(md_do_sync); 7311 7312 static int remove_and_add_spares(struct mddev *mddev) 7313 { 7314 struct md_rdev *rdev; 7315 int spares = 0; 7316 7317 mddev->curr_resync_completed = 0; 7318 7319 list_for_each_entry(rdev, &mddev->disks, same_set) 7320 if (rdev->raid_disk >= 0 && 7321 !test_bit(Blocked, &rdev->flags) && 7322 (test_bit(Faulty, &rdev->flags) || 7323 ! 
test_bit(In_sync, &rdev->flags)) && 7324 atomic_read(&rdev->nr_pending)==0) { 7325 if (mddev->pers->hot_remove_disk( 7326 mddev, rdev->raid_disk)==0) { 7327 sysfs_unlink_rdev(mddev, rdev); 7328 rdev->raid_disk = -1; 7329 } 7330 } 7331 7332 if (mddev->degraded) { 7333 list_for_each_entry(rdev, &mddev->disks, same_set) { 7334 if (rdev->raid_disk >= 0 && 7335 !test_bit(In_sync, &rdev->flags) && 7336 !test_bit(Faulty, &rdev->flags)) 7337 spares++; 7338 if (rdev->raid_disk < 0 7339 && !test_bit(Faulty, &rdev->flags)) { 7340 rdev->recovery_offset = 0; 7341 if (mddev->pers-> 7342 hot_add_disk(mddev, rdev) == 0) { 7343 if (sysfs_link_rdev(mddev, rdev)) 7344 /* failure here is OK */; 7345 spares++; 7346 md_new_event(mddev); 7347 set_bit(MD_CHANGE_DEVS, &mddev->flags); 7348 } else 7349 break; 7350 } 7351 } 7352 } 7353 return spares; 7354 } 7355 7356 static void reap_sync_thread(struct mddev *mddev) 7357 { 7358 struct md_rdev *rdev; 7359 7360 /* resync has finished, collect result */ 7361 md_unregister_thread(&mddev->sync_thread); 7362 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 7363 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 7364 /* success...*/ 7365 /* activate any spares */ 7366 if (mddev->pers->spare_active(mddev)) 7367 sysfs_notify(&mddev->kobj, NULL, 7368 "degraded"); 7369 } 7370 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 7371 mddev->pers->finish_reshape) 7372 mddev->pers->finish_reshape(mddev); 7373 7374 /* If array is no longer degraded, then any saved_raid_disk 7375 * information must be scrapped. Also if any device is now 7376 * In_sync we must scrape the saved_raid_disk for that device 7377 * so that the superblock for an incrementally recovered device 7378 * is written out. 7379 */ 7380 list_for_each_entry(rdev, &mddev->disks, same_set) 7381 if (!mddev->degraded || 7382 test_bit(In_sync, &rdev->flags)) 7383 rdev->saved_raid_disk = -1; 7384 7385 md_update_sb(mddev, 1); 7386 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7387 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 7388 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 7389 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 7390 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 7391 /* flag recovery needed just to double check */ 7392 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7393 sysfs_notify_dirent_safe(mddev->sysfs_action); 7394 md_new_event(mddev); 7395 if (mddev->event_work.func) 7396 queue_work(md_misc_wq, &mddev->event_work); 7397 } 7398 7399 /* 7400 * This routine is regularly called by all per-raid-array threads to 7401 * deal with generic issues like resync and super-block update. 7402 * Raid personalities that don't have a thread (linear/raid0) do not 7403 * need this as they never do any recovery or update the superblock. 7404 * 7405 * It does not do any resync itself, but rather "forks" off other threads 7406 * to do that as needed. 7407 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in 7408 * "->recovery" and create a thread at ->sync_thread. 7409 * When the thread finishes it sets MD_RECOVERY_DONE 7410 * and wakes up this thread which will reap the thread and finish up. 7411 * This thread also removes any faulty devices (with nr_pending == 0). 7412 * 7413 * The overall approach is: 7414 * 1/ if the superblock needs updating, update it. 7415 * 2/ If a recovery thread is running, don't do anything else. 7416 * 3/ If recovery has finished, clean up, possibly marking spares active. 7417 * 4/ If there are any faulty devices, remove them.
7418 * 5/ If array is degraded, try to add spare devices 7419 * 6/ If array has spares or is not in-sync, start a resync thread. 7420 */ 7421 void md_check_recovery(struct mddev *mddev) 7422 { 7423 if (mddev->suspended) 7424 return; 7425 7426 if (mddev->bitmap) 7427 bitmap_daemon_work(mddev); 7428 7429 if (signal_pending(current)) { 7430 if (mddev->pers->sync_request && !mddev->external) { 7431 printk(KERN_INFO "md: %s in immediate safe mode\n", 7432 mdname(mddev)); 7433 mddev->safemode = 2; 7434 } 7435 flush_signals(current); 7436 } 7437 7438 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 7439 return; 7440 if ( ! ( 7441 (mddev->flags & ~ (1<<MD_CHANGE_PENDING)) || 7442 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || 7443 test_bit(MD_RECOVERY_DONE, &mddev->recovery) || 7444 (mddev->external == 0 && mddev->safemode == 1) || 7445 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending) 7446 && !mddev->in_sync && mddev->recovery_cp == MaxSector) 7447 )) 7448 return; 7449 7450 if (mddev_trylock(mddev)) { 7451 int spares = 0; 7452 7453 if (mddev->ro) { 7454 /* Only thing we do on a ro array is remove 7455 * failed devices. 7456 */ 7457 struct md_rdev *rdev; 7458 list_for_each_entry(rdev, &mddev->disks, same_set) 7459 if (rdev->raid_disk >= 0 && 7460 !test_bit(Blocked, &rdev->flags) && 7461 test_bit(Faulty, &rdev->flags) && 7462 atomic_read(&rdev->nr_pending)==0) { 7463 if (mddev->pers->hot_remove_disk( 7464 mddev, rdev->raid_disk)==0) { 7465 sysfs_unlink_rdev(mddev, rdev); 7466 rdev->raid_disk = -1; 7467 } 7468 } 7469 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7470 goto unlock; 7471 } 7472 7473 if (!mddev->external) { 7474 int did_change = 0; 7475 spin_lock_irq(&mddev->write_lock); 7476 if (mddev->safemode && 7477 !atomic_read(&mddev->writes_pending) && 7478 !mddev->in_sync && 7479 mddev->recovery_cp == MaxSector) { 7480 mddev->in_sync = 1; 7481 did_change = 1; 7482 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 7483 } 7484 if (mddev->safemode == 1) 7485 mddev->safemode = 0; 7486 spin_unlock_irq(&mddev->write_lock); 7487 if (did_change) 7488 sysfs_notify_dirent_safe(mddev->sysfs_state); 7489 } 7490 7491 if (mddev->flags) 7492 md_update_sb(mddev, 0); 7493 7494 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && 7495 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) { 7496 /* resync/recovery still happening */ 7497 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7498 goto unlock; 7499 } 7500 if (mddev->sync_thread) { 7501 reap_sync_thread(mddev); 7502 goto unlock; 7503 } 7504 /* Set RUNNING before clearing NEEDED to avoid 7505 * any transients in the value of "sync_action". 7506 */ 7507 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7508 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7509 /* Clear some bits that don't mean anything, but 7510 * might be left set 7511 */ 7512 clear_bit(MD_RECOVERY_INTR, &mddev->recovery); 7513 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 7514 7515 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 7516 goto unlock; 7517 /* no recovery is running. 7518 * remove any failed drives, then 7519 * add spares if possible. 7520 * Spares are also removed and re-added, to allow 7521 * the personality to fail the re-add.
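 * (Concretely, remove_and_add_spares() above first hot-removes any
 * device that is Faulty or not In_sync and has no pending I/O, then
 * calls hot_add_disk() on unused devices, giving the personality the
 * chance to reject a re-added device.)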
7522 */ 7523 7524 if (mddev->reshape_position != MaxSector) { 7525 if (mddev->pers->check_reshape == NULL || 7526 mddev->pers->check_reshape(mddev) != 0) 7527 /* Cannot proceed */ 7528 goto unlock; 7529 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 7530 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 7531 } else if ((spares = remove_and_add_spares(mddev))) { 7532 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 7533 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 7534 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 7535 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 7536 } else if (mddev->recovery_cp < MaxSector) { 7537 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 7538 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 7539 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 7540 /* nothing to be done ... */ 7541 goto unlock; 7542 7543 if (mddev->pers->sync_request) { 7544 if (spares && mddev->bitmap && ! mddev->bitmap->file) { 7545 /* We are adding a device or devices to an array 7546 * which has the bitmap stored on all devices. 7547 * So make sure all bitmap pages get written 7548 */ 7549 bitmap_write_all(mddev->bitmap); 7550 } 7551 mddev->sync_thread = md_register_thread(md_do_sync, 7552 mddev, 7553 "resync"); 7554 if (!mddev->sync_thread) { 7555 printk(KERN_ERR "%s: could not start resync" 7556 " thread...\n", 7557 mdname(mddev)); 7558 /* leave the spares where they are, it shouldn't hurt */ 7559 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7560 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 7561 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 7562 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 7563 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 7564 } else 7565 md_wakeup_thread(mddev->sync_thread); 7566 sysfs_notify_dirent_safe(mddev->sysfs_action); 7567 md_new_event(mddev); 7568 } 7569 unlock: 7570 if (!mddev->sync_thread) { 7571 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7572 if (test_and_clear_bit(MD_RECOVERY_RECOVER, 7573 &mddev->recovery)) 7574 if (mddev->sysfs_action) 7575 sysfs_notify_dirent_safe(mddev->sysfs_action); 7576 } 7577 mddev_unlock(mddev); 7578 } 7579 } 7580 7581 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev) 7582 { 7583 sysfs_notify_dirent_safe(rdev->sysfs_state); 7584 wait_event_timeout(rdev->blocked_wait, 7585 !test_bit(Blocked, &rdev->flags) && 7586 !test_bit(BlockedBadBlocks, &rdev->flags), 7587 msecs_to_jiffies(5000)); 7588 rdev_dec_pending(rdev, mddev); 7589 } 7590 EXPORT_SYMBOL(md_wait_for_blocked_rdev); 7591 7592 7593 /* Bad block management. 7594 * We can record which blocks on each device are 'bad' and so just 7595 * fail those blocks, or that stripe, rather than the whole device. 7596 * Entries in the bad-block table are 64bits wide. This comprises: 7597 * Length of bad-range, in sectors: 0-511 for lengths 1-512 7598 * Start of bad-range, sector offset, 54 bits (allows 8 exbibytes) 7599 * A 'shift' can be set so that larger blocks are tracked and 7600 * consequently larger devices can be covered. 7601 * 'Acknowledged' flag - 1 bit. - the most significant bit. 7602 * 7603 * Locking of the bad-block table uses a seqlock so md_is_badblock 7604 * might need to retry if it is very unlucky. 7605 * We will sometimes want to check for bad blocks in a bi_end_io function, 7606 * so we use the write_seqlock_irq variant. 7607 * 7608 * When looking for a bad block we specify a range and want to 7609 * know if any block in the range is bad. 
So we binary-search 7610 * to the last range that starts at-or-before the given endpoint, 7611 * (or "before the sector after the target range") 7612 * then see if it ends after the given start. 7613 * We return 7614 * 0 if there are no known bad blocks in the range 7615 * 1 if there are known bad blocks which are all acknowledged 7616 * -1 if there are bad blocks which have not yet been acknowledged in metadata. 7617 * plus the start/length of the first bad section we overlap. 7618 */ 7619 int md_is_badblock(struct badblocks *bb, sector_t s, int sectors, 7620 sector_t *first_bad, int *bad_sectors) 7621 { 7622 int hi; 7623 int lo = 0; 7624 u64 *p = bb->page; 7625 int rv = 0; 7626 sector_t target = s + sectors; 7627 unsigned seq; 7628 7629 if (bb->shift > 0) { 7630 /* round the start down, and the end up */ 7631 s >>= bb->shift; 7632 target += (1<<bb->shift) - 1; 7633 target >>= bb->shift; 7634 sectors = target - s; 7635 } 7636 /* 'target' is now the first block after the bad range */ 7637 7638 retry: 7639 seq = read_seqbegin(&bb->lock); 7640 7641 hi = bb->count; 7642 7643 /* Binary search between lo and hi for 'target' 7644 * i.e. for the last range that starts before 'target' 7645 */ 7646 /* INVARIANT: ranges before 'lo' and at-or-after 'hi' 7647 * are known not to be the last range before target. 7648 * VARIANT: hi-lo is the number of possible 7649 * ranges, and decreases until it reaches 1 7650 */ 7651 while (hi - lo > 1) { 7652 int mid = (lo + hi) / 2; 7653 sector_t a = BB_OFFSET(p[mid]); 7654 if (a < target) 7655 /* This could still be the one, earlier ranges 7656 * could not. */ 7657 lo = mid; 7658 else 7659 /* This and later ranges are definitely out. */ 7660 hi = mid; 7661 } 7662 /* 'lo' might be the last that started before target, but 'hi' isn't */ 7663 if (hi > lo) { 7664 /* need to check all ranges that end after 's' to see if 7665 * any are unacknowledged. 7666 */ 7667 while (lo >= 0 && 7668 BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) { 7669 if (BB_OFFSET(p[lo]) < target) { 7670 /* starts before the end, and finishes after 7671 * the start, so they must overlap 7672 */ 7673 if (rv != -1 && BB_ACK(p[lo])) 7674 rv = 1; 7675 else 7676 rv = -1; 7677 *first_bad = BB_OFFSET(p[lo]); 7678 *bad_sectors = BB_LEN(p[lo]); 7679 } 7680 lo--; 7681 } 7682 } 7683 7684 if (read_seqretry(&bb->lock, seq)) 7685 goto retry; 7686 7687 return rv; 7688 } 7689 EXPORT_SYMBOL_GPL(md_is_badblock); 7690 7691 /* 7692 * Add a range of bad blocks to the table. 7693 * This might extend the table, or might contract it 7694 * if two adjacent ranges can be merged. 7695 * We binary-search to find the 'insertion' point, then 7696 * decide how best to handle it.
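 *
 * Minimal illustrative sketch (not part of the driver): packing one
 * entry under the 64-bit layout described earlier, plus the binary
 * search for the last range starting at-or-before 's'. The helper
 * names are invented; BB_OFFSET() is the real accessor from md.h.
 */
static inline u64 bb_example_pack(sector_t start, int len, int ack)
{
	/* 54-bit start in bits 9..62, (len - 1) in bits 0..8, ack in bit 63 */
	return ((u64)start << 9) | (u64)(len - 1) | ((u64)(!!ack) << 63);
}

static inline int bb_example_find_lo(const u64 *p, int count, sector_t s)
{
	/* index of the last range with BB_OFFSET(p[i]) <= s, or -1 if none */
	int lo = -1, hi = count;

	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;

		if (BB_OFFSET(p[mid]) <= s)
			lo = mid;
		else
			hi = mid;
	}
	return lo;
}
/*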
7697 */ 7698 static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors, 7699 int acknowledged) 7700 { 7701 u64 *p; 7702 int lo, hi; 7703 int rv = 1; 7704 7705 if (bb->shift < 0) 7706 /* badblocks are disabled */ 7707 return 0; 7708 7709 if (bb->shift) { 7710 /* round the start down, and the end up */ 7711 sector_t next = s + sectors; 7712 s >>= bb->shift; 7713 next += (1<<bb->shift) - 1; 7714 next >>= bb->shift; 7715 sectors = next - s; 7716 } 7717 7718 write_seqlock_irq(&bb->lock); 7719 7720 p = bb->page; 7721 lo = 0; 7722 hi = bb->count; 7723 /* Find the last range that starts at-or-before 's' */ 7724 while (hi - lo > 1) { 7725 int mid = (lo + hi) / 2; 7726 sector_t a = BB_OFFSET(p[mid]); 7727 if (a <= s) 7728 lo = mid; 7729 else 7730 hi = mid; 7731 } 7732 if (hi > lo && BB_OFFSET(p[lo]) > s) 7733 hi = lo; 7734 7735 if (hi > lo) { 7736 /* we found a range that might merge with the start 7737 * of our new range 7738 */ 7739 sector_t a = BB_OFFSET(p[lo]); 7740 sector_t e = a + BB_LEN(p[lo]); 7741 int ack = BB_ACK(p[lo]); 7742 if (e >= s) { 7743 /* Yes, we can merge with a previous range */ 7744 if (s == a && s + sectors >= e) 7745 /* new range covers old */ 7746 ack = acknowledged; 7747 else 7748 ack = ack && acknowledged; 7749 7750 if (e < s + sectors) 7751 e = s + sectors; 7752 if (e - a <= BB_MAX_LEN) { 7753 p[lo] = BB_MAKE(a, e-a, ack); 7754 s = e; 7755 } else { 7756 /* does not all fit in one range, 7757 * make p[lo] maximal 7758 */ 7759 if (BB_LEN(p[lo]) != BB_MAX_LEN) 7760 p[lo] = BB_MAKE(a, BB_MAX_LEN, ack); 7761 s = a + BB_MAX_LEN; 7762 } 7763 sectors = e - s; 7764 } 7765 } 7766 if (sectors && hi < bb->count) { 7767 /* 'hi' points to the first range that starts after 's'. 7768 * Maybe we can merge with the start of that range */ 7769 sector_t a = BB_OFFSET(p[hi]); 7770 sector_t e = a + BB_LEN(p[hi]); 7771 int ack = BB_ACK(p[hi]); 7772 if (a <= s + sectors) { 7773 /* merging is possible */ 7774 if (e <= s + sectors) { 7775 /* full overlap */ 7776 e = s + sectors; 7777 ack = acknowledged; 7778 } else 7779 ack = ack && acknowledged; 7780 7781 a = s; 7782 if (e - a <= BB_MAX_LEN) { 7783 p[hi] = BB_MAKE(a, e-a, ack); 7784 s = e; 7785 } else { 7786 p[hi] = BB_MAKE(a, BB_MAX_LEN, ack); 7787 s = a + BB_MAX_LEN; 7788 } 7789 sectors = e - s; 7790 lo = hi; 7791 hi++; 7792 } 7793 } 7794 if (sectors == 0 && hi < bb->count) { 7795 /* we might be able to combine lo and hi */ 7796 /* Note: 's' is at the end of 'lo' */ 7797 sector_t a = BB_OFFSET(p[hi]); 7798 int lolen = BB_LEN(p[lo]); 7799 int hilen = BB_LEN(p[hi]); 7800 int newlen = lolen + hilen - (s - a); 7801 if (s >= a && newlen < BB_MAX_LEN) { 7802 /* yes, we can combine them */ 7803 int ack = BB_ACK(p[lo]) && BB_ACK(p[hi]); 7804 p[lo] = BB_MAKE(BB_OFFSET(p[lo]), newlen, ack); 7805 memmove(p + hi, p + hi + 1, 7806 (bb->count - hi - 1) * 8); 7807 bb->count--; 7808 } 7809 } 7810 while (sectors) { 7811 /* didn't merge (it all). 
int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
		       int acknowledged)
{
	int rv = md_set_badblocks(&rdev->badblocks,
				  s + rdev->data_offset, sectors, acknowledged);
	if (rv) {
		/* Make sure they get written out promptly */
		set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
		md_wakeup_thread(rdev->mddev->thread);
	}
	return rv;
}
EXPORT_SYMBOL_GPL(rdev_set_badblocks);

/*
 * Remove a range of bad blocks from the table.
 * This may involve extending the table if we split a region,
 * but it must not fail.  So if the table becomes full, we just
 * drop the remove request.
 */
static int md_clear_badblocks(struct badblocks *bb, sector_t s, int sectors)
{
	u64 *p;
	int lo, hi;
	sector_t target = s + sectors;
	int rv = 0;

	if (bb->shift > 0) {
		/* When clearing we round the start up and the end down.
		 * This should not matter as the shift should align with
		 * the block size and no rounding should ever be needed.
		 * However it is better to think a block is bad when it
		 * isn't than to think a block is not bad when it is.
		 */
		s += (1<<bb->shift) - 1;
		s >>= bb->shift;
		target >>= bb->shift;
		sectors = target - s;
	}

	write_seqlock_irq(&bb->lock);

	p = bb->page;
	lo = 0;
	hi = bb->count;
	/* Find the last range that starts before 'target' */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);
		if (a < target)
			lo = mid;
		else
			hi = mid;
	}
	if (hi > lo) {
		/* p[lo] is the last range that could overlap the
		 * current range.  Earlier ranges could also overlap,
		 * but only this one can overlap the end of the range.
		 */
		if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) {
			/* Partial overlap, leave the tail of this range */
			int ack = BB_ACK(p[lo]);
			sector_t a = BB_OFFSET(p[lo]);
			sector_t end = a + BB_LEN(p[lo]);

			if (a < s) {
				/* we need to split this range */
				if (bb->count >= MD_MAX_BADBLOCKS) {
					rv = 0;
					goto out;
				}
				memmove(p+lo+1, p+lo, (bb->count - lo) * 8);
				bb->count++;
				p[lo] = BB_MAKE(a, s-a, ack);
				lo++;
			}
			p[lo] = BB_MAKE(target, end - target, ack);
			/* there is no longer an overlap */
			hi = lo;
			lo--;
		}
		while (lo >= 0 &&
		       BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
			/* This range does overlap */
			if (BB_OFFSET(p[lo]) < s) {
				/* Keep the early parts of this range. */
				int ack = BB_ACK(p[lo]);
				sector_t start = BB_OFFSET(p[lo]);
				p[lo] = BB_MAKE(start, s - start, ack);
				/* now 'lo' doesn't overlap, so we're done */
				break;
			}
			lo--;
		}
		/* 'lo' is strictly before, 'hi' is strictly after,
		 * anything between needs to be discarded
		 */
		if (hi - lo > 1) {
			memmove(p+lo+1, p+hi, (bb->count - hi) * 8);
			bb->count -= (hi - lo - 1);
		}
	}

	bb->changed = 1;
out:
	write_sequnlock_irq(&bb->lock);
	return rv;
}
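
/* The interesting case above is clearing the *middle* of a recorded
 * range: both ends stay bad, which consumes an extra table slot and is
 * why the function can bail out (rv = 0) when the table is full.  The
 * userspace sketch below shows just that split on a toy array of
 * (start, len) pairs; the helper name and values are made up for
 * illustration.
 */
#if 0 /* illustrative userspace sketch -- not compiled into md.c */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct range { uint64_t start, len; };

/* Clear [s, s+n) out of bad range r[i], splitting it in two when the
 * cleared span lies strictly inside.  Returns the new range count. */
static int clear_middle(struct range *r, int count, int i,
			uint64_t s, uint64_t n)
{
	uint64_t end = r[i].start + r[i].len;

	if (r[i].start < s && end > s + n) {
		/* split: keep [start, s) and [s+n, end) */
		memmove(r + i + 1, r + i, (count - i) * sizeof(*r));
		r[i].len = s - r[i].start;
		r[i + 1].start = s + n;
		r[i + 1].len = end - (s + n);
		return count + 1;
	}
	/* (trimming an edge, or dropping whole ranges, elided) */
	return count;
}

int main(void)
{
	struct range r[4] = { { 100, 50 } };	/* bad: sectors 100..149 */
	int count = clear_middle(r, 1, 0, 120, 10); /* clear 120..129 */

	for (int i = 0; i < count; i++)
		printf("bad: %llu +%llu\n",
		       (unsigned long long)r[i].start,
		       (unsigned long long)r[i].len);
	/* prints: "bad: 100 +20" then "bad: 130 +20" */
	return 0;
}
#endif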
int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors)
{
	return md_clear_badblocks(&rdev->badblocks,
				  s + rdev->data_offset,
				  sectors);
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);

/*
 * Acknowledge all bad blocks in a list.
 * This only succeeds if ->changed is clear.  It is used by
 * in-kernel metadata updates
 */
void md_ack_all_badblocks(struct badblocks *bb)
{
	if (bb->page == NULL || bb->changed)
		/* no point even trying */
		return;
	write_seqlock_irq(&bb->lock);

	if (bb->changed == 0) {
		u64 *p = bb->page;
		int i;
		for (i = 0; i < bb->count ; i++) {
			if (!BB_ACK(p[i])) {
				sector_t start = BB_OFFSET(p[i]);
				int len = BB_LEN(p[i]);
				p[i] = BB_MAKE(start, len, 1);
			}
		}
		bb->unacked_exist = 0;
	}
	write_sequnlock_irq(&bb->lock);
}
EXPORT_SYMBOL_GPL(md_ack_all_badblocks);

/* sysfs access to bad-blocks list.
 * We present two files.
 * 'bad-blocks' lists sector numbers and lengths of ranges that
 *    are recorded as bad.  The list is truncated to fit within
 *    the one-page limit of sysfs.
 *    Writing "sector length" to this file adds an acknowledged
 *    bad block.
 * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
 *    been acknowledged.  Writing to this file adds bad blocks
 *    without acknowledging them.  This is largely for testing.
 */

static ssize_t
badblocks_show(struct badblocks *bb, char *page, int unack)
{
	size_t len;
	int i;
	u64 *p = bb->page;
	unsigned seq;

	if (bb->shift < 0)
		return 0;

retry:
	seq = read_seqbegin(&bb->lock);

	len = 0;
	i = 0;

	while (len < PAGE_SIZE && i < bb->count) {
		sector_t s = BB_OFFSET(p[i]);
		unsigned int length = BB_LEN(p[i]);
		int ack = BB_ACK(p[i]);
		i++;

		if (unack && ack)
			continue;

		len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n",
				(unsigned long long)s << bb->shift,
				length << bb->shift);
	}
	if (unack && len == 0)
		bb->unacked_exist = 0;

	if (read_seqretry(&bb->lock, seq))
		goto retry;

	return len;
}
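
/* badblocks_show() emits one "sector length" pair per line, which makes
 * the attribute trivial to consume from userspace.  A reader might look
 * like the sketch below; the array name, member device name, and exact
 * attribute path are assumptions for illustration only -- check the
 * entries under /sys/block/mdX/md/ on a real system.
 */
#if 0 /* illustrative userspace sketch -- not compiled into md.c */
#include <stdio.h>

int main(void)
{
	/* hypothetical array md0 with member device sdb1 */
	const char *path = "/sys/block/md0/md/dev-sdb1/bad_blocks";
	unsigned long long sector;
	unsigned int length;
	FILE *f = fopen(path, "r");

	if (!f) {
		perror(path);
		return 1;
	}
	while (fscanf(f, "%llu %u", &sector, &length) == 2)
		printf("bad range: %llu..%llu\n",
		       sector, sector + length - 1);
	fclose(f);
	return 0;
}
#endif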
#define DO_DEBUG 1

static ssize_t
badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack)
{
	unsigned long long sector;
	int length;
	char newline;
#ifdef DO_DEBUG
	/* Allow clearing via sysfs *only* for testing/debugging.
	 * Normally only a successful write may clear a badblock
	 */
	int clear = 0;
	if (page[0] == '-') {
		clear = 1;
		page++;
	}
#endif /* DO_DEBUG */

	switch (sscanf(page, "%llu %d%c", &sector, &length, &newline)) {
	case 3:
		if (newline != '\n')
			return -EINVAL;
		/* fall through */
	case 2:
		if (length <= 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

#ifdef DO_DEBUG
	if (clear) {
		md_clear_badblocks(bb, sector, length);
		return len;
	}
#endif /* DO_DEBUG */
	if (md_set_badblocks(bb, sector, length, !unack))
		return len;
	else
		return -ENOSPC;
}

static int md_notify_reboot(struct notifier_block *this,
			    unsigned long code, void *x)
{
	struct list_head *tmp;
	struct mddev *mddev;
	int need_delay = 0;

	if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {

		printk(KERN_INFO "md: stopping all md devices.\n");

		for_each_mddev(mddev, tmp) {
			if (mddev_trylock(mddev)) {
				/* Force a switch to readonly even if the
				 * array appears to still be in use.  Hence
				 * the '100'.
				 */
				md_set_readonly(mddev, 100);
				mddev_unlock(mddev);
			}
			need_delay = 1;
		}
		/*
		 * certain more exotic SCSI devices are known to be
		 * volatile wrt too early system reboots.  While the
		 * right place to handle this issue is the given
		 * driver, we do want to have a safe RAID driver ...
		 */
		if (need_delay)
			mdelay(1000*1);
	}
	return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};

static void md_geninit(void)
{
	pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}

static int __init md_init(void)
{
	int ret = -ENOMEM;

	md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
	if (!md_wq)
		goto err_wq;

	md_misc_wq = alloc_workqueue("md_misc", 0, 0);
	if (!md_misc_wq)
		goto err_misc_wq;

	if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
		goto err_md;

	if ((ret = register_blkdev(0, "mdp")) < 0)
		goto err_mdp;
	mdp_major = ret;

	blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);
	blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);

	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table);

	md_geninit();
	return 0;

err_mdp:
	unregister_blkdev(MD_MAJOR, "md");
err_md:
	destroy_workqueue(md_misc_wq);
err_misc_wq:
	destroy_workqueue(md_wq);
err_wq:
	return ret;
}
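
/* md_init() uses the usual kernel error-unwind idiom: one goto label per
 * acquired resource, with the labels falling through so each failure
 * point releases exactly what was acquired before it, in reverse order.
 * A minimal stand-alone rendering of the same shape (the resources here
 * are made-up stand-ins, not md's workqueues):
 */
#if 0 /* illustrative userspace sketch -- not compiled into md.c */
#include <stdio.h>
#include <stdlib.h>

static char *res_a, *res_b;	/* stand-ins for md_wq / md_misc_wq */

static int setup(void)
{
	int ret = -1;	/* would be -ENOMEM in the kernel version */

	res_a = malloc(16);
	if (!res_a)
		goto err_a;

	res_b = malloc(16);
	if (!res_b)
		goto err_b;	/* b failed: only a needs freeing */

	/* on success the resources stay live, as md_init() keeps its
	 * workqueues for the lifetime of the module */
	return 0;

err_b:
	free(res_a);	/* labels fall through, unwinding in reverse */
err_a:
	return ret;
}

int main(void)
{
	return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
}
#endif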
#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */

static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
	struct list_head list;
	dev_t dev;
};

void md_autodetect_dev(dev_t dev)
{
	struct detected_devices_node *node_detected_dev;

	node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
	if (node_detected_dev) {
		node_detected_dev->dev = dev;
		list_add_tail(&node_detected_dev->list, &all_detected_devices);
	} else {
		printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
			", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
	}
}


static void autostart_arrays(int part)
{
	struct md_rdev *rdev;
	struct detected_devices_node *node_detected_dev;
	dev_t dev;
	int i_scanned, i_passed;

	i_scanned = 0;
	i_passed = 0;

	printk(KERN_INFO "md: Autodetecting RAID arrays.\n");

	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
		i_scanned++;
		node_detected_dev = list_entry(all_detected_devices.next,
					struct detected_devices_node, list);
		list_del(&node_detected_dev->list);
		dev = node_detected_dev->dev;
		kfree(node_detected_dev);
		rdev = md_import_device(dev, 0, 90);
		if (IS_ERR(rdev))
			continue;

		if (test_bit(Faulty, &rdev->flags)) {
			MD_BUG();
			continue;
		}
		set_bit(AutoDetected, &rdev->flags);
		list_add(&rdev->same_set, &pending_raid_disks);
		i_passed++;
	}

	printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
	       i_scanned, i_passed);

	autorun_devices(part);
}

#endif /* !MODULE */

static __exit void md_exit(void)
{
	struct mddev *mddev;
	struct list_head *tmp;

	blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS);
	blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);

	unregister_blkdev(MD_MAJOR, "md");
	unregister_blkdev(mdp_major, "mdp");
	unregister_reboot_notifier(&md_notifier);
	unregister_sysctl_table(raid_table_header);
	remove_proc_entry("mdstat", NULL);
	for_each_mddev(mddev, tmp) {
		export_array(mddev);
		mddev->hold_active = 0;
	}
	destroy_workqueue(md_misc_wq);
	destroy_workqueue(md_wq);
}

subsys_initcall(md_init);
module_exit(md_exit)

static int get_ro(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
	char *e;
	int num = simple_strtoul(val, &e, 10);
	if (*val && (*e == '\0' || *e == '\n')) {
		start_readonly = num;
		return 0;
	}
	return -EINVAL;
}

module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);

module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);

EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);