1 /* 2 md.c : Multiple Devices driver for Linux 3 Copyright (C) 1998, 1999, 2000 Ingo Molnar 4 5 completely rewritten, based on the MD driver code from Marc Zyngier 6 7 Changes: 8 9 - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar 10 - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com> 11 - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net> 12 - kerneld support by Boris Tobotras <boris@xtalk.msk.su> 13 - kmod support by: Cyrus Durgin 14 - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com> 15 - Devfs support by Richard Gooch <rgooch@atnf.csiro.au> 16 17 - lots of fixes and improvements to the RAID1/RAID5 and generic 18 RAID code (such as request based resynchronization): 19 20 Neil Brown <neilb@cse.unsw.edu.au>. 21 22 - persistent bitmap code 23 Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc. 24 25 This program is free software; you can redistribute it and/or modify 26 it under the terms of the GNU General Public License as published by 27 the Free Software Foundation; either version 2, or (at your option) 28 any later version. 29 30 You should have received a copy of the GNU General Public License 31 (for example /usr/src/linux/COPYING); if not, write to the Free 32 Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 33 */ 34 35 #include <linux/kthread.h> 36 #include <linux/blkdev.h> 37 #include <linux/sysctl.h> 38 #include <linux/seq_file.h> 39 #include <linux/fs.h> 40 #include <linux/poll.h> 41 #include <linux/ctype.h> 42 #include <linux/string.h> 43 #include <linux/hdreg.h> 44 #include <linux/proc_fs.h> 45 #include <linux/random.h> 46 #include <linux/module.h> 47 #include <linux/reboot.h> 48 #include <linux/file.h> 49 #include <linux/compat.h> 50 #include <linux/delay.h> 51 #include <linux/raid/md_p.h> 52 #include <linux/raid/md_u.h> 53 #include <linux/slab.h> 54 #include "md.h" 55 #include "bitmap.h" 56 57 #ifndef MODULE 58 static void autostart_arrays(int part); 59 #endif 60 61 /* pers_list is a list of registered personalities protected 62 * by pers_lock. 63 * pers_lock does extra service to protect accesses to 64 * mddev->thread when the mutex cannot be held. 65 */ 66 static LIST_HEAD(pers_list); 67 static DEFINE_SPINLOCK(pers_lock); 68 69 static void md_print_devices(void); 70 71 static DECLARE_WAIT_QUEUE_HEAD(resync_wait); 72 static struct workqueue_struct *md_wq; 73 static struct workqueue_struct *md_misc_wq; 74 75 #define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); } 76 77 /* 78 * Default number of read corrections we'll attempt on an rdev 79 * before ejecting it from the array. We divide the read error 80 * count by 2 for every hour elapsed between read errors. 81 */ 82 #define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20 83 /* 84 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit' 85 * is 1000 KB/sec, so the extra system load does not show up that much. 86 * Increase it if you want to have more _guaranteed_ speed. Note that 87 * the RAID driver will use the maximum available bandwidth if the IO 88 * subsystem is idle. There is also an 'absolute maximum' reconstruction 89 * speed limit - in case reconstruction slows down your system despite 90 * idle IO detection. 91 * 92 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max. 
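 * (for example, "echo 100000 > /proc/sys/dev/raid/speed_limit_max" from a
 * shell raises the global ceiling to roughly 100 MB/sec; the per-array
 * sysfs files below take precedence whenever they are set non-zero.)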
93 * or /sys/block/mdX/md/sync_speed_{min,max} 94 */ 95 96 static int sysctl_speed_limit_min = 1000; 97 static int sysctl_speed_limit_max = 200000; 98 static inline int speed_min(struct mddev *mddev) 99 { 100 return mddev->sync_speed_min ? 101 mddev->sync_speed_min : sysctl_speed_limit_min; 102 } 103 104 static inline int speed_max(struct mddev *mddev) 105 { 106 return mddev->sync_speed_max ? 107 mddev->sync_speed_max : sysctl_speed_limit_max; 108 } 109 110 static struct ctl_table_header *raid_table_header; 111 112 static ctl_table raid_table[] = { 113 { 114 .procname = "speed_limit_min", 115 .data = &sysctl_speed_limit_min, 116 .maxlen = sizeof(int), 117 .mode = S_IRUGO|S_IWUSR, 118 .proc_handler = proc_dointvec, 119 }, 120 { 121 .procname = "speed_limit_max", 122 .data = &sysctl_speed_limit_max, 123 .maxlen = sizeof(int), 124 .mode = S_IRUGO|S_IWUSR, 125 .proc_handler = proc_dointvec, 126 }, 127 { } 128 }; 129 130 static ctl_table raid_dir_table[] = { 131 { 132 .procname = "raid", 133 .maxlen = 0, 134 .mode = S_IRUGO|S_IXUGO, 135 .child = raid_table, 136 }, 137 { } 138 }; 139 140 static ctl_table raid_root_table[] = { 141 { 142 .procname = "dev", 143 .maxlen = 0, 144 .mode = 0555, 145 .child = raid_dir_table, 146 }, 147 { } 148 }; 149 150 static const struct block_device_operations md_fops; 151 152 static int start_readonly; 153 154 /* bio_clone_mddev 155 * like bio_clone, but with a local bio set 156 */ 157 158 static void mddev_bio_destructor(struct bio *bio) 159 { 160 struct mddev *mddev, **mddevp; 161 162 mddevp = (void*)bio; 163 mddev = mddevp[-1]; 164 165 bio_free(bio, mddev->bio_set); 166 } 167 168 struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs, 169 struct mddev *mddev) 170 { 171 struct bio *b; 172 struct mddev **mddevp; 173 174 if (!mddev || !mddev->bio_set) 175 return bio_alloc(gfp_mask, nr_iovecs); 176 177 b = bio_alloc_bioset(gfp_mask, nr_iovecs, 178 mddev->bio_set); 179 if (!b) 180 return NULL; 181 mddevp = (void*)b; 182 mddevp[-1] = mddev; 183 b->bi_destructor = mddev_bio_destructor; 184 return b; 185 } 186 EXPORT_SYMBOL_GPL(bio_alloc_mddev); 187 188 struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask, 189 struct mddev *mddev) 190 { 191 struct bio *b; 192 struct mddev **mddevp; 193 194 if (!mddev || !mddev->bio_set) 195 return bio_clone(bio, gfp_mask); 196 197 b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, 198 mddev->bio_set); 199 if (!b) 200 return NULL; 201 mddevp = (void*)b; 202 mddevp[-1] = mddev; 203 b->bi_destructor = mddev_bio_destructor; 204 __bio_clone(b, bio); 205 if (bio_integrity(bio)) { 206 int ret; 207 208 ret = bio_integrity_clone(b, bio, gfp_mask, mddev->bio_set); 209 210 if (ret < 0) { 211 bio_put(b); 212 return NULL; 213 } 214 } 215 216 return b; 217 } 218 EXPORT_SYMBOL_GPL(bio_clone_mddev); 219 220 void md_trim_bio(struct bio *bio, int offset, int size) 221 { 222 /* 'bio' is a cloned bio which we need to trim to match 223 * the given offset and size. 
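	 * (raid1, for instance, clones a read bio and then makes a call of the
	 *  form md_trim_bio(read_bio, r1_bio->sector - bio->bi_sector, max_sectors)
	 *  so the clone covers only the stretch one rdev can service; that call
	 *  site is illustrative only.)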
	 * This requires adjusting bi_sector, bi_size, and bi_io_vec
	 */
	int i;
	struct bio_vec *bvec;
	int sofar = 0;

	size <<= 9;
	if (offset == 0 && size == bio->bi_size)
		return;

	bio->bi_sector += offset;
	bio->bi_size = size;
	offset <<= 9;
	clear_bit(BIO_SEG_VALID, &bio->bi_flags);

	while (bio->bi_idx < bio->bi_vcnt &&
	       bio->bi_io_vec[bio->bi_idx].bv_len <= offset) {
		/* remove this whole bio_vec */
		offset -= bio->bi_io_vec[bio->bi_idx].bv_len;
		bio->bi_idx++;
	}
	if (bio->bi_idx < bio->bi_vcnt) {
		bio->bi_io_vec[bio->bi_idx].bv_offset += offset;
		bio->bi_io_vec[bio->bi_idx].bv_len -= offset;
	}
	/* avoid any complications with bi_idx being non-zero */
	if (bio->bi_idx) {
		memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
			(bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec));
		bio->bi_vcnt -= bio->bi_idx;
		bio->bi_idx = 0;
	}
	/* Make sure vcnt and last bv are not too big */
	bio_for_each_segment(bvec, bio, i) {
		if (sofar + bvec->bv_len > size)
			bvec->bv_len = size - sofar;
		if (bvec->bv_len == 0) {
			bio->bi_vcnt = i;
			break;
		}
		sofar += bvec->bv_len;
	}
}
EXPORT_SYMBOL_GPL(md_trim_bio);

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(struct mddev *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(struct mddev *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}

/*
 * Enables iteration over all existing md arrays.
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);


/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while still holding
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(_mddev,_tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		_tmp = all_mddevs.next;					\
		_mddev = NULL;});					\
	     ({ if (_tmp != &all_mddevs)				\
			mddev_get(list_entry(_tmp, struct mddev, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (_mddev) mddev_put(_mddev);				\
		_mddev = list_entry(_tmp, struct mddev, all_mddevs);	\
		_tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		_tmp = _tmp->next;})					\
	     )


/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device is
 * being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request. By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
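 * (mddev_suspend()/mddev_resume() below are the other half of this scheme:
 * for example, level_store() brackets a personality change with that pair,
 * while this path only charges and releases ->active_io.)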
334 */ 335 static void md_make_request(struct request_queue *q, struct bio *bio) 336 { 337 const int rw = bio_data_dir(bio); 338 struct mddev *mddev = q->queuedata; 339 int cpu; 340 unsigned int sectors; 341 342 if (mddev == NULL || mddev->pers == NULL 343 || !mddev->ready) { 344 bio_io_error(bio); 345 return; 346 } 347 smp_rmb(); /* Ensure implications of 'active' are visible */ 348 rcu_read_lock(); 349 if (mddev->suspended) { 350 DEFINE_WAIT(__wait); 351 for (;;) { 352 prepare_to_wait(&mddev->sb_wait, &__wait, 353 TASK_UNINTERRUPTIBLE); 354 if (!mddev->suspended) 355 break; 356 rcu_read_unlock(); 357 schedule(); 358 rcu_read_lock(); 359 } 360 finish_wait(&mddev->sb_wait, &__wait); 361 } 362 atomic_inc(&mddev->active_io); 363 rcu_read_unlock(); 364 365 /* 366 * save the sectors now since our bio can 367 * go away inside make_request 368 */ 369 sectors = bio_sectors(bio); 370 mddev->pers->make_request(mddev, bio); 371 372 cpu = part_stat_lock(); 373 part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]); 374 part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors); 375 part_stat_unlock(); 376 377 if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended) 378 wake_up(&mddev->sb_wait); 379 } 380 381 /* mddev_suspend makes sure no new requests are submitted 382 * to the device, and that any requests that have been submitted 383 * are completely handled. 384 * Once ->stop is called and completes, the module will be completely 385 * unused. 386 */ 387 void mddev_suspend(struct mddev *mddev) 388 { 389 BUG_ON(mddev->suspended); 390 mddev->suspended = 1; 391 synchronize_rcu(); 392 wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0); 393 mddev->pers->quiesce(mddev, 1); 394 } 395 EXPORT_SYMBOL_GPL(mddev_suspend); 396 397 void mddev_resume(struct mddev *mddev) 398 { 399 mddev->suspended = 0; 400 wake_up(&mddev->sb_wait); 401 mddev->pers->quiesce(mddev, 0); 402 403 md_wakeup_thread(mddev->thread); 404 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 405 } 406 EXPORT_SYMBOL_GPL(mddev_resume); 407 408 int mddev_congested(struct mddev *mddev, int bits) 409 { 410 return mddev->suspended; 411 } 412 EXPORT_SYMBOL(mddev_congested); 413 414 /* 415 * Generic flush handling for md 416 */ 417 418 static void md_end_flush(struct bio *bio, int err) 419 { 420 struct md_rdev *rdev = bio->bi_private; 421 struct mddev *mddev = rdev->mddev; 422 423 rdev_dec_pending(rdev, mddev); 424 425 if (atomic_dec_and_test(&mddev->flush_pending)) { 426 /* The pre-request flush has finished */ 427 queue_work(md_wq, &mddev->flush_work); 428 } 429 bio_put(bio); 430 } 431 432 static void md_submit_flush_data(struct work_struct *ws); 433 434 static void submit_flushes(struct work_struct *ws) 435 { 436 struct mddev *mddev = container_of(ws, struct mddev, flush_work); 437 struct md_rdev *rdev; 438 439 INIT_WORK(&mddev->flush_work, md_submit_flush_data); 440 atomic_set(&mddev->flush_pending, 1); 441 rcu_read_lock(); 442 list_for_each_entry_rcu(rdev, &mddev->disks, same_set) 443 if (rdev->raid_disk >= 0 && 444 !test_bit(Faulty, &rdev->flags)) { 445 /* Take two references, one is dropped 446 * when request finishes, one after 447 * we reclaim rcu_read_lock 448 */ 449 struct bio *bi; 450 atomic_inc(&rdev->nr_pending); 451 atomic_inc(&rdev->nr_pending); 452 rcu_read_unlock(); 453 bi = bio_alloc_mddev(GFP_KERNEL, 0, mddev); 454 bi->bi_end_io = md_end_flush; 455 bi->bi_private = rdev; 456 bi->bi_bdev = rdev->bdev; 457 atomic_inc(&mddev->flush_pending); 458 submit_bio(WRITE_FLUSH, bi); 459 
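			/* bi is an empty REQ_FLUSH bio: md_end_flush() will
			 * drop one of the two rdev references and the matching
			 * flush_pending count when the device completes it.
			 */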
rcu_read_lock(); 460 rdev_dec_pending(rdev, mddev); 461 } 462 rcu_read_unlock(); 463 if (atomic_dec_and_test(&mddev->flush_pending)) 464 queue_work(md_wq, &mddev->flush_work); 465 } 466 467 static void md_submit_flush_data(struct work_struct *ws) 468 { 469 struct mddev *mddev = container_of(ws, struct mddev, flush_work); 470 struct bio *bio = mddev->flush_bio; 471 472 if (bio->bi_size == 0) 473 /* an empty barrier - all done */ 474 bio_endio(bio, 0); 475 else { 476 bio->bi_rw &= ~REQ_FLUSH; 477 mddev->pers->make_request(mddev, bio); 478 } 479 480 mddev->flush_bio = NULL; 481 wake_up(&mddev->sb_wait); 482 } 483 484 void md_flush_request(struct mddev *mddev, struct bio *bio) 485 { 486 spin_lock_irq(&mddev->write_lock); 487 wait_event_lock_irq(mddev->sb_wait, 488 !mddev->flush_bio, 489 mddev->write_lock, /*nothing*/); 490 mddev->flush_bio = bio; 491 spin_unlock_irq(&mddev->write_lock); 492 493 INIT_WORK(&mddev->flush_work, submit_flushes); 494 queue_work(md_wq, &mddev->flush_work); 495 } 496 EXPORT_SYMBOL(md_flush_request); 497 498 /* Support for plugging. 499 * This mirrors the plugging support in request_queue, but does not 500 * require having a whole queue or request structures. 501 * We allocate an md_plug_cb for each md device and each thread it gets 502 * plugged on. This links tot the private plug_handle structure in the 503 * personality data where we keep a count of the number of outstanding 504 * plugs so other code can see if a plug is active. 505 */ 506 struct md_plug_cb { 507 struct blk_plug_cb cb; 508 struct mddev *mddev; 509 }; 510 511 static void plugger_unplug(struct blk_plug_cb *cb) 512 { 513 struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb); 514 if (atomic_dec_and_test(&mdcb->mddev->plug_cnt)) 515 md_wakeup_thread(mdcb->mddev->thread); 516 kfree(mdcb); 517 } 518 519 /* Check that an unplug wakeup will come shortly. 520 * If not, wakeup the md thread immediately 521 */ 522 int mddev_check_plugged(struct mddev *mddev) 523 { 524 struct blk_plug *plug = current->plug; 525 struct md_plug_cb *mdcb; 526 527 if (!plug) 528 return 0; 529 530 list_for_each_entry(mdcb, &plug->cb_list, cb.list) { 531 if (mdcb->cb.callback == plugger_unplug && 532 mdcb->mddev == mddev) { 533 /* Already on the list, move to top */ 534 if (mdcb != list_first_entry(&plug->cb_list, 535 struct md_plug_cb, 536 cb.list)) 537 list_move(&mdcb->cb.list, &plug->cb_list); 538 return 1; 539 } 540 } 541 /* Not currently on the callback list */ 542 mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC); 543 if (!mdcb) 544 return 0; 545 546 mdcb->mddev = mddev; 547 mdcb->cb.callback = plugger_unplug; 548 atomic_inc(&mddev->plug_cnt); 549 list_add(&mdcb->cb.list, &plug->cb_list); 550 return 1; 551 } 552 EXPORT_SYMBOL_GPL(mddev_check_plugged); 553 554 static inline struct mddev *mddev_get(struct mddev *mddev) 555 { 556 atomic_inc(&mddev->active); 557 return mddev; 558 } 559 560 static void mddev_delayed_delete(struct work_struct *ws); 561 562 static void mddev_put(struct mddev *mddev) 563 { 564 struct bio_set *bs = NULL; 565 566 if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock)) 567 return; 568 if (!mddev->raid_disks && list_empty(&mddev->disks) && 569 mddev->ctime == 0 && !mddev->hold_active) { 570 /* Array is not configured at all, and not held active, 571 * so destroy it */ 572 list_del_init(&mddev->all_mddevs); 573 bs = mddev->bio_set; 574 mddev->bio_set = NULL; 575 if (mddev->gendisk) { 576 /* We did a probe so need to clean up. 
Call 577 * queue_work inside the spinlock so that 578 * flush_workqueue() after mddev_find will 579 * succeed in waiting for the work to be done. 580 */ 581 INIT_WORK(&mddev->del_work, mddev_delayed_delete); 582 queue_work(md_misc_wq, &mddev->del_work); 583 } else 584 kfree(mddev); 585 } 586 spin_unlock(&all_mddevs_lock); 587 if (bs) 588 bioset_free(bs); 589 } 590 591 void mddev_init(struct mddev *mddev) 592 { 593 mutex_init(&mddev->open_mutex); 594 mutex_init(&mddev->reconfig_mutex); 595 mutex_init(&mddev->bitmap_info.mutex); 596 INIT_LIST_HEAD(&mddev->disks); 597 INIT_LIST_HEAD(&mddev->all_mddevs); 598 init_timer(&mddev->safemode_timer); 599 atomic_set(&mddev->active, 1); 600 atomic_set(&mddev->openers, 0); 601 atomic_set(&mddev->active_io, 0); 602 atomic_set(&mddev->plug_cnt, 0); 603 spin_lock_init(&mddev->write_lock); 604 atomic_set(&mddev->flush_pending, 0); 605 init_waitqueue_head(&mddev->sb_wait); 606 init_waitqueue_head(&mddev->recovery_wait); 607 mddev->reshape_position = MaxSector; 608 mddev->resync_min = 0; 609 mddev->resync_max = MaxSector; 610 mddev->level = LEVEL_NONE; 611 } 612 EXPORT_SYMBOL_GPL(mddev_init); 613 614 static struct mddev * mddev_find(dev_t unit) 615 { 616 struct mddev *mddev, *new = NULL; 617 618 if (unit && MAJOR(unit) != MD_MAJOR) 619 unit &= ~((1<<MdpMinorShift)-1); 620 621 retry: 622 spin_lock(&all_mddevs_lock); 623 624 if (unit) { 625 list_for_each_entry(mddev, &all_mddevs, all_mddevs) 626 if (mddev->unit == unit) { 627 mddev_get(mddev); 628 spin_unlock(&all_mddevs_lock); 629 kfree(new); 630 return mddev; 631 } 632 633 if (new) { 634 list_add(&new->all_mddevs, &all_mddevs); 635 spin_unlock(&all_mddevs_lock); 636 new->hold_active = UNTIL_IOCTL; 637 return new; 638 } 639 } else if (new) { 640 /* find an unused unit number */ 641 static int next_minor = 512; 642 int start = next_minor; 643 int is_free = 0; 644 int dev = 0; 645 while (!is_free) { 646 dev = MKDEV(MD_MAJOR, next_minor); 647 next_minor++; 648 if (next_minor > MINORMASK) 649 next_minor = 0; 650 if (next_minor == start) { 651 /* Oh dear, all in use. */ 652 spin_unlock(&all_mddevs_lock); 653 kfree(new); 654 return NULL; 655 } 656 657 is_free = 1; 658 list_for_each_entry(mddev, &all_mddevs, all_mddevs) 659 if (mddev->unit == dev) { 660 is_free = 0; 661 break; 662 } 663 } 664 new->unit = dev; 665 new->md_minor = MINOR(dev); 666 new->hold_active = UNTIL_STOP; 667 list_add(&new->all_mddevs, &all_mddevs); 668 spin_unlock(&all_mddevs_lock); 669 return new; 670 } 671 spin_unlock(&all_mddevs_lock); 672 673 new = kzalloc(sizeof(*new), GFP_KERNEL); 674 if (!new) 675 return NULL; 676 677 new->unit = unit; 678 if (MAJOR(unit) == MD_MAJOR) 679 new->md_minor = MINOR(unit); 680 else 681 new->md_minor = MINOR(unit) >> MdpMinorShift; 682 683 mddev_init(new); 684 685 goto retry; 686 } 687 688 static inline int mddev_lock(struct mddev * mddev) 689 { 690 return mutex_lock_interruptible(&mddev->reconfig_mutex); 691 } 692 693 static inline int mddev_is_locked(struct mddev *mddev) 694 { 695 return mutex_is_locked(&mddev->reconfig_mutex); 696 } 697 698 static inline int mddev_trylock(struct mddev * mddev) 699 { 700 return mutex_trylock(&mddev->reconfig_mutex); 701 } 702 703 static struct attribute_group md_redundancy_group; 704 705 static void mddev_unlock(struct mddev * mddev) 706 { 707 if (mddev->to_remove) { 708 /* These cannot be removed under reconfig_mutex as 709 * an access to the files will try to take reconfig_mutex 710 * while holding the file unremovable, which leads to 711 * a deadlock. 
712 * So hold set sysfs_active while the remove in happeing, 713 * and anything else which might set ->to_remove or my 714 * otherwise change the sysfs namespace will fail with 715 * -EBUSY if sysfs_active is still set. 716 * We set sysfs_active under reconfig_mutex and elsewhere 717 * test it under the same mutex to ensure its correct value 718 * is seen. 719 */ 720 struct attribute_group *to_remove = mddev->to_remove; 721 mddev->to_remove = NULL; 722 mddev->sysfs_active = 1; 723 mutex_unlock(&mddev->reconfig_mutex); 724 725 if (mddev->kobj.sd) { 726 if (to_remove != &md_redundancy_group) 727 sysfs_remove_group(&mddev->kobj, to_remove); 728 if (mddev->pers == NULL || 729 mddev->pers->sync_request == NULL) { 730 sysfs_remove_group(&mddev->kobj, &md_redundancy_group); 731 if (mddev->sysfs_action) 732 sysfs_put(mddev->sysfs_action); 733 mddev->sysfs_action = NULL; 734 } 735 } 736 mddev->sysfs_active = 0; 737 } else 738 mutex_unlock(&mddev->reconfig_mutex); 739 740 /* As we've dropped the mutex we need a spinlock to 741 * make sure the thread doesn't disappear 742 */ 743 spin_lock(&pers_lock); 744 md_wakeup_thread(mddev->thread); 745 spin_unlock(&pers_lock); 746 } 747 748 static struct md_rdev * find_rdev_nr(struct mddev *mddev, int nr) 749 { 750 struct md_rdev *rdev; 751 752 list_for_each_entry(rdev, &mddev->disks, same_set) 753 if (rdev->desc_nr == nr) 754 return rdev; 755 756 return NULL; 757 } 758 759 static struct md_rdev * find_rdev(struct mddev * mddev, dev_t dev) 760 { 761 struct md_rdev *rdev; 762 763 list_for_each_entry(rdev, &mddev->disks, same_set) 764 if (rdev->bdev->bd_dev == dev) 765 return rdev; 766 767 return NULL; 768 } 769 770 static struct md_personality *find_pers(int level, char *clevel) 771 { 772 struct md_personality *pers; 773 list_for_each_entry(pers, &pers_list, list) { 774 if (level != LEVEL_NONE && pers->level == level) 775 return pers; 776 if (strcmp(pers->name, clevel)==0) 777 return pers; 778 } 779 return NULL; 780 } 781 782 /* return the offset of the super block in 512byte sectors */ 783 static inline sector_t calc_dev_sboffset(struct md_rdev *rdev) 784 { 785 sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512; 786 return MD_NEW_SIZE_SECTORS(num_sectors); 787 } 788 789 static int alloc_disk_sb(struct md_rdev * rdev) 790 { 791 if (rdev->sb_page) 792 MD_BUG(); 793 794 rdev->sb_page = alloc_page(GFP_KERNEL); 795 if (!rdev->sb_page) { 796 printk(KERN_ALERT "md: out of memory.\n"); 797 return -ENOMEM; 798 } 799 800 return 0; 801 } 802 803 static void free_disk_sb(struct md_rdev * rdev) 804 { 805 if (rdev->sb_page) { 806 put_page(rdev->sb_page); 807 rdev->sb_loaded = 0; 808 rdev->sb_page = NULL; 809 rdev->sb_start = 0; 810 rdev->sectors = 0; 811 } 812 if (rdev->bb_page) { 813 put_page(rdev->bb_page); 814 rdev->bb_page = NULL; 815 } 816 } 817 818 819 static void super_written(struct bio *bio, int error) 820 { 821 struct md_rdev *rdev = bio->bi_private; 822 struct mddev *mddev = rdev->mddev; 823 824 if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) { 825 printk("md: super_written gets error=%d, uptodate=%d\n", 826 error, test_bit(BIO_UPTODATE, &bio->bi_flags)); 827 WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags)); 828 md_error(mddev, rdev); 829 } 830 831 if (atomic_dec_and_test(&mddev->pending_writes)) 832 wake_up(&mddev->sb_wait); 833 bio_put(bio); 834 } 835 836 void md_super_write(struct mddev *mddev, struct md_rdev *rdev, 837 sector_t sector, int size, struct page *page) 838 { 839 /* write first size bytes of page to sector of rdev 840 * Increment 
mddev->pending_writes before returning 841 * and decrement it on completion, waking up sb_wait 842 * if zero is reached. 843 * If an error occurred, call md_error 844 */ 845 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev); 846 847 bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev; 848 bio->bi_sector = sector; 849 bio_add_page(bio, page, size, 0); 850 bio->bi_private = rdev; 851 bio->bi_end_io = super_written; 852 853 atomic_inc(&mddev->pending_writes); 854 submit_bio(WRITE_FLUSH_FUA, bio); 855 } 856 857 void md_super_wait(struct mddev *mddev) 858 { 859 /* wait for all superblock writes that were scheduled to complete */ 860 DEFINE_WAIT(wq); 861 for(;;) { 862 prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE); 863 if (atomic_read(&mddev->pending_writes)==0) 864 break; 865 schedule(); 866 } 867 finish_wait(&mddev->sb_wait, &wq); 868 } 869 870 static void bi_complete(struct bio *bio, int error) 871 { 872 complete((struct completion*)bio->bi_private); 873 } 874 875 int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, 876 struct page *page, int rw, bool metadata_op) 877 { 878 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev); 879 struct completion event; 880 int ret; 881 882 rw |= REQ_SYNC; 883 884 bio->bi_bdev = (metadata_op && rdev->meta_bdev) ? 885 rdev->meta_bdev : rdev->bdev; 886 if (metadata_op) 887 bio->bi_sector = sector + rdev->sb_start; 888 else 889 bio->bi_sector = sector + rdev->data_offset; 890 bio_add_page(bio, page, size, 0); 891 init_completion(&event); 892 bio->bi_private = &event; 893 bio->bi_end_io = bi_complete; 894 submit_bio(rw, bio); 895 wait_for_completion(&event); 896 897 ret = test_bit(BIO_UPTODATE, &bio->bi_flags); 898 bio_put(bio); 899 return ret; 900 } 901 EXPORT_SYMBOL_GPL(sync_page_io); 902 903 static int read_disk_sb(struct md_rdev * rdev, int size) 904 { 905 char b[BDEVNAME_SIZE]; 906 if (!rdev->sb_page) { 907 MD_BUG(); 908 return -EINVAL; 909 } 910 if (rdev->sb_loaded) 911 return 0; 912 913 914 if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true)) 915 goto fail; 916 rdev->sb_loaded = 1; 917 return 0; 918 919 fail: 920 printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n", 921 bdevname(rdev->bdev,b)); 922 return -EINVAL; 923 } 924 925 static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2) 926 { 927 return sb1->set_uuid0 == sb2->set_uuid0 && 928 sb1->set_uuid1 == sb2->set_uuid1 && 929 sb1->set_uuid2 == sb2->set_uuid2 && 930 sb1->set_uuid3 == sb2->set_uuid3; 931 } 932 933 static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2) 934 { 935 int ret; 936 mdp_super_t *tmp1, *tmp2; 937 938 tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL); 939 tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL); 940 941 if (!tmp1 || !tmp2) { 942 ret = 0; 943 printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n"); 944 goto abort; 945 } 946 947 *tmp1 = *sb1; 948 *tmp2 = *sb2; 949 950 /* 951 * nr_disks is not constant 952 */ 953 tmp1->nr_disks = 0; 954 tmp2->nr_disks = 0; 955 956 ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0); 957 abort: 958 kfree(tmp1); 959 kfree(tmp2); 960 return ret; 961 } 962 963 964 static u32 md_csum_fold(u32 csum) 965 { 966 csum = (csum & 0xffff) + (csum >> 16); 967 return (csum & 0xffff) + (csum >> 16); 968 } 969 970 static unsigned int calc_sb_csum(mdp_super_t * sb) 971 { 972 u64 newcsum = 0; 973 u32 *sb32 = (u32*)sb; 974 int i; 975 unsigned int disk_csum, csum; 976 977 disk_csum = sb->sb_csum; 978 sb->sb_csum = 0; 979 980 for (i = 0; i < MD_SB_BYTES/4 ; i++) 981 newcsum += 
sb32[i]; 982 csum = (newcsum & 0xffffffff) + (newcsum>>32); 983 984 985 #ifdef CONFIG_ALPHA 986 /* This used to use csum_partial, which was wrong for several 987 * reasons including that different results are returned on 988 * different architectures. It isn't critical that we get exactly 989 * the same return value as before (we always csum_fold before 990 * testing, and that removes any differences). However as we 991 * know that csum_partial always returned a 16bit value on 992 * alphas, do a fold to maximise conformity to previous behaviour. 993 */ 994 sb->sb_csum = md_csum_fold(disk_csum); 995 #else 996 sb->sb_csum = disk_csum; 997 #endif 998 return csum; 999 } 1000 1001 1002 /* 1003 * Handle superblock details. 1004 * We want to be able to handle multiple superblock formats 1005 * so we have a common interface to them all, and an array of 1006 * different handlers. 1007 * We rely on user-space to write the initial superblock, and support 1008 * reading and updating of superblocks. 1009 * Interface methods are: 1010 * int load_super(struct md_rdev *dev, struct md_rdev *refdev, int minor_version) 1011 * loads and validates a superblock on dev. 1012 * if refdev != NULL, compare superblocks on both devices 1013 * Return: 1014 * 0 - dev has a superblock that is compatible with refdev 1015 * 1 - dev has a superblock that is compatible and newer than refdev 1016 * so dev should be used as the refdev in future 1017 * -EINVAL superblock incompatible or invalid 1018 * -othererror e.g. -EIO 1019 * 1020 * int validate_super(struct mddev *mddev, struct md_rdev *dev) 1021 * Verify that dev is acceptable into mddev. 1022 * The first time, mddev->raid_disks will be 0, and data from 1023 * dev should be merged in. Subsequent calls check that dev 1024 * is new enough. Return 0 or -EINVAL 1025 * 1026 * void sync_super(struct mddev *mddev, struct md_rdev *dev) 1027 * Update the superblock for rdev with data in mddev 1028 * This does not write to disc. 1029 * 1030 */ 1031 1032 struct super_type { 1033 char *name; 1034 struct module *owner; 1035 int (*load_super)(struct md_rdev *rdev, struct md_rdev *refdev, 1036 int minor_version); 1037 int (*validate_super)(struct mddev *mddev, struct md_rdev *rdev); 1038 void (*sync_super)(struct mddev *mddev, struct md_rdev *rdev); 1039 unsigned long long (*rdev_size_change)(struct md_rdev *rdev, 1040 sector_t num_sectors); 1041 }; 1042 1043 /* 1044 * Check that the given mddev has no bitmap. 1045 * 1046 * This function is called from the run method of all personalities that do not 1047 * support bitmaps. It prints an error message and returns non-zero if mddev 1048 * has a bitmap. Otherwise, it returns 0. 1049 * 1050 */ 1051 int md_check_no_bitmap(struct mddev *mddev) 1052 { 1053 if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset) 1054 return 0; 1055 printk(KERN_ERR "%s: bitmaps are not supported for %s\n", 1056 mdname(mddev), mddev->pers->name); 1057 return 1; 1058 } 1059 EXPORT_SYMBOL(md_check_no_bitmap); 1060 1061 /* 1062 * load_super for 0.90.0 1063 */ 1064 static int super_90_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version) 1065 { 1066 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 1067 mdp_super_t *sb; 1068 int ret; 1069 1070 /* 1071 * Calculate the position of the superblock (512byte sectors), 1072 * it's at the end of the disk. 1073 * 1074 * It also happens to be a multiple of 4Kb. 
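	 * (calc_dev_sboffset() rounds the device size down to a 64KiB boundary
	 * and then steps back one 64KiB reservation; e.g. a 16777216-sector
	 * device gets its superblock at sector 16777088, assuming
	 * MD_RESERVED_SECTORS is 128.)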
1075 */ 1076 rdev->sb_start = calc_dev_sboffset(rdev); 1077 1078 ret = read_disk_sb(rdev, MD_SB_BYTES); 1079 if (ret) return ret; 1080 1081 ret = -EINVAL; 1082 1083 bdevname(rdev->bdev, b); 1084 sb = page_address(rdev->sb_page); 1085 1086 if (sb->md_magic != MD_SB_MAGIC) { 1087 printk(KERN_ERR "md: invalid raid superblock magic on %s\n", 1088 b); 1089 goto abort; 1090 } 1091 1092 if (sb->major_version != 0 || 1093 sb->minor_version < 90 || 1094 sb->minor_version > 91) { 1095 printk(KERN_WARNING "Bad version number %d.%d on %s\n", 1096 sb->major_version, sb->minor_version, 1097 b); 1098 goto abort; 1099 } 1100 1101 if (sb->raid_disks <= 0) 1102 goto abort; 1103 1104 if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) { 1105 printk(KERN_WARNING "md: invalid superblock checksum on %s\n", 1106 b); 1107 goto abort; 1108 } 1109 1110 rdev->preferred_minor = sb->md_minor; 1111 rdev->data_offset = 0; 1112 rdev->sb_size = MD_SB_BYTES; 1113 rdev->badblocks.shift = -1; 1114 1115 if (sb->level == LEVEL_MULTIPATH) 1116 rdev->desc_nr = -1; 1117 else 1118 rdev->desc_nr = sb->this_disk.number; 1119 1120 if (!refdev) { 1121 ret = 1; 1122 } else { 1123 __u64 ev1, ev2; 1124 mdp_super_t *refsb = page_address(refdev->sb_page); 1125 if (!uuid_equal(refsb, sb)) { 1126 printk(KERN_WARNING "md: %s has different UUID to %s\n", 1127 b, bdevname(refdev->bdev,b2)); 1128 goto abort; 1129 } 1130 if (!sb_equal(refsb, sb)) { 1131 printk(KERN_WARNING "md: %s has same UUID" 1132 " but different superblock to %s\n", 1133 b, bdevname(refdev->bdev, b2)); 1134 goto abort; 1135 } 1136 ev1 = md_event(sb); 1137 ev2 = md_event(refsb); 1138 if (ev1 > ev2) 1139 ret = 1; 1140 else 1141 ret = 0; 1142 } 1143 rdev->sectors = rdev->sb_start; 1144 /* Limit to 4TB as metadata cannot record more than that */ 1145 if (rdev->sectors >= (2ULL << 32)) 1146 rdev->sectors = (2ULL << 32) - 2; 1147 1148 if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1) 1149 /* "this cannot possibly happen" ... 
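		 * (i.e. the device is smaller than the component size recorded
		 * in the superblock; levels below RAID1 don't require a uniform
		 * per-device size, so they are not checked here)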
*/ 1150 ret = -EINVAL; 1151 1152 abort: 1153 return ret; 1154 } 1155 1156 /* 1157 * validate_super for 0.90.0 1158 */ 1159 static int super_90_validate(struct mddev *mddev, struct md_rdev *rdev) 1160 { 1161 mdp_disk_t *desc; 1162 mdp_super_t *sb = page_address(rdev->sb_page); 1163 __u64 ev1 = md_event(sb); 1164 1165 rdev->raid_disk = -1; 1166 clear_bit(Faulty, &rdev->flags); 1167 clear_bit(In_sync, &rdev->flags); 1168 clear_bit(WriteMostly, &rdev->flags); 1169 1170 if (mddev->raid_disks == 0) { 1171 mddev->major_version = 0; 1172 mddev->minor_version = sb->minor_version; 1173 mddev->patch_version = sb->patch_version; 1174 mddev->external = 0; 1175 mddev->chunk_sectors = sb->chunk_size >> 9; 1176 mddev->ctime = sb->ctime; 1177 mddev->utime = sb->utime; 1178 mddev->level = sb->level; 1179 mddev->clevel[0] = 0; 1180 mddev->layout = sb->layout; 1181 mddev->raid_disks = sb->raid_disks; 1182 mddev->dev_sectors = ((sector_t)sb->size) * 2; 1183 mddev->events = ev1; 1184 mddev->bitmap_info.offset = 0; 1185 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; 1186 1187 if (mddev->minor_version >= 91) { 1188 mddev->reshape_position = sb->reshape_position; 1189 mddev->delta_disks = sb->delta_disks; 1190 mddev->new_level = sb->new_level; 1191 mddev->new_layout = sb->new_layout; 1192 mddev->new_chunk_sectors = sb->new_chunk >> 9; 1193 } else { 1194 mddev->reshape_position = MaxSector; 1195 mddev->delta_disks = 0; 1196 mddev->new_level = mddev->level; 1197 mddev->new_layout = mddev->layout; 1198 mddev->new_chunk_sectors = mddev->chunk_sectors; 1199 } 1200 1201 if (sb->state & (1<<MD_SB_CLEAN)) 1202 mddev->recovery_cp = MaxSector; 1203 else { 1204 if (sb->events_hi == sb->cp_events_hi && 1205 sb->events_lo == sb->cp_events_lo) { 1206 mddev->recovery_cp = sb->recovery_cp; 1207 } else 1208 mddev->recovery_cp = 0; 1209 } 1210 1211 memcpy(mddev->uuid+0, &sb->set_uuid0, 4); 1212 memcpy(mddev->uuid+4, &sb->set_uuid1, 4); 1213 memcpy(mddev->uuid+8, &sb->set_uuid2, 4); 1214 memcpy(mddev->uuid+12,&sb->set_uuid3, 4); 1215 1216 mddev->max_disks = MD_SB_DISKS; 1217 1218 if (sb->state & (1<<MD_SB_BITMAP_PRESENT) && 1219 mddev->bitmap_info.file == NULL) 1220 mddev->bitmap_info.offset = 1221 mddev->bitmap_info.default_offset; 1222 1223 } else if (mddev->pers == NULL) { 1224 /* Insist on good event counter while assembling, except 1225 * for spares (which don't need an event count) */ 1226 ++ev1; 1227 if (sb->disks[rdev->desc_nr].state & ( 1228 (1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE))) 1229 if (ev1 < mddev->events) 1230 return -EINVAL; 1231 } else if (mddev->bitmap) { 1232 /* if adding to array with a bitmap, then we can accept an 1233 * older device ... but not too old. 1234 */ 1235 if (ev1 < mddev->bitmap->events_cleared) 1236 return 0; 1237 } else { 1238 if (ev1 < mddev->events) 1239 /* just a hot-add of a new device, leave raid_disk at -1 */ 1240 return 0; 1241 } 1242 1243 if (mddev->level != LEVEL_MULTIPATH) { 1244 desc = sb->disks + rdev->desc_nr; 1245 1246 if (desc->state & (1<<MD_DISK_FAULTY)) 1247 set_bit(Faulty, &rdev->flags); 1248 else if (desc->state & (1<<MD_DISK_SYNC) /* && 1249 desc->raid_disk < mddev->raid_disks */) { 1250 set_bit(In_sync, &rdev->flags); 1251 rdev->raid_disk = desc->raid_disk; 1252 } else if (desc->state & (1<<MD_DISK_ACTIVE)) { 1253 /* active but not in sync implies recovery up to 1254 * reshape position. 
We don't know exactly where 1255 * that is, so set to zero for now */ 1256 if (mddev->minor_version >= 91) { 1257 rdev->recovery_offset = 0; 1258 rdev->raid_disk = desc->raid_disk; 1259 } 1260 } 1261 if (desc->state & (1<<MD_DISK_WRITEMOSTLY)) 1262 set_bit(WriteMostly, &rdev->flags); 1263 } else /* MULTIPATH are always insync */ 1264 set_bit(In_sync, &rdev->flags); 1265 return 0; 1266 } 1267 1268 /* 1269 * sync_super for 0.90.0 1270 */ 1271 static void super_90_sync(struct mddev *mddev, struct md_rdev *rdev) 1272 { 1273 mdp_super_t *sb; 1274 struct md_rdev *rdev2; 1275 int next_spare = mddev->raid_disks; 1276 1277 1278 /* make rdev->sb match mddev data.. 1279 * 1280 * 1/ zero out disks 1281 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare); 1282 * 3/ any empty disks < next_spare become removed 1283 * 1284 * disks[0] gets initialised to REMOVED because 1285 * we cannot be sure from other fields if it has 1286 * been initialised or not. 1287 */ 1288 int i; 1289 int active=0, working=0,failed=0,spare=0,nr_disks=0; 1290 1291 rdev->sb_size = MD_SB_BYTES; 1292 1293 sb = page_address(rdev->sb_page); 1294 1295 memset(sb, 0, sizeof(*sb)); 1296 1297 sb->md_magic = MD_SB_MAGIC; 1298 sb->major_version = mddev->major_version; 1299 sb->patch_version = mddev->patch_version; 1300 sb->gvalid_words = 0; /* ignored */ 1301 memcpy(&sb->set_uuid0, mddev->uuid+0, 4); 1302 memcpy(&sb->set_uuid1, mddev->uuid+4, 4); 1303 memcpy(&sb->set_uuid2, mddev->uuid+8, 4); 1304 memcpy(&sb->set_uuid3, mddev->uuid+12,4); 1305 1306 sb->ctime = mddev->ctime; 1307 sb->level = mddev->level; 1308 sb->size = mddev->dev_sectors / 2; 1309 sb->raid_disks = mddev->raid_disks; 1310 sb->md_minor = mddev->md_minor; 1311 sb->not_persistent = 0; 1312 sb->utime = mddev->utime; 1313 sb->state = 0; 1314 sb->events_hi = (mddev->events>>32); 1315 sb->events_lo = (u32)mddev->events; 1316 1317 if (mddev->reshape_position == MaxSector) 1318 sb->minor_version = 90; 1319 else { 1320 sb->minor_version = 91; 1321 sb->reshape_position = mddev->reshape_position; 1322 sb->new_level = mddev->new_level; 1323 sb->delta_disks = mddev->delta_disks; 1324 sb->new_layout = mddev->new_layout; 1325 sb->new_chunk = mddev->new_chunk_sectors << 9; 1326 } 1327 mddev->minor_version = sb->minor_version; 1328 if (mddev->in_sync) 1329 { 1330 sb->recovery_cp = mddev->recovery_cp; 1331 sb->cp_events_hi = (mddev->events>>32); 1332 sb->cp_events_lo = (u32)mddev->events; 1333 if (mddev->recovery_cp == MaxSector) 1334 sb->state = (1<< MD_SB_CLEAN); 1335 } else 1336 sb->recovery_cp = 0; 1337 1338 sb->layout = mddev->layout; 1339 sb->chunk_size = mddev->chunk_sectors << 9; 1340 1341 if (mddev->bitmap && mddev->bitmap_info.file == NULL) 1342 sb->state |= (1<<MD_SB_BITMAP_PRESENT); 1343 1344 sb->disks[0].state = (1<<MD_DISK_REMOVED); 1345 list_for_each_entry(rdev2, &mddev->disks, same_set) { 1346 mdp_disk_t *d; 1347 int desc_nr; 1348 int is_active = test_bit(In_sync, &rdev2->flags); 1349 1350 if (rdev2->raid_disk >= 0 && 1351 sb->minor_version >= 91) 1352 /* we have nowhere to store the recovery_offset, 1353 * but if it is not below the reshape_position, 1354 * we can piggy-back on that. 
1355 */ 1356 is_active = 1; 1357 if (rdev2->raid_disk < 0 || 1358 test_bit(Faulty, &rdev2->flags)) 1359 is_active = 0; 1360 if (is_active) 1361 desc_nr = rdev2->raid_disk; 1362 else 1363 desc_nr = next_spare++; 1364 rdev2->desc_nr = desc_nr; 1365 d = &sb->disks[rdev2->desc_nr]; 1366 nr_disks++; 1367 d->number = rdev2->desc_nr; 1368 d->major = MAJOR(rdev2->bdev->bd_dev); 1369 d->minor = MINOR(rdev2->bdev->bd_dev); 1370 if (is_active) 1371 d->raid_disk = rdev2->raid_disk; 1372 else 1373 d->raid_disk = rdev2->desc_nr; /* compatibility */ 1374 if (test_bit(Faulty, &rdev2->flags)) 1375 d->state = (1<<MD_DISK_FAULTY); 1376 else if (is_active) { 1377 d->state = (1<<MD_DISK_ACTIVE); 1378 if (test_bit(In_sync, &rdev2->flags)) 1379 d->state |= (1<<MD_DISK_SYNC); 1380 active++; 1381 working++; 1382 } else { 1383 d->state = 0; 1384 spare++; 1385 working++; 1386 } 1387 if (test_bit(WriteMostly, &rdev2->flags)) 1388 d->state |= (1<<MD_DISK_WRITEMOSTLY); 1389 } 1390 /* now set the "removed" and "faulty" bits on any missing devices */ 1391 for (i=0 ; i < mddev->raid_disks ; i++) { 1392 mdp_disk_t *d = &sb->disks[i]; 1393 if (d->state == 0 && d->number == 0) { 1394 d->number = i; 1395 d->raid_disk = i; 1396 d->state = (1<<MD_DISK_REMOVED); 1397 d->state |= (1<<MD_DISK_FAULTY); 1398 failed++; 1399 } 1400 } 1401 sb->nr_disks = nr_disks; 1402 sb->active_disks = active; 1403 sb->working_disks = working; 1404 sb->failed_disks = failed; 1405 sb->spare_disks = spare; 1406 1407 sb->this_disk = sb->disks[rdev->desc_nr]; 1408 sb->sb_csum = calc_sb_csum(sb); 1409 } 1410 1411 /* 1412 * rdev_size_change for 0.90.0 1413 */ 1414 static unsigned long long 1415 super_90_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) 1416 { 1417 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) 1418 return 0; /* component must fit device */ 1419 if (rdev->mddev->bitmap_info.offset) 1420 return 0; /* can't move bitmap */ 1421 rdev->sb_start = calc_dev_sboffset(rdev); 1422 if (!num_sectors || num_sectors > rdev->sb_start) 1423 num_sectors = rdev->sb_start; 1424 /* Limit to 4TB as metadata cannot record more than that. 1425 * 4TB == 2^32 KB, or 2*2^32 sectors. 1426 */ 1427 if (num_sectors >= (2ULL << 32)) 1428 num_sectors = (2ULL << 32) - 2; 1429 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, 1430 rdev->sb_page); 1431 md_super_wait(rdev->mddev); 1432 return num_sectors; 1433 } 1434 1435 1436 /* 1437 * version 1 superblock 1438 */ 1439 1440 static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb) 1441 { 1442 __le32 disk_csum; 1443 u32 csum; 1444 unsigned long long newcsum; 1445 int size = 256 + le32_to_cpu(sb->max_dev)*2; 1446 __le32 *isuper = (__le32*)sb; 1447 int i; 1448 1449 disk_csum = sb->sb_csum; 1450 sb->sb_csum = 0; 1451 newcsum = 0; 1452 for (i=0; size>=4; size -= 4 ) 1453 newcsum += le32_to_cpu(*isuper++); 1454 1455 if (size == 2) 1456 newcsum += le16_to_cpu(*(__le16*) isuper); 1457 1458 csum = (newcsum & 0xffffffff) + (newcsum >> 32); 1459 sb->sb_csum = disk_csum; 1460 return cpu_to_le32(csum); 1461 } 1462 1463 static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors, 1464 int acknowledged); 1465 static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_version) 1466 { 1467 struct mdp_superblock_1 *sb; 1468 int ret; 1469 sector_t sb_start; 1470 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 1471 int bmask; 1472 1473 /* 1474 * Calculate the position of the superblock in 512byte sectors. 
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch(minor_version) {
	case 0:
		sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
		sb_start -= 8*2;
		sb_start &= ~(sector_t)(4*2-1);
		break;
	case 1:
		sb_start = 0;
		break;
	case 2:
		sb_start = 8;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_start = sb_start;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;


	sb = page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

	if (!rdev->bb_page) {
		rdev->bb_page = alloc_page(GFP_KERNEL);
		if (!rdev->bb_page)
			return -ENOMEM;
	}
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
	    rdev->badblocks.count == 0) {
		/* need to load the bad block list.
		 * Currently we limit it to one page.
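		 * Each 64-bit entry packs the start sector in its upper 54 bits
		 * and the length in its lower 10 bits, both shifted up by
		 * bblog_shift; an all-ones entry terminates the list early.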
1552 */ 1553 s32 offset; 1554 sector_t bb_sector; 1555 u64 *bbp; 1556 int i; 1557 int sectors = le16_to_cpu(sb->bblog_size); 1558 if (sectors > (PAGE_SIZE / 512)) 1559 return -EINVAL; 1560 offset = le32_to_cpu(sb->bblog_offset); 1561 if (offset == 0) 1562 return -EINVAL; 1563 bb_sector = (long long)offset; 1564 if (!sync_page_io(rdev, bb_sector, sectors << 9, 1565 rdev->bb_page, READ, true)) 1566 return -EIO; 1567 bbp = (u64 *)page_address(rdev->bb_page); 1568 rdev->badblocks.shift = sb->bblog_shift; 1569 for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) { 1570 u64 bb = le64_to_cpu(*bbp); 1571 int count = bb & (0x3ff); 1572 u64 sector = bb >> 10; 1573 sector <<= sb->bblog_shift; 1574 count <<= sb->bblog_shift; 1575 if (bb + 1 == 0) 1576 break; 1577 if (md_set_badblocks(&rdev->badblocks, 1578 sector, count, 1) == 0) 1579 return -EINVAL; 1580 } 1581 } else if (sb->bblog_offset == 0) 1582 rdev->badblocks.shift = -1; 1583 1584 if (!refdev) { 1585 ret = 1; 1586 } else { 1587 __u64 ev1, ev2; 1588 struct mdp_superblock_1 *refsb = page_address(refdev->sb_page); 1589 1590 if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 || 1591 sb->level != refsb->level || 1592 sb->layout != refsb->layout || 1593 sb->chunksize != refsb->chunksize) { 1594 printk(KERN_WARNING "md: %s has strangely different" 1595 " superblock to %s\n", 1596 bdevname(rdev->bdev,b), 1597 bdevname(refdev->bdev,b2)); 1598 return -EINVAL; 1599 } 1600 ev1 = le64_to_cpu(sb->events); 1601 ev2 = le64_to_cpu(refsb->events); 1602 1603 if (ev1 > ev2) 1604 ret = 1; 1605 else 1606 ret = 0; 1607 } 1608 if (minor_version) 1609 rdev->sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) - 1610 le64_to_cpu(sb->data_offset); 1611 else 1612 rdev->sectors = rdev->sb_start; 1613 if (rdev->sectors < le64_to_cpu(sb->data_size)) 1614 return -EINVAL; 1615 rdev->sectors = le64_to_cpu(sb->data_size); 1616 if (le64_to_cpu(sb->size) > rdev->sectors) 1617 return -EINVAL; 1618 return ret; 1619 } 1620 1621 static int super_1_validate(struct mddev *mddev, struct md_rdev *rdev) 1622 { 1623 struct mdp_superblock_1 *sb = page_address(rdev->sb_page); 1624 __u64 ev1 = le64_to_cpu(sb->events); 1625 1626 rdev->raid_disk = -1; 1627 clear_bit(Faulty, &rdev->flags); 1628 clear_bit(In_sync, &rdev->flags); 1629 clear_bit(WriteMostly, &rdev->flags); 1630 1631 if (mddev->raid_disks == 0) { 1632 mddev->major_version = 1; 1633 mddev->patch_version = 0; 1634 mddev->external = 0; 1635 mddev->chunk_sectors = le32_to_cpu(sb->chunksize); 1636 mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1); 1637 mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1); 1638 mddev->level = le32_to_cpu(sb->level); 1639 mddev->clevel[0] = 0; 1640 mddev->layout = le32_to_cpu(sb->layout); 1641 mddev->raid_disks = le32_to_cpu(sb->raid_disks); 1642 mddev->dev_sectors = le64_to_cpu(sb->size); 1643 mddev->events = ev1; 1644 mddev->bitmap_info.offset = 0; 1645 mddev->bitmap_info.default_offset = 1024 >> 9; 1646 1647 mddev->recovery_cp = le64_to_cpu(sb->resync_offset); 1648 memcpy(mddev->uuid, sb->set_uuid, 16); 1649 1650 mddev->max_disks = (4096-256)/2; 1651 1652 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) && 1653 mddev->bitmap_info.file == NULL ) 1654 mddev->bitmap_info.offset = 1655 (__s32)le32_to_cpu(sb->bitmap_offset); 1656 1657 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) { 1658 mddev->reshape_position = le64_to_cpu(sb->reshape_position); 1659 mddev->delta_disks = le32_to_cpu(sb->delta_disks); 1660 mddev->new_level = le32_to_cpu(sb->new_level); 1661 
			mddev->new_layout = le32_to_cpu(sb->new_layout);
			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on a good event counter while assembling, except for
		 * spares (which don't need an event count) */
		++ev1;
		if (rdev->desc_nr >= 0 &&
		    rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
		    le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		if (rdev->desc_nr < 0 ||
		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
			role = 0xffff;
			rdev->desc_nr = -1;
		} else
			role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET))
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
			else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
		if (le32_to_cpu(sb->feature_map) & MD_FEATURE_REPLACEMENT)
			set_bit(Replacement, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}

static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
{
	struct mdp_superblock_1 *sb;
	struct md_rdev *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data.
*/ 1730 1731 sb = page_address(rdev->sb_page); 1732 1733 sb->feature_map = 0; 1734 sb->pad0 = 0; 1735 sb->recovery_offset = cpu_to_le64(0); 1736 memset(sb->pad1, 0, sizeof(sb->pad1)); 1737 memset(sb->pad3, 0, sizeof(sb->pad3)); 1738 1739 sb->utime = cpu_to_le64((__u64)mddev->utime); 1740 sb->events = cpu_to_le64(mddev->events); 1741 if (mddev->in_sync) 1742 sb->resync_offset = cpu_to_le64(mddev->recovery_cp); 1743 else 1744 sb->resync_offset = cpu_to_le64(0); 1745 1746 sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors)); 1747 1748 sb->raid_disks = cpu_to_le32(mddev->raid_disks); 1749 sb->size = cpu_to_le64(mddev->dev_sectors); 1750 sb->chunksize = cpu_to_le32(mddev->chunk_sectors); 1751 sb->level = cpu_to_le32(mddev->level); 1752 sb->layout = cpu_to_le32(mddev->layout); 1753 1754 if (test_bit(WriteMostly, &rdev->flags)) 1755 sb->devflags |= WriteMostly1; 1756 else 1757 sb->devflags &= ~WriteMostly1; 1758 1759 if (mddev->bitmap && mddev->bitmap_info.file == NULL) { 1760 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset); 1761 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); 1762 } 1763 1764 if (rdev->raid_disk >= 0 && 1765 !test_bit(In_sync, &rdev->flags)) { 1766 sb->feature_map |= 1767 cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET); 1768 sb->recovery_offset = 1769 cpu_to_le64(rdev->recovery_offset); 1770 } 1771 if (test_bit(Replacement, &rdev->flags)) 1772 sb->feature_map |= 1773 cpu_to_le32(MD_FEATURE_REPLACEMENT); 1774 1775 if (mddev->reshape_position != MaxSector) { 1776 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE); 1777 sb->reshape_position = cpu_to_le64(mddev->reshape_position); 1778 sb->new_layout = cpu_to_le32(mddev->new_layout); 1779 sb->delta_disks = cpu_to_le32(mddev->delta_disks); 1780 sb->new_level = cpu_to_le32(mddev->new_level); 1781 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors); 1782 } 1783 1784 if (rdev->badblocks.count == 0) 1785 /* Nothing to do for bad blocks*/ ; 1786 else if (sb->bblog_offset == 0) 1787 /* Cannot record bad blocks on this device */ 1788 md_error(mddev, rdev); 1789 else { 1790 struct badblocks *bb = &rdev->badblocks; 1791 u64 *bbp = (u64 *)page_address(rdev->bb_page); 1792 u64 *p = bb->page; 1793 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS); 1794 if (bb->changed) { 1795 unsigned seq; 1796 1797 retry: 1798 seq = read_seqbegin(&bb->lock); 1799 1800 memset(bbp, 0xff, PAGE_SIZE); 1801 1802 for (i = 0 ; i < bb->count ; i++) { 1803 u64 internal_bb = *p++; 1804 u64 store_bb = ((BB_OFFSET(internal_bb) << 10) 1805 | BB_LEN(internal_bb)); 1806 *bbp++ = cpu_to_le64(store_bb); 1807 } 1808 if (read_seqretry(&bb->lock, seq)) 1809 goto retry; 1810 1811 bb->sector = (rdev->sb_start + 1812 (int)le32_to_cpu(sb->bblog_offset)); 1813 bb->size = le16_to_cpu(sb->bblog_size); 1814 bb->changed = 0; 1815 } 1816 } 1817 1818 max_dev = 0; 1819 list_for_each_entry(rdev2, &mddev->disks, same_set) 1820 if (rdev2->desc_nr+1 > max_dev) 1821 max_dev = rdev2->desc_nr+1; 1822 1823 if (max_dev > le32_to_cpu(sb->max_dev)) { 1824 int bmask; 1825 sb->max_dev = cpu_to_le32(max_dev); 1826 rdev->sb_size = max_dev * 2 + 256; 1827 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; 1828 if (rdev->sb_size & bmask) 1829 rdev->sb_size = (rdev->sb_size | bmask) + 1; 1830 } else 1831 max_dev = le32_to_cpu(sb->max_dev); 1832 1833 for (i=0; i<max_dev;i++) 1834 sb->dev_roles[i] = cpu_to_le16(0xfffe); 1835 1836 list_for_each_entry(rdev2, &mddev->disks, same_set) { 1837 i = rdev2->desc_nr; 1838 if (test_bit(Faulty, 
&rdev2->flags)) 1839 sb->dev_roles[i] = cpu_to_le16(0xfffe); 1840 else if (test_bit(In_sync, &rdev2->flags)) 1841 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); 1842 else if (rdev2->raid_disk >= 0) 1843 sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk); 1844 else 1845 sb->dev_roles[i] = cpu_to_le16(0xffff); 1846 } 1847 1848 sb->sb_csum = calc_sb_1_csum(sb); 1849 } 1850 1851 static unsigned long long 1852 super_1_rdev_size_change(struct md_rdev *rdev, sector_t num_sectors) 1853 { 1854 struct mdp_superblock_1 *sb; 1855 sector_t max_sectors; 1856 if (num_sectors && num_sectors < rdev->mddev->dev_sectors) 1857 return 0; /* component must fit device */ 1858 if (rdev->sb_start < rdev->data_offset) { 1859 /* minor versions 1 and 2; superblock before data */ 1860 max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9; 1861 max_sectors -= rdev->data_offset; 1862 if (!num_sectors || num_sectors > max_sectors) 1863 num_sectors = max_sectors; 1864 } else if (rdev->mddev->bitmap_info.offset) { 1865 /* minor version 0 with bitmap we can't move */ 1866 return 0; 1867 } else { 1868 /* minor version 0; superblock after data */ 1869 sector_t sb_start; 1870 sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2; 1871 sb_start &= ~(sector_t)(4*2 - 1); 1872 max_sectors = rdev->sectors + sb_start - rdev->sb_start; 1873 if (!num_sectors || num_sectors > max_sectors) 1874 num_sectors = max_sectors; 1875 rdev->sb_start = sb_start; 1876 } 1877 sb = page_address(rdev->sb_page); 1878 sb->data_size = cpu_to_le64(num_sectors); 1879 sb->super_offset = rdev->sb_start; 1880 sb->sb_csum = calc_sb_1_csum(sb); 1881 md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size, 1882 rdev->sb_page); 1883 md_super_wait(rdev->mddev); 1884 return num_sectors; 1885 } 1886 1887 static struct super_type super_types[] = { 1888 [0] = { 1889 .name = "0.90.0", 1890 .owner = THIS_MODULE, 1891 .load_super = super_90_load, 1892 .validate_super = super_90_validate, 1893 .sync_super = super_90_sync, 1894 .rdev_size_change = super_90_rdev_size_change, 1895 }, 1896 [1] = { 1897 .name = "md-1", 1898 .owner = THIS_MODULE, 1899 .load_super = super_1_load, 1900 .validate_super = super_1_validate, 1901 .sync_super = super_1_sync, 1902 .rdev_size_change = super_1_rdev_size_change, 1903 }, 1904 }; 1905 1906 static void sync_super(struct mddev *mddev, struct md_rdev *rdev) 1907 { 1908 if (mddev->sync_super) { 1909 mddev->sync_super(mddev, rdev); 1910 return; 1911 } 1912 1913 BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types)); 1914 1915 super_types[mddev->major_version].sync_super(mddev, rdev); 1916 } 1917 1918 static int match_mddev_units(struct mddev *mddev1, struct mddev *mddev2) 1919 { 1920 struct md_rdev *rdev, *rdev2; 1921 1922 rcu_read_lock(); 1923 rdev_for_each_rcu(rdev, mddev1) 1924 rdev_for_each_rcu(rdev2, mddev2) 1925 if (rdev->bdev->bd_contains == 1926 rdev2->bdev->bd_contains) { 1927 rcu_read_unlock(); 1928 return 1; 1929 } 1930 rcu_read_unlock(); 1931 return 0; 1932 } 1933 1934 static LIST_HEAD(pending_raid_disks); 1935 1936 /* 1937 * Try to register data integrity profile for an mddev 1938 * 1939 * This is called when an array is started and after a disk has been kicked 1940 * from the array. It only succeeds if all working and active component devices 1941 * are integrity capable with matching profiles. 
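 * A personality's ->run() typically finishes with something like
 *	return md_integrity_register(mddev);
 * (illustrative; raid0 and raid1 do this) so that a profile mismatch is
 * reported before the array goes live.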
1942 */ 1943 int md_integrity_register(struct mddev *mddev) 1944 { 1945 struct md_rdev *rdev, *reference = NULL; 1946 1947 if (list_empty(&mddev->disks)) 1948 return 0; /* nothing to do */ 1949 if (!mddev->gendisk || blk_get_integrity(mddev->gendisk)) 1950 return 0; /* shouldn't register, or already is */ 1951 list_for_each_entry(rdev, &mddev->disks, same_set) { 1952 /* skip spares and non-functional disks */ 1953 if (test_bit(Faulty, &rdev->flags)) 1954 continue; 1955 if (rdev->raid_disk < 0) 1956 continue; 1957 if (!reference) { 1958 /* Use the first rdev as the reference */ 1959 reference = rdev; 1960 continue; 1961 } 1962 /* does this rdev's profile match the reference profile? */ 1963 if (blk_integrity_compare(reference->bdev->bd_disk, 1964 rdev->bdev->bd_disk) < 0) 1965 return -EINVAL; 1966 } 1967 if (!reference || !bdev_get_integrity(reference->bdev)) 1968 return 0; 1969 /* 1970 * All component devices are integrity capable and have matching 1971 * profiles, register the common profile for the md device. 1972 */ 1973 if (blk_integrity_register(mddev->gendisk, 1974 bdev_get_integrity(reference->bdev)) != 0) { 1975 printk(KERN_ERR "md: failed to register integrity for %s\n", 1976 mdname(mddev)); 1977 return -EINVAL; 1978 } 1979 printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev)); 1980 if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) { 1981 printk(KERN_ERR "md: failed to create integrity pool for %s\n", 1982 mdname(mddev)); 1983 return -EINVAL; 1984 } 1985 return 0; 1986 } 1987 EXPORT_SYMBOL(md_integrity_register); 1988 1989 /* Disable data integrity if non-capable/non-matching disk is being added */ 1990 void md_integrity_add_rdev(struct md_rdev *rdev, struct mddev *mddev) 1991 { 1992 struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev); 1993 struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk); 1994 1995 if (!bi_mddev) /* nothing to do */ 1996 return; 1997 if (rdev->raid_disk < 0) /* skip spares */ 1998 return; 1999 if (bi_rdev && blk_integrity_compare(mddev->gendisk, 2000 rdev->bdev->bd_disk) >= 0) 2001 return; 2002 printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev)); 2003 blk_integrity_unregister(mddev->gendisk); 2004 } 2005 EXPORT_SYMBOL(md_integrity_add_rdev); 2006 2007 static int bind_rdev_to_array(struct md_rdev * rdev, struct mddev * mddev) 2008 { 2009 char b[BDEVNAME_SIZE]; 2010 struct kobject *ko; 2011 char *s; 2012 int err; 2013 2014 if (rdev->mddev) { 2015 MD_BUG(); 2016 return -EINVAL; 2017 } 2018 2019 /* prevent duplicates */ 2020 if (find_rdev(mddev, rdev->bdev->bd_dev)) 2021 return -EEXIST; 2022 2023 /* make sure rdev->sectors exceeds mddev->dev_sectors */ 2024 if (rdev->sectors && (mddev->dev_sectors == 0 || 2025 rdev->sectors < mddev->dev_sectors)) { 2026 if (mddev->pers) { 2027 /* Cannot change size, so fail 2028 * If mddev->level <= 0, then we don't care 2029 * about aligning sizes (e.g. linear) 2030 */ 2031 if (mddev->level > 0) 2032 return -ENOSPC; 2033 } else 2034 mddev->dev_sectors = rdev->sectors; 2035 } 2036 2037 /* Verify rdev->desc_nr is unique. 
2038 * If it is -1, assign a free number, else 2039 * check number is not in use 2040 */ 2041 if (rdev->desc_nr < 0) { 2042 int choice = 0; 2043 if (mddev->pers) choice = mddev->raid_disks; 2044 while (find_rdev_nr(mddev, choice)) 2045 choice++; 2046 rdev->desc_nr = choice; 2047 } else { 2048 if (find_rdev_nr(mddev, rdev->desc_nr)) 2049 return -EBUSY; 2050 } 2051 if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) { 2052 printk(KERN_WARNING "md: %s: array is limited to %d devices\n", 2053 mdname(mddev), mddev->max_disks); 2054 return -EBUSY; 2055 } 2056 bdevname(rdev->bdev,b); 2057 while ( (s=strchr(b, '/')) != NULL) 2058 *s = '!'; 2059 2060 rdev->mddev = mddev; 2061 printk(KERN_INFO "md: bind<%s>\n", b); 2062 2063 if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b))) 2064 goto fail; 2065 2066 ko = &part_to_dev(rdev->bdev->bd_part)->kobj; 2067 if (sysfs_create_link(&rdev->kobj, ko, "block")) 2068 /* failure here is OK */; 2069 rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state"); 2070 2071 list_add_rcu(&rdev->same_set, &mddev->disks); 2072 bd_link_disk_holder(rdev->bdev, mddev->gendisk); 2073 2074 /* May as well allow recovery to be retried once */ 2075 mddev->recovery_disabled++; 2076 2077 return 0; 2078 2079 fail: 2080 printk(KERN_WARNING "md: failed to register dev-%s for %s\n", 2081 b, mdname(mddev)); 2082 return err; 2083 } 2084 2085 static void md_delayed_delete(struct work_struct *ws) 2086 { 2087 struct md_rdev *rdev = container_of(ws, struct md_rdev, del_work); 2088 kobject_del(&rdev->kobj); 2089 kobject_put(&rdev->kobj); 2090 } 2091 2092 static void unbind_rdev_from_array(struct md_rdev * rdev) 2093 { 2094 char b[BDEVNAME_SIZE]; 2095 if (!rdev->mddev) { 2096 MD_BUG(); 2097 return; 2098 } 2099 bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk); 2100 list_del_rcu(&rdev->same_set); 2101 printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev,b)); 2102 rdev->mddev = NULL; 2103 sysfs_remove_link(&rdev->kobj, "block"); 2104 sysfs_put(rdev->sysfs_state); 2105 rdev->sysfs_state = NULL; 2106 kfree(rdev->badblocks.page); 2107 rdev->badblocks.count = 0; 2108 rdev->badblocks.page = NULL; 2109 /* We need to delay this, otherwise we can deadlock when 2110 * writing to 'remove' to "dev/state". We also need 2111 * to delay it due to rcu usage. 2112 */ 2113 synchronize_rcu(); 2114 INIT_WORK(&rdev->del_work, md_delayed_delete); 2115 kobject_get(&rdev->kobj); 2116 queue_work(md_misc_wq, &rdev->del_work); 2117 } 2118 2119 /* 2120 * prevent the device from being mounted, repartitioned or 2121 * otherwise reused by a RAID array (or any other kernel 2122 * subsystem), by bd_claiming the device. 2123 */ 2124 static int lock_rdev(struct md_rdev *rdev, dev_t dev, int shared) 2125 { 2126 int err = 0; 2127 struct block_device *bdev; 2128 char b[BDEVNAME_SIZE]; 2129 2130 bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL, 2131 shared ? 
(struct md_rdev *)lock_rdev : rdev); 2132 if (IS_ERR(bdev)) { 2133 printk(KERN_ERR "md: could not open %s.\n", 2134 __bdevname(dev, b)); 2135 return PTR_ERR(bdev); 2136 } 2137 rdev->bdev = bdev; 2138 return err; 2139 } 2140 2141 static void unlock_rdev(struct md_rdev *rdev) 2142 { 2143 struct block_device *bdev = rdev->bdev; 2144 rdev->bdev = NULL; 2145 if (!bdev) 2146 MD_BUG(); 2147 blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL); 2148 } 2149 2150 void md_autodetect_dev(dev_t dev); 2151 2152 static void export_rdev(struct md_rdev * rdev) 2153 { 2154 char b[BDEVNAME_SIZE]; 2155 printk(KERN_INFO "md: export_rdev(%s)\n", 2156 bdevname(rdev->bdev,b)); 2157 if (rdev->mddev) 2158 MD_BUG(); 2159 free_disk_sb(rdev); 2160 #ifndef MODULE 2161 if (test_bit(AutoDetected, &rdev->flags)) 2162 md_autodetect_dev(rdev->bdev->bd_dev); 2163 #endif 2164 unlock_rdev(rdev); 2165 kobject_put(&rdev->kobj); 2166 } 2167 2168 static void kick_rdev_from_array(struct md_rdev * rdev) 2169 { 2170 unbind_rdev_from_array(rdev); 2171 export_rdev(rdev); 2172 } 2173 2174 static void export_array(struct mddev *mddev) 2175 { 2176 struct md_rdev *rdev, *tmp; 2177 2178 rdev_for_each(rdev, tmp, mddev) { 2179 if (!rdev->mddev) { 2180 MD_BUG(); 2181 continue; 2182 } 2183 kick_rdev_from_array(rdev); 2184 } 2185 if (!list_empty(&mddev->disks)) 2186 MD_BUG(); 2187 mddev->raid_disks = 0; 2188 mddev->major_version = 0; 2189 } 2190 2191 static void print_desc(mdp_disk_t *desc) 2192 { 2193 printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number, 2194 desc->major,desc->minor,desc->raid_disk,desc->state); 2195 } 2196 2197 static void print_sb_90(mdp_super_t *sb) 2198 { 2199 int i; 2200 2201 printk(KERN_INFO 2202 "md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n", 2203 sb->major_version, sb->minor_version, sb->patch_version, 2204 sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3, 2205 sb->ctime); 2206 printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n", 2207 sb->level, sb->size, sb->nr_disks, sb->raid_disks, 2208 sb->md_minor, sb->layout, sb->chunk_size); 2209 printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d" 2210 " FD:%d SD:%d CSUM:%08x E:%08lx\n", 2211 sb->utime, sb->state, sb->active_disks, sb->working_disks, 2212 sb->failed_disks, sb->spare_disks, 2213 sb->sb_csum, (unsigned long)sb->events_lo); 2214 2215 printk(KERN_INFO); 2216 for (i = 0; i < MD_SB_DISKS; i++) { 2217 mdp_disk_t *desc; 2218 2219 desc = sb->disks + i; 2220 if (desc->number || desc->major || desc->minor || 2221 desc->raid_disk || (desc->state && (desc->state != 4))) { 2222 printk(" D %2d: ", i); 2223 print_desc(desc); 2224 } 2225 } 2226 printk(KERN_INFO "md: THIS: "); 2227 print_desc(&sb->this_disk); 2228 } 2229 2230 static void print_sb_1(struct mdp_superblock_1 *sb) 2231 { 2232 __u8 *uuid; 2233 2234 uuid = sb->set_uuid; 2235 printk(KERN_INFO 2236 "md: SB: (V:%u) (F:0x%08x) Array-ID:<%pU>\n" 2237 "md: Name: \"%s\" CT:%llu\n", 2238 le32_to_cpu(sb->major_version), 2239 le32_to_cpu(sb->feature_map), 2240 uuid, 2241 sb->set_name, 2242 (unsigned long long)le64_to_cpu(sb->ctime) 2243 & MD_SUPERBLOCK_1_TIME_SEC_MASK); 2244 2245 uuid = sb->device_uuid; 2246 printk(KERN_INFO 2247 "md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu" 2248 " RO:%llu\n" 2249 "md: Dev:%08x UUID: %pU\n" 2250 "md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n" 2251 "md: (MaxDev:%u) \n", 2252 le32_to_cpu(sb->level), 2253 (unsigned long long)le64_to_cpu(sb->size), 2254 le32_to_cpu(sb->raid_disks), 2255 le32_to_cpu(sb->layout), 2256 
le32_to_cpu(sb->chunksize), 2257 (unsigned long long)le64_to_cpu(sb->data_offset), 2258 (unsigned long long)le64_to_cpu(sb->data_size), 2259 (unsigned long long)le64_to_cpu(sb->super_offset), 2260 (unsigned long long)le64_to_cpu(sb->recovery_offset), 2261 le32_to_cpu(sb->dev_number), 2262 uuid, 2263 sb->devflags, 2264 (unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK, 2265 (unsigned long long)le64_to_cpu(sb->events), 2266 (unsigned long long)le64_to_cpu(sb->resync_offset), 2267 le32_to_cpu(sb->sb_csum), 2268 le32_to_cpu(sb->max_dev) 2269 ); 2270 } 2271 2272 static void print_rdev(struct md_rdev *rdev, int major_version) 2273 { 2274 char b[BDEVNAME_SIZE]; 2275 printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n", 2276 bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors, 2277 test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags), 2278 rdev->desc_nr); 2279 if (rdev->sb_loaded) { 2280 printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version); 2281 switch (major_version) { 2282 case 0: 2283 print_sb_90(page_address(rdev->sb_page)); 2284 break; 2285 case 1: 2286 print_sb_1(page_address(rdev->sb_page)); 2287 break; 2288 } 2289 } else 2290 printk(KERN_INFO "md: no rdev superblock!\n"); 2291 } 2292 2293 static void md_print_devices(void) 2294 { 2295 struct list_head *tmp; 2296 struct md_rdev *rdev; 2297 struct mddev *mddev; 2298 char b[BDEVNAME_SIZE]; 2299 2300 printk("\n"); 2301 printk("md: **********************************\n"); 2302 printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n"); 2303 printk("md: **********************************\n"); 2304 for_each_mddev(mddev, tmp) { 2305 2306 if (mddev->bitmap) 2307 bitmap_print_sb(mddev->bitmap); 2308 else 2309 printk("%s: ", mdname(mddev)); 2310 list_for_each_entry(rdev, &mddev->disks, same_set) 2311 printk("<%s>", bdevname(rdev->bdev,b)); 2312 printk("\n"); 2313 2314 list_for_each_entry(rdev, &mddev->disks, same_set) 2315 print_rdev(rdev, mddev->major_version); 2316 } 2317 printk("md: **********************************\n"); 2318 printk("\n"); 2319 } 2320 2321 2322 static void sync_sbs(struct mddev * mddev, int nospares) 2323 { 2324 /* Update each superblock (in-memory image), but 2325 * if we are allowed to, skip spares which already 2326 * have the right event counter, or have one earlier 2327 * (which would mean they aren't being marked as dirty 2328 * with the rest of the array) 2329 */ 2330 struct md_rdev *rdev; 2331 list_for_each_entry(rdev, &mddev->disks, same_set) { 2332 if (rdev->sb_events == mddev->events || 2333 (nospares && 2334 rdev->raid_disk < 0 && 2335 rdev->sb_events+1 == mddev->events)) { 2336 /* Don't update this superblock */ 2337 rdev->sb_loaded = 2; 2338 } else { 2339 sync_super(mddev, rdev); 2340 rdev->sb_loaded = 1; 2341 } 2342 } 2343 } 2344 2345 static void md_update_sb(struct mddev * mddev, int force_change) 2346 { 2347 struct md_rdev *rdev; 2348 int sync_req; 2349 int nospares = 0; 2350 int any_badblocks_changed = 0; 2351 2352 repeat: 2353 /* First make sure individual recovery_offsets are correct */ 2354 list_for_each_entry(rdev, &mddev->disks, same_set) { 2355 if (rdev->raid_disk >= 0 && 2356 mddev->delta_disks >= 0 && 2357 !test_bit(In_sync, &rdev->flags) && 2358 mddev->curr_resync_completed > rdev->recovery_offset) 2359 rdev->recovery_offset = mddev->curr_resync_completed; 2360 2361 } 2362 if (!mddev->persistent) { 2363 clear_bit(MD_CHANGE_CLEAN, &mddev->flags); 2364 clear_bit(MD_CHANGE_DEVS, &mddev->flags); 2365 if (!mddev->external) { 2366 
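/*
 * Side note: the rule that sync_sbs() above uses to decide whether an
 * rdev's superblock image really needs rewriting can be restated as a
 * pure predicate.  The sketch below is a user-space illustration only,
 * with simplified field names standing in for the md structures:
 *
 *	#include <stdbool.h>
 *
 *	struct rdev_view {
 *		long long sb_events;	// event count recorded in this rdev's sb
 *		int       raid_disk;	// negative for spares
 *	};
 *
 *	// true when the on-disk superblock may be left untouched
 *	static bool sb_up_to_date(const struct rdev_view *r,
 *				  long long array_events, bool nospares)
 *	{
 *		if (r->sb_events == array_events)
 *			return true;
 *		// a spare may lag by exactly one event when 'nospares' is set
 *		return nospares && r->raid_disk < 0 &&
 *		       r->sb_events + 1 == array_events;
 *	}
 */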
clear_bit(MD_CHANGE_PENDING, &mddev->flags); 2367 list_for_each_entry(rdev, &mddev->disks, same_set) { 2368 if (rdev->badblocks.changed) { 2369 md_ack_all_badblocks(&rdev->badblocks); 2370 md_error(mddev, rdev); 2371 } 2372 clear_bit(Blocked, &rdev->flags); 2373 clear_bit(BlockedBadBlocks, &rdev->flags); 2374 wake_up(&rdev->blocked_wait); 2375 } 2376 } 2377 wake_up(&mddev->sb_wait); 2378 return; 2379 } 2380 2381 spin_lock_irq(&mddev->write_lock); 2382 2383 mddev->utime = get_seconds(); 2384 2385 if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags)) 2386 force_change = 1; 2387 if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags)) 2388 /* just a clean<-> dirty transition, possibly leave spares alone, 2389 * though if events isn't the right even/odd, we will have to do 2390 * spares after all 2391 */ 2392 nospares = 1; 2393 if (force_change) 2394 nospares = 0; 2395 if (mddev->degraded) 2396 /* If the array is degraded, then skipping spares is both 2397 * dangerous and fairly pointless. 2398 * Dangerous because a device that was removed from the array 2399 * might have a event_count that still looks up-to-date, 2400 * so it can be re-added without a resync. 2401 * Pointless because if there are any spares to skip, 2402 * then a recovery will happen and soon that array won't 2403 * be degraded any more and the spare can go back to sleep then. 2404 */ 2405 nospares = 0; 2406 2407 sync_req = mddev->in_sync; 2408 2409 /* If this is just a dirty<->clean transition, and the array is clean 2410 * and 'events' is odd, we can roll back to the previous clean state */ 2411 if (nospares 2412 && (mddev->in_sync && mddev->recovery_cp == MaxSector) 2413 && mddev->can_decrease_events 2414 && mddev->events != 1) { 2415 mddev->events--; 2416 mddev->can_decrease_events = 0; 2417 } else { 2418 /* otherwise we have to go forward and ... */ 2419 mddev->events ++; 2420 mddev->can_decrease_events = nospares; 2421 } 2422 2423 if (!mddev->events) { 2424 /* 2425 * oops, this 64-bit counter should never wrap. 
2426 * Either we are in around ~1 trillion A.C., assuming 2427 * 1 reboot per second, or we have a bug: 2428 */ 2429 MD_BUG(); 2430 mddev->events --; 2431 } 2432 2433 list_for_each_entry(rdev, &mddev->disks, same_set) { 2434 if (rdev->badblocks.changed) 2435 any_badblocks_changed++; 2436 if (test_bit(Faulty, &rdev->flags)) 2437 set_bit(FaultRecorded, &rdev->flags); 2438 } 2439 2440 sync_sbs(mddev, nospares); 2441 spin_unlock_irq(&mddev->write_lock); 2442 2443 pr_debug("md: updating %s RAID superblock on device (in sync %d)\n", 2444 mdname(mddev), mddev->in_sync); 2445 2446 bitmap_update_sb(mddev->bitmap); 2447 list_for_each_entry(rdev, &mddev->disks, same_set) { 2448 char b[BDEVNAME_SIZE]; 2449 2450 if (rdev->sb_loaded != 1) 2451 continue; /* no noise on spare devices */ 2452 2453 if (!test_bit(Faulty, &rdev->flags) && 2454 rdev->saved_raid_disk == -1) { 2455 md_super_write(mddev,rdev, 2456 rdev->sb_start, rdev->sb_size, 2457 rdev->sb_page); 2458 pr_debug("md: (write) %s's sb offset: %llu\n", 2459 bdevname(rdev->bdev, b), 2460 (unsigned long long)rdev->sb_start); 2461 rdev->sb_events = mddev->events; 2462 if (rdev->badblocks.size) { 2463 md_super_write(mddev, rdev, 2464 rdev->badblocks.sector, 2465 rdev->badblocks.size << 9, 2466 rdev->bb_page); 2467 rdev->badblocks.size = 0; 2468 } 2469 2470 } else if (test_bit(Faulty, &rdev->flags)) 2471 pr_debug("md: %s (skipping faulty)\n", 2472 bdevname(rdev->bdev, b)); 2473 else 2474 pr_debug("(skipping incremental s/r "); 2475 2476 if (mddev->level == LEVEL_MULTIPATH) 2477 /* only need to write one superblock... */ 2478 break; 2479 } 2480 md_super_wait(mddev); 2481 /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */ 2482 2483 spin_lock_irq(&mddev->write_lock); 2484 if (mddev->in_sync != sync_req || 2485 test_bit(MD_CHANGE_DEVS, &mddev->flags)) { 2486 /* have to write it out again */ 2487 spin_unlock_irq(&mddev->write_lock); 2488 goto repeat; 2489 } 2490 clear_bit(MD_CHANGE_PENDING, &mddev->flags); 2491 spin_unlock_irq(&mddev->write_lock); 2492 wake_up(&mddev->sb_wait); 2493 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 2494 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 2495 2496 list_for_each_entry(rdev, &mddev->disks, same_set) { 2497 if (test_and_clear_bit(FaultRecorded, &rdev->flags)) 2498 clear_bit(Blocked, &rdev->flags); 2499 2500 if (any_badblocks_changed) 2501 md_ack_all_badblocks(&rdev->badblocks); 2502 clear_bit(BlockedBadBlocks, &rdev->flags); 2503 wake_up(&rdev->blocked_wait); 2504 } 2505 } 2506 2507 /* words written to sysfs files may, or may not, be \n terminated. 2508 * We want to accept with case. For this we use cmd_match. 2509 */ 2510 static int cmd_match(const char *cmd, const char *str) 2511 { 2512 /* See if cmd, written into a sysfs file, matches 2513 * str. 
They must either be the same, or cmd can 2514 * have a trailing newline 2515 */ 2516 while (*cmd && *str && *cmd == *str) { 2517 cmd++; 2518 str++; 2519 } 2520 if (*cmd == '\n') 2521 cmd++; 2522 if (*str || *cmd) 2523 return 0; 2524 return 1; 2525 } 2526 2527 struct rdev_sysfs_entry { 2528 struct attribute attr; 2529 ssize_t (*show)(struct md_rdev *, char *); 2530 ssize_t (*store)(struct md_rdev *, const char *, size_t); 2531 }; 2532 2533 static ssize_t 2534 state_show(struct md_rdev *rdev, char *page) 2535 { 2536 char *sep = ""; 2537 size_t len = 0; 2538 2539 if (test_bit(Faulty, &rdev->flags) || 2540 rdev->badblocks.unacked_exist) { 2541 len+= sprintf(page+len, "%sfaulty",sep); 2542 sep = ","; 2543 } 2544 if (test_bit(In_sync, &rdev->flags)) { 2545 len += sprintf(page+len, "%sin_sync",sep); 2546 sep = ","; 2547 } 2548 if (test_bit(WriteMostly, &rdev->flags)) { 2549 len += sprintf(page+len, "%swrite_mostly",sep); 2550 sep = ","; 2551 } 2552 if (test_bit(Blocked, &rdev->flags) || 2553 (rdev->badblocks.unacked_exist 2554 && !test_bit(Faulty, &rdev->flags))) { 2555 len += sprintf(page+len, "%sblocked", sep); 2556 sep = ","; 2557 } 2558 if (!test_bit(Faulty, &rdev->flags) && 2559 !test_bit(In_sync, &rdev->flags)) { 2560 len += sprintf(page+len, "%sspare", sep); 2561 sep = ","; 2562 } 2563 if (test_bit(WriteErrorSeen, &rdev->flags)) { 2564 len += sprintf(page+len, "%swrite_error", sep); 2565 sep = ","; 2566 } 2567 if (test_bit(WantReplacement, &rdev->flags)) { 2568 len += sprintf(page+len, "%swant_replacement", sep); 2569 sep = ","; 2570 } 2571 if (test_bit(Replacement, &rdev->flags)) { 2572 len += sprintf(page+len, "%sreplacement", sep); 2573 sep = ","; 2574 } 2575 2576 return len+sprintf(page+len, "\n"); 2577 } 2578 2579 static ssize_t 2580 state_store(struct md_rdev *rdev, const char *buf, size_t len) 2581 { 2582 /* can write 2583 * faulty - simulates an error 2584 * remove - disconnects the device 2585 * writemostly - sets write_mostly 2586 * -writemostly - clears write_mostly 2587 * blocked - sets the Blocked flags 2588 * -blocked - clears the Blocked and possibly simulates an error 2589 * insync - sets Insync providing device isn't active 2590 * write_error - sets WriteErrorSeen 2591 * -write_error - clears WriteErrorSeen 2592 */ 2593 int err = -EINVAL; 2594 if (cmd_match(buf, "faulty") && rdev->mddev->pers) { 2595 md_error(rdev->mddev, rdev); 2596 if (test_bit(Faulty, &rdev->flags)) 2597 err = 0; 2598 else 2599 err = -EBUSY; 2600 } else if (cmd_match(buf, "remove")) { 2601 if (rdev->raid_disk >= 0) 2602 err = -EBUSY; 2603 else { 2604 struct mddev *mddev = rdev->mddev; 2605 kick_rdev_from_array(rdev); 2606 if (mddev->pers) 2607 md_update_sb(mddev, 1); 2608 md_new_event(mddev); 2609 err = 0; 2610 } 2611 } else if (cmd_match(buf, "writemostly")) { 2612 set_bit(WriteMostly, &rdev->flags); 2613 err = 0; 2614 } else if (cmd_match(buf, "-writemostly")) { 2615 clear_bit(WriteMostly, &rdev->flags); 2616 err = 0; 2617 } else if (cmd_match(buf, "blocked")) { 2618 set_bit(Blocked, &rdev->flags); 2619 err = 0; 2620 } else if (cmd_match(buf, "-blocked")) { 2621 if (!test_bit(Faulty, &rdev->flags) && 2622 rdev->badblocks.unacked_exist) { 2623 /* metadata handler doesn't understand badblocks, 2624 * so we need to fail the device 2625 */ 2626 md_error(rdev->mddev, rdev); 2627 } 2628 clear_bit(Blocked, &rdev->flags); 2629 clear_bit(BlockedBadBlocks, &rdev->flags); 2630 wake_up(&rdev->blocked_wait); 2631 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2632 md_wakeup_thread(rdev->mddev->thread); 2633 
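/*
 * For reference, the tokens handled in this function are written from
 * user space through the per-device sysfs 'state' attribute.  A minimal
 * sketch follows; the path is an example only (the member directory is
 * named after the component device), and any of the words documented
 * above may be sent:
 *
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const char *path = "/sys/block/md0/md/dev-sda1/state";
 *		const char *cmd = "-blocked\n";	// trailing '\n' is optional
 *		int fd = open(path, O_WRONLY);
 *
 *		if (fd < 0) {
 *			perror("open");
 *			return 1;
 *		}
 *		if (write(fd, cmd, strlen(cmd)) < 0)
 *			perror("write");	// e.g. EINVAL or EBUSY from this handler
 *		close(fd);
 *		return 0;
 *	}
 *
 * cmd_match() above is what makes the trailing newline optional.
 */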
2634 err = 0; 2635 } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) { 2636 set_bit(In_sync, &rdev->flags); 2637 err = 0; 2638 } else if (cmd_match(buf, "write_error")) { 2639 set_bit(WriteErrorSeen, &rdev->flags); 2640 err = 0; 2641 } else if (cmd_match(buf, "-write_error")) { 2642 clear_bit(WriteErrorSeen, &rdev->flags); 2643 err = 0; 2644 } else if (cmd_match(buf, "want_replacement")) { 2645 /* Any non-spare device that is not a replacement can 2646 * become want_replacement at any time, but we then need to 2647 * check if recovery is needed. 2648 */ 2649 if (rdev->raid_disk >= 0 && 2650 !test_bit(Replacement, &rdev->flags)) 2651 set_bit(WantReplacement, &rdev->flags); 2652 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2653 md_wakeup_thread(rdev->mddev->thread); 2654 err = 0; 2655 } else if (cmd_match(buf, "-want_replacement")) { 2656 /* Clearing 'want_replacement' is always allowed. 2657 * Once replacements starts it is too late though. 2658 */ 2659 err = 0; 2660 clear_bit(WantReplacement, &rdev->flags); 2661 } else if (cmd_match(buf, "replacement")) { 2662 /* Can only set a device as a replacement when array has not 2663 * yet been started. Once running, replacement is automatic 2664 * from spares, or by assigning 'slot'. 2665 */ 2666 if (rdev->mddev->pers) 2667 err = -EBUSY; 2668 else { 2669 set_bit(Replacement, &rdev->flags); 2670 err = 0; 2671 } 2672 } else if (cmd_match(buf, "-replacement")) { 2673 /* Similarly, can only clear Replacement before start */ 2674 if (rdev->mddev->pers) 2675 err = -EBUSY; 2676 else { 2677 clear_bit(Replacement, &rdev->flags); 2678 err = 0; 2679 } 2680 } 2681 if (!err) 2682 sysfs_notify_dirent_safe(rdev->sysfs_state); 2683 return err ? err : len; 2684 } 2685 static struct rdev_sysfs_entry rdev_state = 2686 __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store); 2687 2688 static ssize_t 2689 errors_show(struct md_rdev *rdev, char *page) 2690 { 2691 return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors)); 2692 } 2693 2694 static ssize_t 2695 errors_store(struct md_rdev *rdev, const char *buf, size_t len) 2696 { 2697 char *e; 2698 unsigned long n = simple_strtoul(buf, &e, 10); 2699 if (*buf && (*e == 0 || *e == '\n')) { 2700 atomic_set(&rdev->corrected_errors, n); 2701 return len; 2702 } 2703 return -EINVAL; 2704 } 2705 static struct rdev_sysfs_entry rdev_errors = 2706 __ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store); 2707 2708 static ssize_t 2709 slot_show(struct md_rdev *rdev, char *page) 2710 { 2711 if (rdev->raid_disk < 0) 2712 return sprintf(page, "none\n"); 2713 else 2714 return sprintf(page, "%d\n", rdev->raid_disk); 2715 } 2716 2717 static ssize_t 2718 slot_store(struct md_rdev *rdev, const char *buf, size_t len) 2719 { 2720 char *e; 2721 int err; 2722 int slot = simple_strtoul(buf, &e, 10); 2723 if (strncmp(buf, "none", 4)==0) 2724 slot = -1; 2725 else if (e==buf || (*e && *e!= '\n')) 2726 return -EINVAL; 2727 if (rdev->mddev->pers && slot == -1) { 2728 /* Setting 'slot' on an active array requires also 2729 * updating the 'rd%d' link, and communicating 2730 * with the personality with ->hot_*_disk. 2731 * For now we only support removing 2732 * failed/spare devices. This normally happens automatically, 2733 * but not when the metadata is externally managed. 
2734 */ 2735 if (rdev->raid_disk == -1) 2736 return -EEXIST; 2737 /* personality does all needed checks */ 2738 if (rdev->mddev->pers->hot_remove_disk == NULL) 2739 return -EINVAL; 2740 err = rdev->mddev->pers-> 2741 hot_remove_disk(rdev->mddev, rdev); 2742 if (err) 2743 return err; 2744 sysfs_unlink_rdev(rdev->mddev, rdev); 2745 rdev->raid_disk = -1; 2746 set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery); 2747 md_wakeup_thread(rdev->mddev->thread); 2748 } else if (rdev->mddev->pers) { 2749 /* Activating a spare .. or possibly reactivating 2750 * if we ever get bitmaps working here. 2751 */ 2752 2753 if (rdev->raid_disk != -1) 2754 return -EBUSY; 2755 2756 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery)) 2757 return -EBUSY; 2758 2759 if (rdev->mddev->pers->hot_add_disk == NULL) 2760 return -EINVAL; 2761 2762 if (slot >= rdev->mddev->raid_disks && 2763 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) 2764 return -ENOSPC; 2765 2766 rdev->raid_disk = slot; 2767 if (test_bit(In_sync, &rdev->flags)) 2768 rdev->saved_raid_disk = slot; 2769 else 2770 rdev->saved_raid_disk = -1; 2771 clear_bit(In_sync, &rdev->flags); 2772 err = rdev->mddev->pers-> 2773 hot_add_disk(rdev->mddev, rdev); 2774 if (err) { 2775 rdev->raid_disk = -1; 2776 return err; 2777 } else 2778 sysfs_notify_dirent_safe(rdev->sysfs_state); 2779 if (sysfs_link_rdev(rdev->mddev, rdev)) 2780 /* failure here is OK */; 2781 /* don't wakeup anyone, leave that to userspace. */ 2782 } else { 2783 if (slot >= rdev->mddev->raid_disks && 2784 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) 2785 return -ENOSPC; 2786 rdev->raid_disk = slot; 2787 /* assume it is working */ 2788 clear_bit(Faulty, &rdev->flags); 2789 clear_bit(WriteMostly, &rdev->flags); 2790 set_bit(In_sync, &rdev->flags); 2791 sysfs_notify_dirent_safe(rdev->sysfs_state); 2792 } 2793 return len; 2794 } 2795 2796 2797 static struct rdev_sysfs_entry rdev_slot = 2798 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store); 2799 2800 static ssize_t 2801 offset_show(struct md_rdev *rdev, char *page) 2802 { 2803 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); 2804 } 2805 2806 static ssize_t 2807 offset_store(struct md_rdev *rdev, const char *buf, size_t len) 2808 { 2809 char *e; 2810 unsigned long long offset = simple_strtoull(buf, &e, 10); 2811 if (e==buf || (*e && *e != '\n')) 2812 return -EINVAL; 2813 if (rdev->mddev->pers && rdev->raid_disk >= 0) 2814 return -EBUSY; 2815 if (rdev->sectors && rdev->mddev->external) 2816 /* Must set offset before size, so overlap checks 2817 * can be sane */ 2818 return -EBUSY; 2819 rdev->data_offset = offset; 2820 return len; 2821 } 2822 2823 static struct rdev_sysfs_entry rdev_offset = 2824 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store); 2825 2826 static ssize_t 2827 rdev_size_show(struct md_rdev *rdev, char *page) 2828 { 2829 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2); 2830 } 2831 2832 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2) 2833 { 2834 /* check if two start/length pairs overlap */ 2835 if (s1+l1 <= s2) 2836 return 0; 2837 if (s2+l2 <= s1) 2838 return 0; 2839 return 1; 2840 } 2841 2842 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors) 2843 { 2844 unsigned long long blocks; 2845 sector_t new; 2846 2847 if (strict_strtoull(buf, 10, &blocks) < 0) 2848 return -EINVAL; 2849 2850 if (blocks & 1ULL << (8 * sizeof(blocks) - 1)) 2851 return -EINVAL; /* sector conversion overflow */ 2852 2853 new = blocks * 2; 
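/*
 * The doubling above and the check below form a narrowing-overflow test:
 * 'blocks' is in 1 KiB units, 'new' is in 512-byte sectors, and on
 * configurations where sector_t is narrower than unsigned long long the
 * truncated value will no longer compare equal to blocks * 2.  (The
 * earlier top-bit test already guarantees that blocks * 2 itself cannot
 * overflow unsigned long long.)  A minimal user-space sketch of the same
 * idiom, using a 32-bit "sector" type purely for illustration:
 *
 *	#include <stdint.h>
 *	#include <stdio.h>
 *
 *	static int blocks_to_sectors32(uint64_t blocks, uint32_t *sectors)
 *	{
 *		uint32_t s = blocks * 2;		// may truncate
 *
 *		if ((uint64_t)s != blocks * 2)
 *			return -1;			// does not fit
 *		*sectors = s;
 *		return 0;
 *	}
 *
 *	int main(void)
 *	{
 *		uint32_t s;
 *		printf("%d\n", blocks_to_sectors32(1024, &s));		// 0
 *		printf("%d\n", blocks_to_sectors32(3ULL << 32, &s));	// -1
 *		return 0;
 *	}
 */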
2854 if (new != blocks * 2) 2855 return -EINVAL; /* unsigned long long to sector_t overflow */ 2856 2857 *sectors = new; 2858 return 0; 2859 } 2860 2861 static ssize_t 2862 rdev_size_store(struct md_rdev *rdev, const char *buf, size_t len) 2863 { 2864 struct mddev *my_mddev = rdev->mddev; 2865 sector_t oldsectors = rdev->sectors; 2866 sector_t sectors; 2867 2868 if (strict_blocks_to_sectors(buf, &sectors) < 0) 2869 return -EINVAL; 2870 if (my_mddev->pers && rdev->raid_disk >= 0) { 2871 if (my_mddev->persistent) { 2872 sectors = super_types[my_mddev->major_version]. 2873 rdev_size_change(rdev, sectors); 2874 if (!sectors) 2875 return -EBUSY; 2876 } else if (!sectors) 2877 sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) - 2878 rdev->data_offset; 2879 } 2880 if (sectors < my_mddev->dev_sectors) 2881 return -EINVAL; /* component must fit device */ 2882 2883 rdev->sectors = sectors; 2884 if (sectors > oldsectors && my_mddev->external) { 2885 /* need to check that all other rdevs with the same ->bdev 2886 * do not overlap. We need to unlock the mddev to avoid 2887 * a deadlock. We have already changed rdev->sectors, and if 2888 * we have to change it back, we will have the lock again. 2889 */ 2890 struct mddev *mddev; 2891 int overlap = 0; 2892 struct list_head *tmp; 2893 2894 mddev_unlock(my_mddev); 2895 for_each_mddev(mddev, tmp) { 2896 struct md_rdev *rdev2; 2897 2898 mddev_lock(mddev); 2899 list_for_each_entry(rdev2, &mddev->disks, same_set) 2900 if (rdev->bdev == rdev2->bdev && 2901 rdev != rdev2 && 2902 overlaps(rdev->data_offset, rdev->sectors, 2903 rdev2->data_offset, 2904 rdev2->sectors)) { 2905 overlap = 1; 2906 break; 2907 } 2908 mddev_unlock(mddev); 2909 if (overlap) { 2910 mddev_put(mddev); 2911 break; 2912 } 2913 } 2914 mddev_lock(my_mddev); 2915 if (overlap) { 2916 /* Someone else could have slipped in a size 2917 * change here, but doing so is just silly.
2918 * We put oldsectors back because we *know* it is 2919 * safe, and trust userspace not to race with 2920 * itself 2921 */ 2922 rdev->sectors = oldsectors; 2923 return -EBUSY; 2924 } 2925 } 2926 return len; 2927 } 2928 2929 static struct rdev_sysfs_entry rdev_size = 2930 __ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store); 2931 2932 2933 static ssize_t recovery_start_show(struct md_rdev *rdev, char *page) 2934 { 2935 unsigned long long recovery_start = rdev->recovery_offset; 2936 2937 if (test_bit(In_sync, &rdev->flags) || 2938 recovery_start == MaxSector) 2939 return sprintf(page, "none\n"); 2940 2941 return sprintf(page, "%llu\n", recovery_start); 2942 } 2943 2944 static ssize_t recovery_start_store(struct md_rdev *rdev, const char *buf, size_t len) 2945 { 2946 unsigned long long recovery_start; 2947 2948 if (cmd_match(buf, "none")) 2949 recovery_start = MaxSector; 2950 else if (strict_strtoull(buf, 10, &recovery_start)) 2951 return -EINVAL; 2952 2953 if (rdev->mddev->pers && 2954 rdev->raid_disk >= 0) 2955 return -EBUSY; 2956 2957 rdev->recovery_offset = recovery_start; 2958 if (recovery_start == MaxSector) 2959 set_bit(In_sync, &rdev->flags); 2960 else 2961 clear_bit(In_sync, &rdev->flags); 2962 return len; 2963 } 2964 2965 static struct rdev_sysfs_entry rdev_recovery_start = 2966 __ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store); 2967 2968 2969 static ssize_t 2970 badblocks_show(struct badblocks *bb, char *page, int unack); 2971 static ssize_t 2972 badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack); 2973 2974 static ssize_t bb_show(struct md_rdev *rdev, char *page) 2975 { 2976 return badblocks_show(&rdev->badblocks, page, 0); 2977 } 2978 static ssize_t bb_store(struct md_rdev *rdev, const char *page, size_t len) 2979 { 2980 int rv = badblocks_store(&rdev->badblocks, page, len, 0); 2981 /* Maybe that ack was all we needed */ 2982 if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags)) 2983 wake_up(&rdev->blocked_wait); 2984 return rv; 2985 } 2986 static struct rdev_sysfs_entry rdev_bad_blocks = 2987 __ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store); 2988 2989 2990 static ssize_t ubb_show(struct md_rdev *rdev, char *page) 2991 { 2992 return badblocks_show(&rdev->badblocks, page, 1); 2993 } 2994 static ssize_t ubb_store(struct md_rdev *rdev, const char *page, size_t len) 2995 { 2996 return badblocks_store(&rdev->badblocks, page, len, 1); 2997 } 2998 static struct rdev_sysfs_entry rdev_unack_bad_blocks = 2999 __ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store); 3000 3001 static struct attribute *rdev_default_attrs[] = { 3002 &rdev_state.attr, 3003 &rdev_errors.attr, 3004 &rdev_slot.attr, 3005 &rdev_offset.attr, 3006 &rdev_size.attr, 3007 &rdev_recovery_start.attr, 3008 &rdev_bad_blocks.attr, 3009 &rdev_unack_bad_blocks.attr, 3010 NULL, 3011 }; 3012 static ssize_t 3013 rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 3014 { 3015 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 3016 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); 3017 struct mddev *mddev = rdev->mddev; 3018 ssize_t rv; 3019 3020 if (!entry->show) 3021 return -EIO; 3022 3023 rv = mddev ? 
mddev_lock(mddev) : -EBUSY; 3024 if (!rv) { 3025 if (rdev->mddev == NULL) 3026 rv = -EBUSY; 3027 else 3028 rv = entry->show(rdev, page); 3029 mddev_unlock(mddev); 3030 } 3031 return rv; 3032 } 3033 3034 static ssize_t 3035 rdev_attr_store(struct kobject *kobj, struct attribute *attr, 3036 const char *page, size_t length) 3037 { 3038 struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr); 3039 struct md_rdev *rdev = container_of(kobj, struct md_rdev, kobj); 3040 ssize_t rv; 3041 struct mddev *mddev = rdev->mddev; 3042 3043 if (!entry->store) 3044 return -EIO; 3045 if (!capable(CAP_SYS_ADMIN)) 3046 return -EACCES; 3047 rv = mddev ? mddev_lock(mddev): -EBUSY; 3048 if (!rv) { 3049 if (rdev->mddev == NULL) 3050 rv = -EBUSY; 3051 else 3052 rv = entry->store(rdev, page, length); 3053 mddev_unlock(mddev); 3054 } 3055 return rv; 3056 } 3057 3058 static void rdev_free(struct kobject *ko) 3059 { 3060 struct md_rdev *rdev = container_of(ko, struct md_rdev, kobj); 3061 kfree(rdev); 3062 } 3063 static const struct sysfs_ops rdev_sysfs_ops = { 3064 .show = rdev_attr_show, 3065 .store = rdev_attr_store, 3066 }; 3067 static struct kobj_type rdev_ktype = { 3068 .release = rdev_free, 3069 .sysfs_ops = &rdev_sysfs_ops, 3070 .default_attrs = rdev_default_attrs, 3071 }; 3072 3073 int md_rdev_init(struct md_rdev *rdev) 3074 { 3075 rdev->desc_nr = -1; 3076 rdev->saved_raid_disk = -1; 3077 rdev->raid_disk = -1; 3078 rdev->flags = 0; 3079 rdev->data_offset = 0; 3080 rdev->sb_events = 0; 3081 rdev->last_read_error.tv_sec = 0; 3082 rdev->last_read_error.tv_nsec = 0; 3083 rdev->sb_loaded = 0; 3084 rdev->bb_page = NULL; 3085 atomic_set(&rdev->nr_pending, 0); 3086 atomic_set(&rdev->read_errors, 0); 3087 atomic_set(&rdev->corrected_errors, 0); 3088 3089 INIT_LIST_HEAD(&rdev->same_set); 3090 init_waitqueue_head(&rdev->blocked_wait); 3091 3092 /* Add space to store bad block list. 3093 * This reserves the space even on arrays where it cannot 3094 * be used - I wonder if that matters 3095 */ 3096 rdev->badblocks.count = 0; 3097 rdev->badblocks.shift = 0; 3098 rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL); 3099 seqlock_init(&rdev->badblocks.lock); 3100 if (rdev->badblocks.page == NULL) 3101 return -ENOMEM; 3102 3103 return 0; 3104 } 3105 EXPORT_SYMBOL_GPL(md_rdev_init); 3106 /* 3107 * Import a device. If 'super_format' >= 0, then sanity check the superblock 3108 * 3109 * mark the device faulty if: 3110 * 3111 * - the device is nonexistent (zero size) 3112 * - the device has no valid superblock 3113 * 3114 * a faulty rdev _never_ has rdev->sb set. 
3115 */ 3116 static struct md_rdev *md_import_device(dev_t newdev, int super_format, int super_minor) 3117 { 3118 char b[BDEVNAME_SIZE]; 3119 int err; 3120 struct md_rdev *rdev; 3121 sector_t size; 3122 3123 rdev = kzalloc(sizeof(*rdev), GFP_KERNEL); 3124 if (!rdev) { 3125 printk(KERN_ERR "md: could not alloc mem for new device!\n"); 3126 return ERR_PTR(-ENOMEM); 3127 } 3128 3129 err = md_rdev_init(rdev); 3130 if (err) 3131 goto abort_free; 3132 err = alloc_disk_sb(rdev); 3133 if (err) 3134 goto abort_free; 3135 3136 err = lock_rdev(rdev, newdev, super_format == -2); 3137 if (err) 3138 goto abort_free; 3139 3140 kobject_init(&rdev->kobj, &rdev_ktype); 3141 3142 size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS; 3143 if (!size) { 3144 printk(KERN_WARNING 3145 "md: %s has zero or unknown size, marking faulty!\n", 3146 bdevname(rdev->bdev,b)); 3147 err = -EINVAL; 3148 goto abort_free; 3149 } 3150 3151 if (super_format >= 0) { 3152 err = super_types[super_format]. 3153 load_super(rdev, NULL, super_minor); 3154 if (err == -EINVAL) { 3155 printk(KERN_WARNING 3156 "md: %s does not have a valid v%d.%d " 3157 "superblock, not importing!\n", 3158 bdevname(rdev->bdev,b), 3159 super_format, super_minor); 3160 goto abort_free; 3161 } 3162 if (err < 0) { 3163 printk(KERN_WARNING 3164 "md: could not read %s's sb, not importing!\n", 3165 bdevname(rdev->bdev,b)); 3166 goto abort_free; 3167 } 3168 } 3169 if (super_format == -1) 3170 /* hot-add for 0.90, or non-persistent: so no badblocks */ 3171 rdev->badblocks.shift = -1; 3172 3173 return rdev; 3174 3175 abort_free: 3176 if (rdev->bdev) 3177 unlock_rdev(rdev); 3178 free_disk_sb(rdev); 3179 kfree(rdev->badblocks.page); 3180 kfree(rdev); 3181 return ERR_PTR(err); 3182 } 3183 3184 /* 3185 * Check a full RAID array for plausibility 3186 */ 3187 3188 3189 static void analyze_sbs(struct mddev * mddev) 3190 { 3191 int i; 3192 struct md_rdev *rdev, *freshest, *tmp; 3193 char b[BDEVNAME_SIZE]; 3194 3195 freshest = NULL; 3196 rdev_for_each(rdev, tmp, mddev) 3197 switch (super_types[mddev->major_version]. 3198 load_super(rdev, freshest, mddev->minor_version)) { 3199 case 1: 3200 freshest = rdev; 3201 break; 3202 case 0: 3203 break; 3204 default: 3205 printk( KERN_ERR \ 3206 "md: fatal superblock inconsistency in %s" 3207 " -- removing from array\n", 3208 bdevname(rdev->bdev,b)); 3209 kick_rdev_from_array(rdev); 3210 } 3211 3212 3213 super_types[mddev->major_version]. 3214 validate_super(mddev, freshest); 3215 3216 i = 0; 3217 rdev_for_each(rdev, tmp, mddev) { 3218 if (mddev->max_disks && 3219 (rdev->desc_nr >= mddev->max_disks || 3220 i > mddev->max_disks)) { 3221 printk(KERN_WARNING 3222 "md: %s: %s: only %d devices permitted\n", 3223 mdname(mddev), bdevname(rdev->bdev, b), 3224 mddev->max_disks); 3225 kick_rdev_from_array(rdev); 3226 continue; 3227 } 3228 if (rdev != freshest) 3229 if (super_types[mddev->major_version]. 3230 validate_super(mddev, rdev)) { 3231 printk(KERN_WARNING "md: kicking non-fresh %s" 3232 " from array!\n", 3233 bdevname(rdev->bdev,b)); 3234 kick_rdev_from_array(rdev); 3235 continue; 3236 } 3237 if (mddev->level == LEVEL_MULTIPATH) { 3238 rdev->desc_nr = i++; 3239 rdev->raid_disk = rdev->desc_nr; 3240 set_bit(In_sync, &rdev->flags); 3241 } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) { 3242 rdev->raid_disk = -1; 3243 clear_bit(In_sync, &rdev->flags); 3244 } 3245 } 3246 } 3247 3248 /* Read a fixed-point number. 
3249 * Numbers in sysfs attributes should be in "standard" units where 3250 * possible, so time should be in seconds. 3251 * However we internally use a a much smaller unit such as 3252 * milliseconds or jiffies. 3253 * This function takes a decimal number with a possible fractional 3254 * component, and produces an integer which is the result of 3255 * multiplying that number by 10^'scale'. 3256 * all without any floating-point arithmetic. 3257 */ 3258 int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale) 3259 { 3260 unsigned long result = 0; 3261 long decimals = -1; 3262 while (isdigit(*cp) || (*cp == '.' && decimals < 0)) { 3263 if (*cp == '.') 3264 decimals = 0; 3265 else if (decimals < scale) { 3266 unsigned int value; 3267 value = *cp - '0'; 3268 result = result * 10 + value; 3269 if (decimals >= 0) 3270 decimals++; 3271 } 3272 cp++; 3273 } 3274 if (*cp == '\n') 3275 cp++; 3276 if (*cp) 3277 return -EINVAL; 3278 if (decimals < 0) 3279 decimals = 0; 3280 while (decimals < scale) { 3281 result *= 10; 3282 decimals ++; 3283 } 3284 *res = result; 3285 return 0; 3286 } 3287 3288 3289 static void md_safemode_timeout(unsigned long data); 3290 3291 static ssize_t 3292 safe_delay_show(struct mddev *mddev, char *page) 3293 { 3294 int msec = (mddev->safemode_delay*1000)/HZ; 3295 return sprintf(page, "%d.%03d\n", msec/1000, msec%1000); 3296 } 3297 static ssize_t 3298 safe_delay_store(struct mddev *mddev, const char *cbuf, size_t len) 3299 { 3300 unsigned long msec; 3301 3302 if (strict_strtoul_scaled(cbuf, &msec, 3) < 0) 3303 return -EINVAL; 3304 if (msec == 0) 3305 mddev->safemode_delay = 0; 3306 else { 3307 unsigned long old_delay = mddev->safemode_delay; 3308 mddev->safemode_delay = (msec*HZ)/1000; 3309 if (mddev->safemode_delay == 0) 3310 mddev->safemode_delay = 1; 3311 if (mddev->safemode_delay < old_delay) 3312 md_safemode_timeout((unsigned long)mddev); 3313 } 3314 return len; 3315 } 3316 static struct md_sysfs_entry md_safe_delay = 3317 __ATTR(safe_mode_delay, S_IRUGO|S_IWUSR,safe_delay_show, safe_delay_store); 3318 3319 static ssize_t 3320 level_show(struct mddev *mddev, char *page) 3321 { 3322 struct md_personality *p = mddev->pers; 3323 if (p) 3324 return sprintf(page, "%s\n", p->name); 3325 else if (mddev->clevel[0]) 3326 return sprintf(page, "%s\n", mddev->clevel); 3327 else if (mddev->level != LEVEL_NONE) 3328 return sprintf(page, "%d\n", mddev->level); 3329 else 3330 return 0; 3331 } 3332 3333 static ssize_t 3334 level_store(struct mddev *mddev, const char *buf, size_t len) 3335 { 3336 char clevel[16]; 3337 ssize_t rv = len; 3338 struct md_personality *pers; 3339 long level; 3340 void *priv; 3341 struct md_rdev *rdev; 3342 3343 if (mddev->pers == NULL) { 3344 if (len == 0) 3345 return 0; 3346 if (len >= sizeof(mddev->clevel)) 3347 return -ENOSPC; 3348 strncpy(mddev->clevel, buf, len); 3349 if (mddev->clevel[len-1] == '\n') 3350 len--; 3351 mddev->clevel[len] = 0; 3352 mddev->level = LEVEL_NONE; 3353 return rv; 3354 } 3355 3356 /* request to change the personality. Need to ensure: 3357 * - array is not engaged in resync/recovery/reshape 3358 * - old personality can be suspended 3359 * - new personality will access other array. 
3360 */ 3361 3362 if (mddev->sync_thread || 3363 mddev->reshape_position != MaxSector || 3364 mddev->sysfs_active) 3365 return -EBUSY; 3366 3367 if (!mddev->pers->quiesce) { 3368 printk(KERN_WARNING "md: %s: %s does not support online personality change\n", 3369 mdname(mddev), mddev->pers->name); 3370 return -EINVAL; 3371 } 3372 3373 /* Now find the new personality */ 3374 if (len == 0 || len >= sizeof(clevel)) 3375 return -EINVAL; 3376 strncpy(clevel, buf, len); 3377 if (clevel[len-1] == '\n') 3378 len--; 3379 clevel[len] = 0; 3380 if (strict_strtol(clevel, 10, &level)) 3381 level = LEVEL_NONE; 3382 3383 if (request_module("md-%s", clevel) != 0) 3384 request_module("md-level-%s", clevel); 3385 spin_lock(&pers_lock); 3386 pers = find_pers(level, clevel); 3387 if (!pers || !try_module_get(pers->owner)) { 3388 spin_unlock(&pers_lock); 3389 printk(KERN_WARNING "md: personality %s not loaded\n", clevel); 3390 return -EINVAL; 3391 } 3392 spin_unlock(&pers_lock); 3393 3394 if (pers == mddev->pers) { 3395 /* Nothing to do! */ 3396 module_put(pers->owner); 3397 return rv; 3398 } 3399 if (!pers->takeover) { 3400 module_put(pers->owner); 3401 printk(KERN_WARNING "md: %s: %s does not support personality takeover\n", 3402 mdname(mddev), clevel); 3403 return -EINVAL; 3404 } 3405 3406 list_for_each_entry(rdev, &mddev->disks, same_set) 3407 rdev->new_raid_disk = rdev->raid_disk; 3408 3409 /* ->takeover must set new_* and/or delta_disks 3410 * if it succeeds, and may set them when it fails. 3411 */ 3412 priv = pers->takeover(mddev); 3413 if (IS_ERR(priv)) { 3414 mddev->new_level = mddev->level; 3415 mddev->new_layout = mddev->layout; 3416 mddev->new_chunk_sectors = mddev->chunk_sectors; 3417 mddev->raid_disks -= mddev->delta_disks; 3418 mddev->delta_disks = 0; 3419 module_put(pers->owner); 3420 printk(KERN_WARNING "md: %s: %s would not accept array\n", 3421 mdname(mddev), clevel); 3422 return PTR_ERR(priv); 3423 } 3424 3425 /* Looks like we have a winner */ 3426 mddev_suspend(mddev); 3427 mddev->pers->stop(mddev); 3428 3429 if (mddev->pers->sync_request == NULL && 3430 pers->sync_request != NULL) { 3431 /* need to add the md_redundancy_group */ 3432 if (sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 3433 printk(KERN_WARNING 3434 "md: cannot register extra attributes for %s\n", 3435 mdname(mddev)); 3436 mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, NULL, "sync_action"); 3437 } 3438 if (mddev->pers->sync_request != NULL && 3439 pers->sync_request == NULL) { 3440 /* need to remove the md_redundancy_group */ 3441 if (mddev->to_remove == NULL) 3442 mddev->to_remove = &md_redundancy_group; 3443 } 3444 3445 if (mddev->pers->sync_request == NULL && 3446 mddev->external) { 3447 /* We are converting from a no-redundancy array 3448 * to a redundancy array and metadata is managed 3449 * externally so we need to be sure that writes 3450 * won't block due to a need to transition 3451 * clean->dirty 3452 * until external management is started. 
3453 */ 3454 mddev->in_sync = 0; 3455 mddev->safemode_delay = 0; 3456 mddev->safemode = 0; 3457 } 3458 3459 list_for_each_entry(rdev, &mddev->disks, same_set) { 3460 if (rdev->raid_disk < 0) 3461 continue; 3462 if (rdev->new_raid_disk >= mddev->raid_disks) 3463 rdev->new_raid_disk = -1; 3464 if (rdev->new_raid_disk == rdev->raid_disk) 3465 continue; 3466 sysfs_unlink_rdev(mddev, rdev); 3467 } 3468 list_for_each_entry(rdev, &mddev->disks, same_set) { 3469 if (rdev->raid_disk < 0) 3470 continue; 3471 if (rdev->new_raid_disk == rdev->raid_disk) 3472 continue; 3473 rdev->raid_disk = rdev->new_raid_disk; 3474 if (rdev->raid_disk < 0) 3475 clear_bit(In_sync, &rdev->flags); 3476 else { 3477 if (sysfs_link_rdev(mddev, rdev)) 3478 printk(KERN_WARNING "md: cannot register rd%d" 3479 " for %s after level change\n", 3480 rdev->raid_disk, mdname(mddev)); 3481 } 3482 } 3483 3484 module_put(mddev->pers->owner); 3485 mddev->pers = pers; 3486 mddev->private = priv; 3487 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 3488 mddev->level = mddev->new_level; 3489 mddev->layout = mddev->new_layout; 3490 mddev->chunk_sectors = mddev->new_chunk_sectors; 3491 mddev->delta_disks = 0; 3492 mddev->degraded = 0; 3493 if (mddev->pers->sync_request == NULL) { 3494 /* this is now an array without redundancy, so 3495 * it must always be in_sync 3496 */ 3497 mddev->in_sync = 1; 3498 del_timer_sync(&mddev->safemode_timer); 3499 } 3500 pers->run(mddev); 3501 mddev_resume(mddev); 3502 set_bit(MD_CHANGE_DEVS, &mddev->flags); 3503 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3504 md_wakeup_thread(mddev->thread); 3505 sysfs_notify(&mddev->kobj, NULL, "level"); 3506 md_new_event(mddev); 3507 return rv; 3508 } 3509 3510 static struct md_sysfs_entry md_level = 3511 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store); 3512 3513 3514 static ssize_t 3515 layout_show(struct mddev *mddev, char *page) 3516 { 3517 /* just a number, not meaningful for all levels */ 3518 if (mddev->reshape_position != MaxSector && 3519 mddev->layout != mddev->new_layout) 3520 return sprintf(page, "%d (%d)\n", 3521 mddev->new_layout, mddev->layout); 3522 return sprintf(page, "%d\n", mddev->layout); 3523 } 3524 3525 static ssize_t 3526 layout_store(struct mddev *mddev, const char *buf, size_t len) 3527 { 3528 char *e; 3529 unsigned long n = simple_strtoul(buf, &e, 10); 3530 3531 if (!*buf || (*e && *e != '\n')) 3532 return -EINVAL; 3533 3534 if (mddev->pers) { 3535 int err; 3536 if (mddev->pers->check_reshape == NULL) 3537 return -EBUSY; 3538 mddev->new_layout = n; 3539 err = mddev->pers->check_reshape(mddev); 3540 if (err) { 3541 mddev->new_layout = mddev->layout; 3542 return err; 3543 } 3544 } else { 3545 mddev->new_layout = n; 3546 if (mddev->reshape_position == MaxSector) 3547 mddev->layout = n; 3548 } 3549 return len; 3550 } 3551 static struct md_sysfs_entry md_layout = 3552 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store); 3553 3554 3555 static ssize_t 3556 raid_disks_show(struct mddev *mddev, char *page) 3557 { 3558 if (mddev->raid_disks == 0) 3559 return 0; 3560 if (mddev->reshape_position != MaxSector && 3561 mddev->delta_disks != 0) 3562 return sprintf(page, "%d (%d)\n", mddev->raid_disks, 3563 mddev->raid_disks - mddev->delta_disks); 3564 return sprintf(page, "%d\n", mddev->raid_disks); 3565 } 3566 3567 static int update_raid_disks(struct mddev *mddev, int raid_disks); 3568 3569 static ssize_t 3570 raid_disks_store(struct mddev *mddev, const char *buf, size_t len) 3571 { 3572 char *e; 3573 int rv = 0; 3574 unsigned long n 
= simple_strtoul(buf, &e, 10); 3575 3576 if (!*buf || (*e && *e != '\n')) 3577 return -EINVAL; 3578 3579 if (mddev->pers) 3580 rv = update_raid_disks(mddev, n); 3581 else if (mddev->reshape_position != MaxSector) { 3582 int olddisks = mddev->raid_disks - mddev->delta_disks; 3583 mddev->delta_disks = n - olddisks; 3584 mddev->raid_disks = n; 3585 } else 3586 mddev->raid_disks = n; 3587 return rv ? rv : len; 3588 } 3589 static struct md_sysfs_entry md_raid_disks = 3590 __ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store); 3591 3592 static ssize_t 3593 chunk_size_show(struct mddev *mddev, char *page) 3594 { 3595 if (mddev->reshape_position != MaxSector && 3596 mddev->chunk_sectors != mddev->new_chunk_sectors) 3597 return sprintf(page, "%d (%d)\n", 3598 mddev->new_chunk_sectors << 9, 3599 mddev->chunk_sectors << 9); 3600 return sprintf(page, "%d\n", mddev->chunk_sectors << 9); 3601 } 3602 3603 static ssize_t 3604 chunk_size_store(struct mddev *mddev, const char *buf, size_t len) 3605 { 3606 char *e; 3607 unsigned long n = simple_strtoul(buf, &e, 10); 3608 3609 if (!*buf || (*e && *e != '\n')) 3610 return -EINVAL; 3611 3612 if (mddev->pers) { 3613 int err; 3614 if (mddev->pers->check_reshape == NULL) 3615 return -EBUSY; 3616 mddev->new_chunk_sectors = n >> 9; 3617 err = mddev->pers->check_reshape(mddev); 3618 if (err) { 3619 mddev->new_chunk_sectors = mddev->chunk_sectors; 3620 return err; 3621 } 3622 } else { 3623 mddev->new_chunk_sectors = n >> 9; 3624 if (mddev->reshape_position == MaxSector) 3625 mddev->chunk_sectors = n >> 9; 3626 } 3627 return len; 3628 } 3629 static struct md_sysfs_entry md_chunk_size = 3630 __ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store); 3631 3632 static ssize_t 3633 resync_start_show(struct mddev *mddev, char *page) 3634 { 3635 if (mddev->recovery_cp == MaxSector) 3636 return sprintf(page, "none\n"); 3637 return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp); 3638 } 3639 3640 static ssize_t 3641 resync_start_store(struct mddev *mddev, const char *buf, size_t len) 3642 { 3643 char *e; 3644 unsigned long long n = simple_strtoull(buf, &e, 10); 3645 3646 if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 3647 return -EBUSY; 3648 if (cmd_match(buf, "none")) 3649 n = MaxSector; 3650 else if (!*buf || (*e && *e != '\n')) 3651 return -EINVAL; 3652 3653 mddev->recovery_cp = n; 3654 return len; 3655 } 3656 static struct md_sysfs_entry md_resync_start = 3657 __ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store); 3658 3659 /* 3660 * The array state can be: 3661 * 3662 * clear 3663 * No devices, no size, no level 3664 * Equivalent to STOP_ARRAY ioctl 3665 * inactive 3666 * May have some settings, but array is not active 3667 * all IO results in error 3668 * When written, doesn't tear down array, but just stops it 3669 * suspended (not supported yet) 3670 * All IO requests will block. The array can be reconfigured. 3671 * Writing this, if accepted, will block until array is quiescent 3672 * readonly 3673 * no resync can happen. no superblocks get written. 3674 * write requests fail 3675 * read-auto 3676 * like readonly, but behaves like 'clean' on a write request. 3677 * 3678 * clean - no pending writes, but otherwise active. 3679 * When written to inactive array, starts without resync 3680 * If a write request arrives then 3681 * if metadata is known, mark 'dirty' and switch to 'active'. 
3682 * if not known, block and switch to write-pending 3683 * If written to an active array that has pending writes, then fails. 3684 * active 3685 * fully active: IO and resync can be happening. 3686 * When written to inactive array, starts with resync 3687 * 3688 * write-pending 3689 * clean, but writes are blocked waiting for 'active' to be written. 3690 * 3691 * active-idle 3692 * like active, but no writes have been seen for a while (100msec). 3693 * 3694 */ 3695 enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active, 3696 write_pending, active_idle, bad_word}; 3697 static char *array_states[] = { 3698 "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active", 3699 "write-pending", "active-idle", NULL }; 3700 3701 static int match_word(const char *word, char **list) 3702 { 3703 int n; 3704 for (n=0; list[n]; n++) 3705 if (cmd_match(word, list[n])) 3706 break; 3707 return n; 3708 } 3709 3710 static ssize_t 3711 array_state_show(struct mddev *mddev, char *page) 3712 { 3713 enum array_state st = inactive; 3714 3715 if (mddev->pers) 3716 switch(mddev->ro) { 3717 case 1: 3718 st = readonly; 3719 break; 3720 case 2: 3721 st = read_auto; 3722 break; 3723 case 0: 3724 if (mddev->in_sync) 3725 st = clean; 3726 else if (test_bit(MD_CHANGE_PENDING, &mddev->flags)) 3727 st = write_pending; 3728 else if (mddev->safemode) 3729 st = active_idle; 3730 else 3731 st = active; 3732 } 3733 else { 3734 if (list_empty(&mddev->disks) && 3735 mddev->raid_disks == 0 && 3736 mddev->dev_sectors == 0) 3737 st = clear; 3738 else 3739 st = inactive; 3740 } 3741 return sprintf(page, "%s\n", array_states[st]); 3742 } 3743 3744 static int do_md_stop(struct mddev * mddev, int ro, int is_open); 3745 static int md_set_readonly(struct mddev * mddev, int is_open); 3746 static int do_md_run(struct mddev * mddev); 3747 static int restart_array(struct mddev *mddev); 3748 3749 static ssize_t 3750 array_state_store(struct mddev *mddev, const char *buf, size_t len) 3751 { 3752 int err = -EINVAL; 3753 enum array_state st = match_word(buf, array_states); 3754 switch(st) { 3755 case bad_word: 3756 break; 3757 case clear: 3758 /* stopping an active array */ 3759 if (atomic_read(&mddev->openers) > 0) 3760 return -EBUSY; 3761 err = do_md_stop(mddev, 0, 0); 3762 break; 3763 case inactive: 3764 /* stopping an active array */ 3765 if (mddev->pers) { 3766 if (atomic_read(&mddev->openers) > 0) 3767 return -EBUSY; 3768 err = do_md_stop(mddev, 2, 0); 3769 } else 3770 err = 0; /* already inactive */ 3771 break; 3772 case suspended: 3773 break; /* not supported yet */ 3774 case readonly: 3775 if (mddev->pers) 3776 err = md_set_readonly(mddev, 0); 3777 else { 3778 mddev->ro = 1; 3779 set_disk_ro(mddev->gendisk, 1); 3780 err = do_md_run(mddev); 3781 } 3782 break; 3783 case read_auto: 3784 if (mddev->pers) { 3785 if (mddev->ro == 0) 3786 err = md_set_readonly(mddev, 0); 3787 else if (mddev->ro == 1) 3788 err = restart_array(mddev); 3789 if (err == 0) { 3790 mddev->ro = 2; 3791 set_disk_ro(mddev->gendisk, 0); 3792 } 3793 } else { 3794 mddev->ro = 2; 3795 err = do_md_run(mddev); 3796 } 3797 break; 3798 case clean: 3799 if (mddev->pers) { 3800 restart_array(mddev); 3801 spin_lock_irq(&mddev->write_lock); 3802 if (atomic_read(&mddev->writes_pending) == 0) { 3803 if (mddev->in_sync == 0) { 3804 mddev->in_sync = 1; 3805 if (mddev->safemode == 1) 3806 mddev->safemode = 0; 3807 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 3808 } 3809 err = 0; 3810 } else 3811 err = -EBUSY; 3812 
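/*
 * Illustrative user-space sketch (not kernel code) of how match_word()
 * and cmd_match() above map the written string onto the array_states[]
 * table, including the tolerance for a trailing newline.  The table here
 * is deliberately abbreviated:
 *
 *	#include <stdio.h>
 *
 *	static const char *states[] = { "clear", "inactive", "readonly",
 *					"read-auto", "clean", "active", 0 };
 *
 *	static int word_match(const char *cmd, const char *str)
 *	{
 *		while (*cmd && *str && *cmd == *str) {
 *			cmd++;
 *			str++;
 *		}
 *		if (*cmd == '\n')
 *			cmd++;
 *		return !*cmd && !*str;
 *	}
 *
 *	static int match(const char *word)
 *	{
 *		int n;
 *
 *		for (n = 0; states[n]; n++)
 *			if (word_match(word, states[n]))
 *				break;
 *		return n;	// index into states[], or 6 when nothing matched
 *	}
 *
 *	int main(void)
 *	{
 *		printf("%d\n", match("clean\n"));	// 4
 *		printf("%d\n", match("bogus"));		// 6
 *		return 0;
 *	}
 */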
spin_unlock_irq(&mddev->write_lock); 3813 } else 3814 err = -EINVAL; 3815 break; 3816 case active: 3817 if (mddev->pers) { 3818 restart_array(mddev); 3819 clear_bit(MD_CHANGE_PENDING, &mddev->flags); 3820 wake_up(&mddev->sb_wait); 3821 err = 0; 3822 } else { 3823 mddev->ro = 0; 3824 set_disk_ro(mddev->gendisk, 0); 3825 err = do_md_run(mddev); 3826 } 3827 break; 3828 case write_pending: 3829 case active_idle: 3830 /* these cannot be set */ 3831 break; 3832 } 3833 if (err) 3834 return err; 3835 else { 3836 if (mddev->hold_active == UNTIL_IOCTL) 3837 mddev->hold_active = 0; 3838 sysfs_notify_dirent_safe(mddev->sysfs_state); 3839 return len; 3840 } 3841 } 3842 static struct md_sysfs_entry md_array_state = 3843 __ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store); 3844 3845 static ssize_t 3846 max_corrected_read_errors_show(struct mddev *mddev, char *page) { 3847 return sprintf(page, "%d\n", 3848 atomic_read(&mddev->max_corr_read_errors)); 3849 } 3850 3851 static ssize_t 3852 max_corrected_read_errors_store(struct mddev *mddev, const char *buf, size_t len) 3853 { 3854 char *e; 3855 unsigned long n = simple_strtoul(buf, &e, 10); 3856 3857 if (*buf && (*e == 0 || *e == '\n')) { 3858 atomic_set(&mddev->max_corr_read_errors, n); 3859 return len; 3860 } 3861 return -EINVAL; 3862 } 3863 3864 static struct md_sysfs_entry max_corr_read_errors = 3865 __ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show, 3866 max_corrected_read_errors_store); 3867 3868 static ssize_t 3869 null_show(struct mddev *mddev, char *page) 3870 { 3871 return -EINVAL; 3872 } 3873 3874 static ssize_t 3875 new_dev_store(struct mddev *mddev, const char *buf, size_t len) 3876 { 3877 /* buf must be %d:%d\n? giving major and minor numbers */ 3878 /* The new device is added to the array. 3879 * If the array has a persistent superblock, we read the 3880 * superblock to initialise info and check validity. 3881 * Otherwise, only checking done is that in bind_rdev_to_array, 3882 * which mainly checks size. 3883 */ 3884 char *e; 3885 int major = simple_strtoul(buf, &e, 10); 3886 int minor; 3887 dev_t dev; 3888 struct md_rdev *rdev; 3889 int err; 3890 3891 if (!*buf || *e != ':' || !e[1] || e[1] == '\n') 3892 return -EINVAL; 3893 minor = simple_strtoul(e+1, &e, 10); 3894 if (*e && *e != '\n') 3895 return -EINVAL; 3896 dev = MKDEV(major, minor); 3897 if (major != MAJOR(dev) || 3898 minor != MINOR(dev)) 3899 return -EOVERFLOW; 3900 3901 3902 if (mddev->persistent) { 3903 rdev = md_import_device(dev, mddev->major_version, 3904 mddev->minor_version); 3905 if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) { 3906 struct md_rdev *rdev0 3907 = list_entry(mddev->disks.next, 3908 struct md_rdev, same_set); 3909 err = super_types[mddev->major_version] 3910 .load_super(rdev, rdev0, mddev->minor_version); 3911 if (err < 0) 3912 goto out; 3913 } 3914 } else if (mddev->external) 3915 rdev = md_import_device(dev, -2, -1); 3916 else 3917 rdev = md_import_device(dev, -1, -1); 3918 3919 if (IS_ERR(rdev)) 3920 return PTR_ERR(rdev); 3921 err = bind_rdev_to_array(rdev, mddev); 3922 out: 3923 if (err) 3924 export_rdev(rdev); 3925 return err ? err : len; 3926 } 3927 3928 static struct md_sysfs_entry md_new_device = 3929 __ATTR(new_dev, S_IWUSR, null_show, new_dev_store); 3930 3931 static ssize_t 3932 bitmap_store(struct mddev *mddev, const char *buf, size_t len) 3933 { 3934 char *end; 3935 unsigned long chunk, end_chunk; 3936 3937 if (!mddev->bitmap) 3938 goto out; 3939 /* buf should be <chunk> <chunk> ... or <chunk>-<chunk> ... 
(range) */ 3940 while (*buf) { 3941 chunk = end_chunk = simple_strtoul(buf, &end, 0); 3942 if (buf == end) break; 3943 if (*end == '-') { /* range */ 3944 buf = end + 1; 3945 end_chunk = simple_strtoul(buf, &end, 0); 3946 if (buf == end) break; 3947 } 3948 if (*end && !isspace(*end)) break; 3949 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk); 3950 buf = skip_spaces(end); 3951 } 3952 bitmap_unplug(mddev->bitmap); /* flush the bits to disk */ 3953 out: 3954 return len; 3955 } 3956 3957 static struct md_sysfs_entry md_bitmap = 3958 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store); 3959 3960 static ssize_t 3961 size_show(struct mddev *mddev, char *page) 3962 { 3963 return sprintf(page, "%llu\n", 3964 (unsigned long long)mddev->dev_sectors / 2); 3965 } 3966 3967 static int update_size(struct mddev *mddev, sector_t num_sectors); 3968 3969 static ssize_t 3970 size_store(struct mddev *mddev, const char *buf, size_t len) 3971 { 3972 /* If array is inactive, we can reduce the component size, but 3973 * not increase it (except from 0). 3974 * If array is active, we can try an on-line resize 3975 */ 3976 sector_t sectors; 3977 int err = strict_blocks_to_sectors(buf, &sectors); 3978 3979 if (err < 0) 3980 return err; 3981 if (mddev->pers) { 3982 err = update_size(mddev, sectors); 3983 md_update_sb(mddev, 1); 3984 } else { 3985 if (mddev->dev_sectors == 0 || 3986 mddev->dev_sectors > sectors) 3987 mddev->dev_sectors = sectors; 3988 else 3989 err = -ENOSPC; 3990 } 3991 return err ? err : len; 3992 } 3993 3994 static struct md_sysfs_entry md_size = 3995 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store); 3996 3997 3998 /* Metadata version. 3999 * This is one of 4000 * 'none' for arrays with no metadata (good luck...) 4001 * 'external' for arrays with externally managed metadata, 4002 * or N.M for internally known formats 4003 */ 4004 static ssize_t 4005 metadata_show(struct mddev *mddev, char *page) 4006 { 4007 if (mddev->persistent) 4008 return sprintf(page, "%d.%d\n", 4009 mddev->major_version, mddev->minor_version); 4010 else if (mddev->external) 4011 return sprintf(page, "external:%s\n", mddev->metadata_type); 4012 else 4013 return sprintf(page, "none\n"); 4014 } 4015 4016 static ssize_t 4017 metadata_store(struct mddev *mddev, const char *buf, size_t len) 4018 { 4019 int major, minor; 4020 char *e; 4021 /* Changing the details of 'external' metadata is 4022 * always permitted. Otherwise there must be 4023 * no devices attached to the array.
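 * Accepted forms mirror what metadata_show() prints. Illustratively,
 * assuming the usual sysfs path /sys/block/mdX/md/metadata_version:
 *   "none"          - no persistent superblock on the member devices
 *   "external:NAME" - metadata managed by an external agent called NAME
 *   "N.M"           - an internally known format, e.g. "0.90" or "1.2"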
4024 */ 4025 if (mddev->external && strncmp(buf, "external:", 9) == 0) 4026 ; 4027 else if (!list_empty(&mddev->disks)) 4028 return -EBUSY; 4029 4030 if (cmd_match(buf, "none")) { 4031 mddev->persistent = 0; 4032 mddev->external = 0; 4033 mddev->major_version = 0; 4034 mddev->minor_version = 90; 4035 return len; 4036 } 4037 if (strncmp(buf, "external:", 9) == 0) { 4038 size_t namelen = len-9; 4039 if (namelen >= sizeof(mddev->metadata_type)) 4040 namelen = sizeof(mddev->metadata_type)-1; 4041 strncpy(mddev->metadata_type, buf+9, namelen); 4042 mddev->metadata_type[namelen] = 0; 4043 if (namelen && mddev->metadata_type[namelen-1] == '\n') 4044 mddev->metadata_type[--namelen] = 0; 4045 mddev->persistent = 0; 4046 mddev->external = 1; 4047 mddev->major_version = 0; 4048 mddev->minor_version = 90; 4049 return len; 4050 } 4051 major = simple_strtoul(buf, &e, 10); 4052 if (e==buf || *e != '.') 4053 return -EINVAL; 4054 buf = e+1; 4055 minor = simple_strtoul(buf, &e, 10); 4056 if (e==buf || (*e && *e != '\n') ) 4057 return -EINVAL; 4058 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL) 4059 return -ENOENT; 4060 mddev->major_version = major; 4061 mddev->minor_version = minor; 4062 mddev->persistent = 1; 4063 mddev->external = 0; 4064 return len; 4065 } 4066 4067 static struct md_sysfs_entry md_metadata = 4068 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); 4069 4070 static ssize_t 4071 action_show(struct mddev *mddev, char *page) 4072 { 4073 char *type = "idle"; 4074 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 4075 type = "frozen"; 4076 else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 4077 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) { 4078 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 4079 type = "reshape"; 4080 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 4081 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 4082 type = "resync"; 4083 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) 4084 type = "check"; 4085 else 4086 type = "repair"; 4087 } else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) 4088 type = "recover"; 4089 } 4090 return sprintf(page, "%s\n", type); 4091 } 4092 4093 static void reap_sync_thread(struct mddev *mddev); 4094 4095 static ssize_t 4096 action_store(struct mddev *mddev, const char *page, size_t len) 4097 { 4098 if (!mddev->pers || !mddev->pers->sync_request) 4099 return -EINVAL; 4100 4101 if (cmd_match(page, "frozen")) 4102 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4103 else 4104 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4105 4106 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) { 4107 if (mddev->sync_thread) { 4108 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4109 reap_sync_thread(mddev); 4110 } 4111 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 4112 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 4113 return -EBUSY; 4114 else if (cmd_match(page, "resync")) 4115 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4116 else if (cmd_match(page, "recover")) { 4117 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 4118 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4119 } else if (cmd_match(page, "reshape")) { 4120 int err; 4121 if (mddev->pers->start_reshape == NULL) 4122 return -EINVAL; 4123 err = mddev->pers->start_reshape(mddev); 4124 if (err) 4125 return err; 4126 sysfs_notify(&mddev->kobj, NULL, "degraded"); 4127 } else { 4128 if (cmd_match(page, "check")) 4129 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4130 else 
if (!cmd_match(page, "repair")) 4131 return -EINVAL; 4132 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 4133 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4134 } 4135 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4136 md_wakeup_thread(mddev->thread); 4137 sysfs_notify_dirent_safe(mddev->sysfs_action); 4138 return len; 4139 } 4140 4141 static ssize_t 4142 mismatch_cnt_show(struct mddev *mddev, char *page) 4143 { 4144 return sprintf(page, "%llu\n", 4145 (unsigned long long) mddev->resync_mismatches); 4146 } 4147 4148 static struct md_sysfs_entry md_scan_mode = 4149 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); 4150 4151 4152 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt); 4153 4154 static ssize_t 4155 sync_min_show(struct mddev *mddev, char *page) 4156 { 4157 return sprintf(page, "%d (%s)\n", speed_min(mddev), 4158 mddev->sync_speed_min ? "local": "system"); 4159 } 4160 4161 static ssize_t 4162 sync_min_store(struct mddev *mddev, const char *buf, size_t len) 4163 { 4164 int min; 4165 char *e; 4166 if (strncmp(buf, "system", 6)==0) { 4167 mddev->sync_speed_min = 0; 4168 return len; 4169 } 4170 min = simple_strtoul(buf, &e, 10); 4171 if (buf == e || (*e && *e != '\n') || min <= 0) 4172 return -EINVAL; 4173 mddev->sync_speed_min = min; 4174 return len; 4175 } 4176 4177 static struct md_sysfs_entry md_sync_min = 4178 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); 4179 4180 static ssize_t 4181 sync_max_show(struct mddev *mddev, char *page) 4182 { 4183 return sprintf(page, "%d (%s)\n", speed_max(mddev), 4184 mddev->sync_speed_max ? "local": "system"); 4185 } 4186 4187 static ssize_t 4188 sync_max_store(struct mddev *mddev, const char *buf, size_t len) 4189 { 4190 int max; 4191 char *e; 4192 if (strncmp(buf, "system", 6)==0) { 4193 mddev->sync_speed_max = 0; 4194 return len; 4195 } 4196 max = simple_strtoul(buf, &e, 10); 4197 if (buf == e || (*e && *e != '\n') || max <= 0) 4198 return -EINVAL; 4199 mddev->sync_speed_max = max; 4200 return len; 4201 } 4202 4203 static struct md_sysfs_entry md_sync_max = 4204 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); 4205 4206 static ssize_t 4207 degraded_show(struct mddev *mddev, char *page) 4208 { 4209 return sprintf(page, "%d\n", mddev->degraded); 4210 } 4211 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded); 4212 4213 static ssize_t 4214 sync_force_parallel_show(struct mddev *mddev, char *page) 4215 { 4216 return sprintf(page, "%d\n", mddev->parallel_resync); 4217 } 4218 4219 static ssize_t 4220 sync_force_parallel_store(struct mddev *mddev, const char *buf, size_t len) 4221 { 4222 long n; 4223 4224 if (strict_strtol(buf, 10, &n)) 4225 return -EINVAL; 4226 4227 if (n != 0 && n != 1) 4228 return -EINVAL; 4229 4230 mddev->parallel_resync = n; 4231 4232 if (mddev->sync_thread) 4233 wake_up(&resync_wait); 4234 4235 return len; 4236 } 4237 4238 /* force parallel resync, even with shared block devices */ 4239 static struct md_sysfs_entry md_sync_force_parallel = 4240 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR, 4241 sync_force_parallel_show, sync_force_parallel_store); 4242 4243 static ssize_t 4244 sync_speed_show(struct mddev *mddev, char *page) 4245 { 4246 unsigned long resync, dt, db; 4247 if (mddev->curr_resync == 0) 4248 return sprintf(page, "none\n"); 4249 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active); 4250 dt = (jiffies - mddev->resync_mark) / HZ; 4251 if (!dt) dt++; 4252 db = resync - mddev->resync_mark_cnt; 4253 return 
sprintf(page, "%lu\n", db/dt/2); /* K/sec */ 4254 } 4255 4256 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed); 4257 4258 static ssize_t 4259 sync_completed_show(struct mddev *mddev, char *page) 4260 { 4261 unsigned long long max_sectors, resync; 4262 4263 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4264 return sprintf(page, "none\n"); 4265 4266 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 4267 max_sectors = mddev->resync_max_sectors; 4268 else 4269 max_sectors = mddev->dev_sectors; 4270 4271 resync = mddev->curr_resync_completed; 4272 return sprintf(page, "%llu / %llu\n", resync, max_sectors); 4273 } 4274 4275 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed); 4276 4277 static ssize_t 4278 min_sync_show(struct mddev *mddev, char *page) 4279 { 4280 return sprintf(page, "%llu\n", 4281 (unsigned long long)mddev->resync_min); 4282 } 4283 static ssize_t 4284 min_sync_store(struct mddev *mddev, const char *buf, size_t len) 4285 { 4286 unsigned long long min; 4287 if (strict_strtoull(buf, 10, &min)) 4288 return -EINVAL; 4289 if (min > mddev->resync_max) 4290 return -EINVAL; 4291 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4292 return -EBUSY; 4293 4294 /* Must be a multiple of chunk_size */ 4295 if (mddev->chunk_sectors) { 4296 sector_t temp = min; 4297 if (sector_div(temp, mddev->chunk_sectors)) 4298 return -EINVAL; 4299 } 4300 mddev->resync_min = min; 4301 4302 return len; 4303 } 4304 4305 static struct md_sysfs_entry md_min_sync = 4306 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store); 4307 4308 static ssize_t 4309 max_sync_show(struct mddev *mddev, char *page) 4310 { 4311 if (mddev->resync_max == MaxSector) 4312 return sprintf(page, "max\n"); 4313 else 4314 return sprintf(page, "%llu\n", 4315 (unsigned long long)mddev->resync_max); 4316 } 4317 static ssize_t 4318 max_sync_store(struct mddev *mddev, const char *buf, size_t len) 4319 { 4320 if (strncmp(buf, "max", 3) == 0) 4321 mddev->resync_max = MaxSector; 4322 else { 4323 unsigned long long max; 4324 if (strict_strtoull(buf, 10, &max)) 4325 return -EINVAL; 4326 if (max < mddev->resync_min) 4327 return -EINVAL; 4328 if (max < mddev->resync_max && 4329 mddev->ro == 0 && 4330 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4331 return -EBUSY; 4332 4333 /* Must be a multiple of chunk_size */ 4334 if (mddev->chunk_sectors) { 4335 sector_t temp = max; 4336 if (sector_div(temp, mddev->chunk_sectors)) 4337 return -EINVAL; 4338 } 4339 mddev->resync_max = max; 4340 } 4341 wake_up(&mddev->recovery_wait); 4342 return len; 4343 } 4344 4345 static struct md_sysfs_entry md_max_sync = 4346 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store); 4347 4348 static ssize_t 4349 suspend_lo_show(struct mddev *mddev, char *page) 4350 { 4351 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); 4352 } 4353 4354 static ssize_t 4355 suspend_lo_store(struct mddev *mddev, const char *buf, size_t len) 4356 { 4357 char *e; 4358 unsigned long long new = simple_strtoull(buf, &e, 10); 4359 unsigned long long old = mddev->suspend_lo; 4360 4361 if (mddev->pers == NULL || 4362 mddev->pers->quiesce == NULL) 4363 return -EINVAL; 4364 if (buf == e || (*e && *e != '\n')) 4365 return -EINVAL; 4366 4367 mddev->suspend_lo = new; 4368 if (new >= old) 4369 /* Shrinking suspended region */ 4370 mddev->pers->quiesce(mddev, 2); 4371 else { 4372 /* Expanding suspended region - need to wait */ 4373 mddev->pers->quiesce(mddev, 1); 4374 mddev->pers->quiesce(mddev, 0); 4375 } 4376 
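	/* Note: shrinking the suspended range only needs the lightweight
	 * quiesce(mddev, 2) notification, while growing it takes the full
	 * quiesce(1)/quiesce(0) cycle above so that requests already issued
	 * to the newly suspended range can drain first.
	 */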
return len; 4377 } 4378 static struct md_sysfs_entry md_suspend_lo = 4379 __ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store); 4380 4381 4382 static ssize_t 4383 suspend_hi_show(struct mddev *mddev, char *page) 4384 { 4385 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi); 4386 } 4387 4388 static ssize_t 4389 suspend_hi_store(struct mddev *mddev, const char *buf, size_t len) 4390 { 4391 char *e; 4392 unsigned long long new = simple_strtoull(buf, &e, 10); 4393 unsigned long long old = mddev->suspend_hi; 4394 4395 if (mddev->pers == NULL || 4396 mddev->pers->quiesce == NULL) 4397 return -EINVAL; 4398 if (buf == e || (*e && *e != '\n')) 4399 return -EINVAL; 4400 4401 mddev->suspend_hi = new; 4402 if (new <= old) 4403 /* Shrinking suspended region */ 4404 mddev->pers->quiesce(mddev, 2); 4405 else { 4406 /* Expanding suspended region - need to wait */ 4407 mddev->pers->quiesce(mddev, 1); 4408 mddev->pers->quiesce(mddev, 0); 4409 } 4410 return len; 4411 } 4412 static struct md_sysfs_entry md_suspend_hi = 4413 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); 4414 4415 static ssize_t 4416 reshape_position_show(struct mddev *mddev, char *page) 4417 { 4418 if (mddev->reshape_position != MaxSector) 4419 return sprintf(page, "%llu\n", 4420 (unsigned long long)mddev->reshape_position); 4421 strcpy(page, "none\n"); 4422 return 5; 4423 } 4424 4425 static ssize_t 4426 reshape_position_store(struct mddev *mddev, const char *buf, size_t len) 4427 { 4428 char *e; 4429 unsigned long long new = simple_strtoull(buf, &e, 10); 4430 if (mddev->pers) 4431 return -EBUSY; 4432 if (buf == e || (*e && *e != '\n')) 4433 return -EINVAL; 4434 mddev->reshape_position = new; 4435 mddev->delta_disks = 0; 4436 mddev->new_level = mddev->level; 4437 mddev->new_layout = mddev->layout; 4438 mddev->new_chunk_sectors = mddev->chunk_sectors; 4439 return len; 4440 } 4441 4442 static struct md_sysfs_entry md_reshape_position = 4443 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show, 4444 reshape_position_store); 4445 4446 static ssize_t 4447 array_size_show(struct mddev *mddev, char *page) 4448 { 4449 if (mddev->external_size) 4450 return sprintf(page, "%llu\n", 4451 (unsigned long long)mddev->array_sectors/2); 4452 else 4453 return sprintf(page, "default\n"); 4454 } 4455 4456 static ssize_t 4457 array_size_store(struct mddev *mddev, const char *buf, size_t len) 4458 { 4459 sector_t sectors; 4460 4461 if (strncmp(buf, "default", 7) == 0) { 4462 if (mddev->pers) 4463 sectors = mddev->pers->size(mddev, 0, 0); 4464 else 4465 sectors = mddev->array_sectors; 4466 4467 mddev->external_size = 0; 4468 } else { 4469 if (strict_blocks_to_sectors(buf, &sectors) < 0) 4470 return -EINVAL; 4471 if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors) 4472 return -E2BIG; 4473 4474 mddev->external_size = 1; 4475 } 4476 4477 mddev->array_sectors = sectors; 4478 if (mddev->pers) { 4479 set_capacity(mddev->gendisk, mddev->array_sectors); 4480 revalidate_disk(mddev->gendisk); 4481 } 4482 return len; 4483 } 4484 4485 static struct md_sysfs_entry md_array_size = 4486 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show, 4487 array_size_store); 4488 4489 static struct attribute *md_default_attrs[] = { 4490 &md_level.attr, 4491 &md_layout.attr, 4492 &md_raid_disks.attr, 4493 &md_chunk_size.attr, 4494 &md_size.attr, 4495 &md_resync_start.attr, 4496 &md_metadata.attr, 4497 &md_new_device.attr, 4498 &md_safe_delay.attr, 4499 &md_array_state.attr, 4500 &md_reshape_position.attr, 4501
&md_array_size.attr, 4502 &max_corr_read_errors.attr, 4503 NULL, 4504 }; 4505 4506 static struct attribute *md_redundancy_attrs[] = { 4507 &md_scan_mode.attr, 4508 &md_mismatches.attr, 4509 &md_sync_min.attr, 4510 &md_sync_max.attr, 4511 &md_sync_speed.attr, 4512 &md_sync_force_parallel.attr, 4513 &md_sync_completed.attr, 4514 &md_min_sync.attr, 4515 &md_max_sync.attr, 4516 &md_suspend_lo.attr, 4517 &md_suspend_hi.attr, 4518 &md_bitmap.attr, 4519 &md_degraded.attr, 4520 NULL, 4521 }; 4522 static struct attribute_group md_redundancy_group = { 4523 .name = NULL, 4524 .attrs = md_redundancy_attrs, 4525 }; 4526 4527 4528 static ssize_t 4529 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 4530 { 4531 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 4532 struct mddev *mddev = container_of(kobj, struct mddev, kobj); 4533 ssize_t rv; 4534 4535 if (!entry->show) 4536 return -EIO; 4537 spin_lock(&all_mddevs_lock); 4538 if (list_empty(&mddev->all_mddevs)) { 4539 spin_unlock(&all_mddevs_lock); 4540 return -EBUSY; 4541 } 4542 mddev_get(mddev); 4543 spin_unlock(&all_mddevs_lock); 4544 4545 rv = mddev_lock(mddev); 4546 if (!rv) { 4547 rv = entry->show(mddev, page); 4548 mddev_unlock(mddev); 4549 } 4550 mddev_put(mddev); 4551 return rv; 4552 } 4553 4554 static ssize_t 4555 md_attr_store(struct kobject *kobj, struct attribute *attr, 4556 const char *page, size_t length) 4557 { 4558 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 4559 struct mddev *mddev = container_of(kobj, struct mddev, kobj); 4560 ssize_t rv; 4561 4562 if (!entry->store) 4563 return -EIO; 4564 if (!capable(CAP_SYS_ADMIN)) 4565 return -EACCES; 4566 spin_lock(&all_mddevs_lock); 4567 if (list_empty(&mddev->all_mddevs)) { 4568 spin_unlock(&all_mddevs_lock); 4569 return -EBUSY; 4570 } 4571 mddev_get(mddev); 4572 spin_unlock(&all_mddevs_lock); 4573 rv = mddev_lock(mddev); 4574 if (!rv) { 4575 rv = entry->store(mddev, page, length); 4576 mddev_unlock(mddev); 4577 } 4578 mddev_put(mddev); 4579 return rv; 4580 } 4581 4582 static void md_free(struct kobject *ko) 4583 { 4584 struct mddev *mddev = container_of(ko, struct mddev, kobj); 4585 4586 if (mddev->sysfs_state) 4587 sysfs_put(mddev->sysfs_state); 4588 4589 if (mddev->gendisk) { 4590 del_gendisk(mddev->gendisk); 4591 put_disk(mddev->gendisk); 4592 } 4593 if (mddev->queue) 4594 blk_cleanup_queue(mddev->queue); 4595 4596 kfree(mddev); 4597 } 4598 4599 static const struct sysfs_ops md_sysfs_ops = { 4600 .show = md_attr_show, 4601 .store = md_attr_store, 4602 }; 4603 static struct kobj_type md_ktype = { 4604 .release = md_free, 4605 .sysfs_ops = &md_sysfs_ops, 4606 .default_attrs = md_default_attrs, 4607 }; 4608 4609 int mdp_major = 0; 4610 4611 static void mddev_delayed_delete(struct work_struct *ws) 4612 { 4613 struct mddev *mddev = container_of(ws, struct mddev, del_work); 4614 4615 sysfs_remove_group(&mddev->kobj, &md_bitmap_group); 4616 kobject_del(&mddev->kobj); 4617 kobject_put(&mddev->kobj); 4618 } 4619 4620 static int md_alloc(dev_t dev, char *name) 4621 { 4622 static DEFINE_MUTEX(disks_mutex); 4623 struct mddev *mddev = mddev_find(dev); 4624 struct gendisk *disk; 4625 int partitioned; 4626 int shift; 4627 int unit; 4628 int error; 4629 4630 if (!mddev) 4631 return -ENODEV; 4632 4633 partitioned = (MAJOR(mddev->unit) != MD_MAJOR); 4634 shift = partitioned ? 
MdpMinorShift : 0; 4635 unit = MINOR(mddev->unit) >> shift; 4636 4637 /* wait for any previous instance of this device to be 4638 * completely removed (mddev_delayed_delete). 4639 */ 4640 flush_workqueue(md_misc_wq); 4641 4642 mutex_lock(&disks_mutex); 4643 error = -EEXIST; 4644 if (mddev->gendisk) 4645 goto abort; 4646 4647 if (name) { 4648 /* Need to ensure that 'name' is not a duplicate. 4649 */ 4650 struct mddev *mddev2; 4651 spin_lock(&all_mddevs_lock); 4652 4653 list_for_each_entry(mddev2, &all_mddevs, all_mddevs) 4654 if (mddev2->gendisk && 4655 strcmp(mddev2->gendisk->disk_name, name) == 0) { 4656 spin_unlock(&all_mddevs_lock); 4657 goto abort; 4658 } 4659 spin_unlock(&all_mddevs_lock); 4660 } 4661 4662 error = -ENOMEM; 4663 mddev->queue = blk_alloc_queue(GFP_KERNEL); 4664 if (!mddev->queue) 4665 goto abort; 4666 mddev->queue->queuedata = mddev; 4667 4668 blk_queue_make_request(mddev->queue, md_make_request); 4669 4670 disk = alloc_disk(1 << shift); 4671 if (!disk) { 4672 blk_cleanup_queue(mddev->queue); 4673 mddev->queue = NULL; 4674 goto abort; 4675 } 4676 disk->major = MAJOR(mddev->unit); 4677 disk->first_minor = unit << shift; 4678 if (name) 4679 strcpy(disk->disk_name, name); 4680 else if (partitioned) 4681 sprintf(disk->disk_name, "md_d%d", unit); 4682 else 4683 sprintf(disk->disk_name, "md%d", unit); 4684 disk->fops = &md_fops; 4685 disk->private_data = mddev; 4686 disk->queue = mddev->queue; 4687 blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA); 4688 /* Allow extended partitions. This makes the 4689 * 'mdp' device redundant, but we can't really 4690 * remove it now. 4691 */ 4692 disk->flags |= GENHD_FL_EXT_DEVT; 4693 mddev->gendisk = disk; 4694 /* As soon as we call add_disk(), another thread could get 4695 * through to md_open, so make sure it doesn't get too far 4696 */ 4697 mutex_lock(&mddev->open_mutex); 4698 add_disk(disk); 4699 4700 error = kobject_init_and_add(&mddev->kobj, &md_ktype, 4701 &disk_to_dev(disk)->kobj, "%s", "md"); 4702 if (error) { 4703 /* This isn't possible, but as kobject_init_and_add is marked 4704 * __must_check, we must do something with the result 4705 */ 4706 printk(KERN_WARNING "md: cannot register %s/md - name in use\n", 4707 disk->disk_name); 4708 error = 0; 4709 } 4710 if (mddev->kobj.sd && 4711 sysfs_create_group(&mddev->kobj, &md_bitmap_group)) 4712 printk(KERN_DEBUG "pointless warning\n"); 4713 mutex_unlock(&mddev->open_mutex); 4714 abort: 4715 mutex_unlock(&disks_mutex); 4716 if (!error && mddev->kobj.sd) { 4717 kobject_uevent(&mddev->kobj, KOBJ_ADD); 4718 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state"); 4719 } 4720 mddev_put(mddev); 4721 return error; 4722 } 4723 4724 static struct kobject *md_probe(dev_t dev, int *part, void *data) 4725 { 4726 md_alloc(dev, NULL); 4727 return NULL; 4728 } 4729 4730 static int add_named_array(const char *val, struct kernel_param *kp) 4731 { 4732 /* val must be "md_*" where * is not all digits. 4733 * We allocate an array with a large free minor number, and 4734 * set the name to val. val must not already be an active name. 
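 * Illustrative usage, assuming this handler is wired up as md_mod's
 * "new_array" module parameter:
 *   echo md_test > /sys/module/md_mod/parameters/new_array
 * allocates an otherwise empty array whose gendisk is named "md_test".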
4735 */ 4736 int len = strlen(val); 4737 char buf[DISK_NAME_LEN]; 4738 4739 while (len && val[len-1] == '\n') 4740 len--; 4741 if (len >= DISK_NAME_LEN) 4742 return -E2BIG; 4743 strlcpy(buf, val, len+1); 4744 if (strncmp(buf, "md_", 3) != 0) 4745 return -EINVAL; 4746 return md_alloc(0, buf); 4747 } 4748 4749 static void md_safemode_timeout(unsigned long data) 4750 { 4751 struct mddev *mddev = (struct mddev *) data; 4752 4753 if (!atomic_read(&mddev->writes_pending)) { 4754 mddev->safemode = 1; 4755 if (mddev->external) 4756 sysfs_notify_dirent_safe(mddev->sysfs_state); 4757 } 4758 md_wakeup_thread(mddev->thread); 4759 } 4760 4761 static int start_dirty_degraded; 4762 4763 int md_run(struct mddev *mddev) 4764 { 4765 int err; 4766 struct md_rdev *rdev; 4767 struct md_personality *pers; 4768 4769 if (list_empty(&mddev->disks)) 4770 /* cannot run an array with no devices.. */ 4771 return -EINVAL; 4772 4773 if (mddev->pers) 4774 return -EBUSY; 4775 /* Cannot run until previous stop completes properly */ 4776 if (mddev->sysfs_active) 4777 return -EBUSY; 4778 4779 /* 4780 * Analyze all RAID superblock(s) 4781 */ 4782 if (!mddev->raid_disks) { 4783 if (!mddev->persistent) 4784 return -EINVAL; 4785 analyze_sbs(mddev); 4786 } 4787 4788 if (mddev->level != LEVEL_NONE) 4789 request_module("md-level-%d", mddev->level); 4790 else if (mddev->clevel[0]) 4791 request_module("md-%s", mddev->clevel); 4792 4793 /* 4794 * Drop all container device buffers, from now on 4795 * the only valid external interface is through the md 4796 * device. 4797 */ 4798 list_for_each_entry(rdev, &mddev->disks, same_set) { 4799 if (test_bit(Faulty, &rdev->flags)) 4800 continue; 4801 sync_blockdev(rdev->bdev); 4802 invalidate_bdev(rdev->bdev); 4803 4804 /* perform some consistency tests on the device. 4805 * We don't want the data to overlap the metadata, 4806 * Internal Bitmap issues have been handled elsewhere. 4807 */ 4808 if (rdev->meta_bdev) { 4809 /* Nothing to check */; 4810 } else if (rdev->data_offset < rdev->sb_start) { 4811 if (mddev->dev_sectors && 4812 rdev->data_offset + mddev->dev_sectors 4813 > rdev->sb_start) { 4814 printk("md: %s: data overlaps metadata\n", 4815 mdname(mddev)); 4816 return -EINVAL; 4817 } 4818 } else { 4819 if (rdev->sb_start + rdev->sb_size/512 4820 > rdev->data_offset) { 4821 printk("md: %s: metadata overlaps data\n", 4822 mdname(mddev)); 4823 return -EINVAL; 4824 } 4825 } 4826 sysfs_notify_dirent_safe(rdev->sysfs_state); 4827 } 4828 4829 if (mddev->bio_set == NULL) 4830 mddev->bio_set = bioset_create(BIO_POOL_SIZE, 4831 sizeof(struct mddev *)); 4832 4833 spin_lock(&pers_lock); 4834 pers = find_pers(mddev->level, mddev->clevel); 4835 if (!pers || !try_module_get(pers->owner)) { 4836 spin_unlock(&pers_lock); 4837 if (mddev->level != LEVEL_NONE) 4838 printk(KERN_WARNING "md: personality for level %d is not loaded!\n", 4839 mddev->level); 4840 else 4841 printk(KERN_WARNING "md: personality for level %s is not loaded!\n", 4842 mddev->clevel); 4843 return -EINVAL; 4844 } 4845 mddev->pers = pers; 4846 spin_unlock(&pers_lock); 4847 if (mddev->level != pers->level) { 4848 mddev->level = pers->level; 4849 mddev->new_level = pers->level; 4850 } 4851 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 4852 4853 if (mddev->reshape_position != MaxSector && 4854 pers->start_reshape == NULL) { 4855 /* This personality cannot handle reshaping... 
*/ 4856 mddev->pers = NULL; 4857 module_put(pers->owner); 4858 return -EINVAL; 4859 } 4860 4861 if (pers->sync_request) { 4862 /* Warn if this is a potentially silly 4863 * configuration. 4864 */ 4865 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 4866 struct md_rdev *rdev2; 4867 int warned = 0; 4868 4869 list_for_each_entry(rdev, &mddev->disks, same_set) 4870 list_for_each_entry(rdev2, &mddev->disks, same_set) { 4871 if (rdev < rdev2 && 4872 rdev->bdev->bd_contains == 4873 rdev2->bdev->bd_contains) { 4874 printk(KERN_WARNING 4875 "%s: WARNING: %s appears to be" 4876 " on the same physical disk as" 4877 " %s.\n", 4878 mdname(mddev), 4879 bdevname(rdev->bdev,b), 4880 bdevname(rdev2->bdev,b2)); 4881 warned = 1; 4882 } 4883 } 4884 4885 if (warned) 4886 printk(KERN_WARNING 4887 "True protection against single-disk" 4888 " failure might be compromised.\n"); 4889 } 4890 4891 mddev->recovery = 0; 4892 /* may be over-ridden by personality */ 4893 mddev->resync_max_sectors = mddev->dev_sectors; 4894 4895 mddev->ok_start_degraded = start_dirty_degraded; 4896 4897 if (start_readonly && mddev->ro == 0) 4898 mddev->ro = 2; /* read-only, but switch on first write */ 4899 4900 err = mddev->pers->run(mddev); 4901 if (err) 4902 printk(KERN_ERR "md: pers->run() failed ...\n"); 4903 else if (mddev->pers->size(mddev, 0, 0) < mddev->array_sectors) { 4904 WARN_ONCE(!mddev->external_size, "%s: default size too small," 4905 " but 'external_size' not in effect?\n", __func__); 4906 printk(KERN_ERR 4907 "md: invalid array_size %llu > default size %llu\n", 4908 (unsigned long long)mddev->array_sectors / 2, 4909 (unsigned long long)mddev->pers->size(mddev, 0, 0) / 2); 4910 err = -EINVAL; 4911 mddev->pers->stop(mddev); 4912 } 4913 if (err == 0 && mddev->pers->sync_request) { 4914 err = bitmap_create(mddev); 4915 if (err) { 4916 printk(KERN_ERR "%s: failed to create bitmap (%d)\n", 4917 mdname(mddev), err); 4918 mddev->pers->stop(mddev); 4919 } 4920 } 4921 if (err) { 4922 module_put(mddev->pers->owner); 4923 mddev->pers = NULL; 4924 bitmap_destroy(mddev); 4925 return err; 4926 } 4927 if (mddev->pers->sync_request) { 4928 if (mddev->kobj.sd && 4929 sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 4930 printk(KERN_WARNING 4931 "md: cannot register extra attributes for %s\n", 4932 mdname(mddev)); 4933 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action"); 4934 } else if (mddev->ro == 2) /* auto-readonly not meaningful */ 4935 mddev->ro = 0; 4936 4937 atomic_set(&mddev->writes_pending,0); 4938 atomic_set(&mddev->max_corr_read_errors, 4939 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS); 4940 mddev->safemode = 0; 4941 mddev->safemode_timer.function = md_safemode_timeout; 4942 mddev->safemode_timer.data = (unsigned long) mddev; 4943 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */ 4944 mddev->in_sync = 1; 4945 smp_wmb(); 4946 mddev->ready = 1; 4947 list_for_each_entry(rdev, &mddev->disks, same_set) 4948 if (rdev->raid_disk >= 0) 4949 if (sysfs_link_rdev(mddev, rdev)) 4950 /* failure here is OK */; 4951 4952 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4953 4954 if (mddev->flags) 4955 md_update_sb(mddev, 0); 4956 4957 md_new_event(mddev); 4958 sysfs_notify_dirent_safe(mddev->sysfs_state); 4959 sysfs_notify_dirent_safe(mddev->sysfs_action); 4960 sysfs_notify(&mddev->kobj, NULL, "degraded"); 4961 return 0; 4962 } 4963 EXPORT_SYMBOL_GPL(md_run); 4964 4965 static int do_md_run(struct mddev *mddev) 4966 { 4967 int err; 4968 4969 err = md_run(mddev); 4970 if (err) 4971 goto out; 4972 err = 
bitmap_load(mddev); 4973 if (err) { 4974 bitmap_destroy(mddev); 4975 goto out; 4976 } 4977 4978 md_wakeup_thread(mddev->thread); 4979 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 4980 4981 set_capacity(mddev->gendisk, mddev->array_sectors); 4982 revalidate_disk(mddev->gendisk); 4983 mddev->changed = 1; 4984 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 4985 out: 4986 return err; 4987 } 4988 4989 static int restart_array(struct mddev *mddev) 4990 { 4991 struct gendisk *disk = mddev->gendisk; 4992 4993 /* Complain if it has no devices */ 4994 if (list_empty(&mddev->disks)) 4995 return -ENXIO; 4996 if (!mddev->pers) 4997 return -EINVAL; 4998 if (!mddev->ro) 4999 return -EBUSY; 5000 mddev->safemode = 0; 5001 mddev->ro = 0; 5002 set_disk_ro(disk, 0); 5003 printk(KERN_INFO "md: %s switched to read-write mode.\n", 5004 mdname(mddev)); 5005 /* Kick recovery or resync if necessary */ 5006 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5007 md_wakeup_thread(mddev->thread); 5008 md_wakeup_thread(mddev->sync_thread); 5009 sysfs_notify_dirent_safe(mddev->sysfs_state); 5010 return 0; 5011 } 5012 5013 /* similar to deny_write_access, but accounts for our holding a reference 5014 * to the file ourselves */ 5015 static int deny_bitmap_write_access(struct file * file) 5016 { 5017 struct inode *inode = file->f_mapping->host; 5018 5019 spin_lock(&inode->i_lock); 5020 if (atomic_read(&inode->i_writecount) > 1) { 5021 spin_unlock(&inode->i_lock); 5022 return -ETXTBSY; 5023 } 5024 atomic_set(&inode->i_writecount, -1); 5025 spin_unlock(&inode->i_lock); 5026 5027 return 0; 5028 } 5029 5030 void restore_bitmap_write_access(struct file *file) 5031 { 5032 struct inode *inode = file->f_mapping->host; 5033 5034 spin_lock(&inode->i_lock); 5035 atomic_set(&inode->i_writecount, 1); 5036 spin_unlock(&inode->i_lock); 5037 } 5038 5039 static void md_clean(struct mddev *mddev) 5040 { 5041 mddev->array_sectors = 0; 5042 mddev->external_size = 0; 5043 mddev->dev_sectors = 0; 5044 mddev->raid_disks = 0; 5045 mddev->recovery_cp = 0; 5046 mddev->resync_min = 0; 5047 mddev->resync_max = MaxSector; 5048 mddev->reshape_position = MaxSector; 5049 mddev->external = 0; 5050 mddev->persistent = 0; 5051 mddev->level = LEVEL_NONE; 5052 mddev->clevel[0] = 0; 5053 mddev->flags = 0; 5054 mddev->ro = 0; 5055 mddev->metadata_type[0] = 0; 5056 mddev->chunk_sectors = 0; 5057 mddev->ctime = mddev->utime = 0; 5058 mddev->layout = 0; 5059 mddev->max_disks = 0; 5060 mddev->events = 0; 5061 mddev->can_decrease_events = 0; 5062 mddev->delta_disks = 0; 5063 mddev->new_level = LEVEL_NONE; 5064 mddev->new_layout = 0; 5065 mddev->new_chunk_sectors = 0; 5066 mddev->curr_resync = 0; 5067 mddev->resync_mismatches = 0; 5068 mddev->suspend_lo = mddev->suspend_hi = 0; 5069 mddev->sync_speed_min = mddev->sync_speed_max = 0; 5070 mddev->recovery = 0; 5071 mddev->in_sync = 0; 5072 mddev->changed = 0; 5073 mddev->degraded = 0; 5074 mddev->safemode = 0; 5075 mddev->bitmap_info.offset = 0; 5076 mddev->bitmap_info.default_offset = 0; 5077 mddev->bitmap_info.chunksize = 0; 5078 mddev->bitmap_info.daemon_sleep = 0; 5079 mddev->bitmap_info.max_write_behind = 0; 5080 } 5081 5082 static void __md_stop_writes(struct mddev *mddev) 5083 { 5084 if (mddev->sync_thread) { 5085 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5086 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 5087 reap_sync_thread(mddev); 5088 } 5089 5090 del_timer_sync(&mddev->safemode_timer); 5091 5092 bitmap_flush(mddev); 5093 md_super_wait(mddev); 5094 5095 if 
(!mddev->in_sync || mddev->flags) { 5096 /* mark array as shutdown cleanly */ 5097 mddev->in_sync = 1; 5098 md_update_sb(mddev, 1); 5099 } 5100 } 5101 5102 void md_stop_writes(struct mddev *mddev) 5103 { 5104 mddev_lock(mddev); 5105 __md_stop_writes(mddev); 5106 mddev_unlock(mddev); 5107 } 5108 EXPORT_SYMBOL_GPL(md_stop_writes); 5109 5110 void md_stop(struct mddev *mddev) 5111 { 5112 mddev->ready = 0; 5113 mddev->pers->stop(mddev); 5114 if (mddev->pers->sync_request && mddev->to_remove == NULL) 5115 mddev->to_remove = &md_redundancy_group; 5116 module_put(mddev->pers->owner); 5117 mddev->pers = NULL; 5118 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5119 } 5120 EXPORT_SYMBOL_GPL(md_stop); 5121 5122 static int md_set_readonly(struct mddev *mddev, int is_open) 5123 { 5124 int err = 0; 5125 mutex_lock(&mddev->open_mutex); 5126 if (atomic_read(&mddev->openers) > is_open) { 5127 printk("md: %s still in use.\n",mdname(mddev)); 5128 err = -EBUSY; 5129 goto out; 5130 } 5131 if (mddev->pers) { 5132 __md_stop_writes(mddev); 5133 5134 err = -ENXIO; 5135 if (mddev->ro==1) 5136 goto out; 5137 mddev->ro = 1; 5138 set_disk_ro(mddev->gendisk, 1); 5139 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5140 sysfs_notify_dirent_safe(mddev->sysfs_state); 5141 err = 0; 5142 } 5143 out: 5144 mutex_unlock(&mddev->open_mutex); 5145 return err; 5146 } 5147 5148 /* mode: 5149 * 0 - completely stop and dis-assemble array 5150 * 2 - stop but do not disassemble array 5151 */ 5152 static int do_md_stop(struct mddev * mddev, int mode, int is_open) 5153 { 5154 struct gendisk *disk = mddev->gendisk; 5155 struct md_rdev *rdev; 5156 5157 mutex_lock(&mddev->open_mutex); 5158 if (atomic_read(&mddev->openers) > is_open || 5159 mddev->sysfs_active) { 5160 printk("md: %s still in use.\n",mdname(mddev)); 5161 mutex_unlock(&mddev->open_mutex); 5162 return -EBUSY; 5163 } 5164 5165 if (mddev->pers) { 5166 if (mddev->ro) 5167 set_disk_ro(disk, 0); 5168 5169 __md_stop_writes(mddev); 5170 md_stop(mddev); 5171 mddev->queue->merge_bvec_fn = NULL; 5172 mddev->queue->backing_dev_info.congested_fn = NULL; 5173 5174 /* tell userspace to handle 'inactive' */ 5175 sysfs_notify_dirent_safe(mddev->sysfs_state); 5176 5177 list_for_each_entry(rdev, &mddev->disks, same_set) 5178 if (rdev->raid_disk >= 0) 5179 sysfs_unlink_rdev(mddev, rdev); 5180 5181 set_capacity(disk, 0); 5182 mutex_unlock(&mddev->open_mutex); 5183 mddev->changed = 1; 5184 revalidate_disk(disk); 5185 5186 if (mddev->ro) 5187 mddev->ro = 0; 5188 } else 5189 mutex_unlock(&mddev->open_mutex); 5190 /* 5191 * Free resources if final stop 5192 */ 5193 if (mode == 0) { 5194 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); 5195 5196 bitmap_destroy(mddev); 5197 if (mddev->bitmap_info.file) { 5198 restore_bitmap_write_access(mddev->bitmap_info.file); 5199 fput(mddev->bitmap_info.file); 5200 mddev->bitmap_info.file = NULL; 5201 } 5202 mddev->bitmap_info.offset = 0; 5203 5204 export_array(mddev); 5205 5206 md_clean(mddev); 5207 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 5208 if (mddev->hold_active == UNTIL_STOP) 5209 mddev->hold_active = 0; 5210 } 5211 blk_integrity_unregister(disk); 5212 md_new_event(mddev); 5213 sysfs_notify_dirent_safe(mddev->sysfs_state); 5214 return 0; 5215 } 5216 5217 #ifndef MODULE 5218 static void autorun_array(struct mddev *mddev) 5219 { 5220 struct md_rdev *rdev; 5221 int err; 5222 5223 if (list_empty(&mddev->disks)) 5224 return; 5225 5226 printk(KERN_INFO "md: running: "); 5227 5228 list_for_each_entry(rdev, &mddev->disks, same_set) { 
5229 char b[BDEVNAME_SIZE]; 5230 printk("<%s>", bdevname(rdev->bdev,b)); 5231 } 5232 printk("\n"); 5233 5234 err = do_md_run(mddev); 5235 if (err) { 5236 printk(KERN_WARNING "md: do_md_run() returned %d\n", err); 5237 do_md_stop(mddev, 0, 0); 5238 } 5239 } 5240 5241 /* 5242 * lets try to run arrays based on all disks that have arrived 5243 * until now. (those are in pending_raid_disks) 5244 * 5245 * the method: pick the first pending disk, collect all disks with 5246 * the same UUID, remove all from the pending list and put them into 5247 * the 'same_array' list. Then order this list based on superblock 5248 * update time (freshest comes first), kick out 'old' disks and 5249 * compare superblocks. If everything's fine then run it. 5250 * 5251 * If "unit" is allocated, then bump its reference count 5252 */ 5253 static void autorun_devices(int part) 5254 { 5255 struct md_rdev *rdev0, *rdev, *tmp; 5256 struct mddev *mddev; 5257 char b[BDEVNAME_SIZE]; 5258 5259 printk(KERN_INFO "md: autorun ...\n"); 5260 while (!list_empty(&pending_raid_disks)) { 5261 int unit; 5262 dev_t dev; 5263 LIST_HEAD(candidates); 5264 rdev0 = list_entry(pending_raid_disks.next, 5265 struct md_rdev, same_set); 5266 5267 printk(KERN_INFO "md: considering %s ...\n", 5268 bdevname(rdev0->bdev,b)); 5269 INIT_LIST_HEAD(&candidates); 5270 rdev_for_each_list(rdev, tmp, &pending_raid_disks) 5271 if (super_90_load(rdev, rdev0, 0) >= 0) { 5272 printk(KERN_INFO "md: adding %s ...\n", 5273 bdevname(rdev->bdev,b)); 5274 list_move(&rdev->same_set, &candidates); 5275 } 5276 /* 5277 * now we have a set of devices, with all of them having 5278 * mostly sane superblocks. It's time to allocate the 5279 * mddev. 5280 */ 5281 if (part) { 5282 dev = MKDEV(mdp_major, 5283 rdev0->preferred_minor << MdpMinorShift); 5284 unit = MINOR(dev) >> MdpMinorShift; 5285 } else { 5286 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor); 5287 unit = MINOR(dev); 5288 } 5289 if (rdev0->preferred_minor != unit) { 5290 printk(KERN_INFO "md: unit number in %s is bad: %d\n", 5291 bdevname(rdev0->bdev, b), rdev0->preferred_minor); 5292 break; 5293 } 5294 5295 md_probe(dev, NULL, NULL); 5296 mddev = mddev_find(dev); 5297 if (!mddev || !mddev->gendisk) { 5298 if (mddev) 5299 mddev_put(mddev); 5300 printk(KERN_ERR 5301 "md: cannot allocate memory for md drive.\n"); 5302 break; 5303 } 5304 if (mddev_lock(mddev)) 5305 printk(KERN_WARNING "md: %s locked, cannot run\n", 5306 mdname(mddev)); 5307 else if (mddev->raid_disks || mddev->major_version 5308 || !list_empty(&mddev->disks)) { 5309 printk(KERN_WARNING 5310 "md: %s already running, cannot run %s\n", 5311 mdname(mddev), bdevname(rdev0->bdev,b)); 5312 mddev_unlock(mddev); 5313 } else { 5314 printk(KERN_INFO "md: created %s\n", mdname(mddev)); 5315 mddev->persistent = 1; 5316 rdev_for_each_list(rdev, tmp, &candidates) { 5317 list_del_init(&rdev->same_set); 5318 if (bind_rdev_to_array(rdev, mddev)) 5319 export_rdev(rdev); 5320 } 5321 autorun_array(mddev); 5322 mddev_unlock(mddev); 5323 } 5324 /* on success, candidates will be empty, on error 5325 * it won't... 5326 */ 5327 rdev_for_each_list(rdev, tmp, &candidates) { 5328 list_del_init(&rdev->same_set); 5329 export_rdev(rdev); 5330 } 5331 mddev_put(mddev); 5332 } 5333 printk(KERN_INFO "md: ... 
autorun DONE.\n"); 5334 } 5335 #endif /* !MODULE */ 5336 5337 static int get_version(void __user * arg) 5338 { 5339 mdu_version_t ver; 5340 5341 ver.major = MD_MAJOR_VERSION; 5342 ver.minor = MD_MINOR_VERSION; 5343 ver.patchlevel = MD_PATCHLEVEL_VERSION; 5344 5345 if (copy_to_user(arg, &ver, sizeof(ver))) 5346 return -EFAULT; 5347 5348 return 0; 5349 } 5350 5351 static int get_array_info(struct mddev * mddev, void __user * arg) 5352 { 5353 mdu_array_info_t info; 5354 int nr,working,insync,failed,spare; 5355 struct md_rdev *rdev; 5356 5357 nr=working=insync=failed=spare=0; 5358 list_for_each_entry(rdev, &mddev->disks, same_set) { 5359 nr++; 5360 if (test_bit(Faulty, &rdev->flags)) 5361 failed++; 5362 else { 5363 working++; 5364 if (test_bit(In_sync, &rdev->flags)) 5365 insync++; 5366 else 5367 spare++; 5368 } 5369 } 5370 5371 info.major_version = mddev->major_version; 5372 info.minor_version = mddev->minor_version; 5373 info.patch_version = MD_PATCHLEVEL_VERSION; 5374 info.ctime = mddev->ctime; 5375 info.level = mddev->level; 5376 info.size = mddev->dev_sectors / 2; 5377 if (info.size != mddev->dev_sectors / 2) /* overflow */ 5378 info.size = -1; 5379 info.nr_disks = nr; 5380 info.raid_disks = mddev->raid_disks; 5381 info.md_minor = mddev->md_minor; 5382 info.not_persistent= !mddev->persistent; 5383 5384 info.utime = mddev->utime; 5385 info.state = 0; 5386 if (mddev->in_sync) 5387 info.state = (1<<MD_SB_CLEAN); 5388 if (mddev->bitmap && mddev->bitmap_info.offset) 5389 info.state = (1<<MD_SB_BITMAP_PRESENT); 5390 info.active_disks = insync; 5391 info.working_disks = working; 5392 info.failed_disks = failed; 5393 info.spare_disks = spare; 5394 5395 info.layout = mddev->layout; 5396 info.chunk_size = mddev->chunk_sectors << 9; 5397 5398 if (copy_to_user(arg, &info, sizeof(info))) 5399 return -EFAULT; 5400 5401 return 0; 5402 } 5403 5404 static int get_bitmap_file(struct mddev * mddev, void __user * arg) 5405 { 5406 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ 5407 char *ptr, *buf = NULL; 5408 int err = -ENOMEM; 5409 5410 if (md_allow_write(mddev)) 5411 file = kmalloc(sizeof(*file), GFP_NOIO); 5412 else 5413 file = kmalloc(sizeof(*file), GFP_KERNEL); 5414 5415 if (!file) 5416 goto out; 5417 5418 /* bitmap disabled, zero the first byte and copy out */ 5419 if (!mddev->bitmap || !mddev->bitmap->file) { 5420 file->pathname[0] = '\0'; 5421 goto copy_out; 5422 } 5423 5424 buf = kmalloc(sizeof(file->pathname), GFP_KERNEL); 5425 if (!buf) 5426 goto out; 5427 5428 ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname)); 5429 if (IS_ERR(ptr)) 5430 goto out; 5431 5432 strcpy(file->pathname, ptr); 5433 5434 copy_out: 5435 err = 0; 5436 if (copy_to_user(arg, file, sizeof(*file))) 5437 err = -EFAULT; 5438 out: 5439 kfree(buf); 5440 kfree(file); 5441 return err; 5442 } 5443 5444 static int get_disk_info(struct mddev * mddev, void __user * arg) 5445 { 5446 mdu_disk_info_t info; 5447 struct md_rdev *rdev; 5448 5449 if (copy_from_user(&info, arg, sizeof(info))) 5450 return -EFAULT; 5451 5452 rdev = find_rdev_nr(mddev, info.number); 5453 if (rdev) { 5454 info.major = MAJOR(rdev->bdev->bd_dev); 5455 info.minor = MINOR(rdev->bdev->bd_dev); 5456 info.raid_disk = rdev->raid_disk; 5457 info.state = 0; 5458 if (test_bit(Faulty, &rdev->flags)) 5459 info.state |= (1<<MD_DISK_FAULTY); 5460 else if (test_bit(In_sync, &rdev->flags)) { 5461 info.state |= (1<<MD_DISK_ACTIVE); 5462 info.state |= (1<<MD_DISK_SYNC); 5463 } 5464 if (test_bit(WriteMostly, &rdev->flags)) 5465 info.state |= 
(1<<MD_DISK_WRITEMOSTLY); 5466 } else { 5467 info.major = info.minor = 0; 5468 info.raid_disk = -1; 5469 info.state = (1<<MD_DISK_REMOVED); 5470 } 5471 5472 if (copy_to_user(arg, &info, sizeof(info))) 5473 return -EFAULT; 5474 5475 return 0; 5476 } 5477 5478 static int add_new_disk(struct mddev * mddev, mdu_disk_info_t *info) 5479 { 5480 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 5481 struct md_rdev *rdev; 5482 dev_t dev = MKDEV(info->major,info->minor); 5483 5484 if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) 5485 return -EOVERFLOW; 5486 5487 if (!mddev->raid_disks) { 5488 int err; 5489 /* expecting a device which has a superblock */ 5490 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); 5491 if (IS_ERR(rdev)) { 5492 printk(KERN_WARNING 5493 "md: md_import_device returned %ld\n", 5494 PTR_ERR(rdev)); 5495 return PTR_ERR(rdev); 5496 } 5497 if (!list_empty(&mddev->disks)) { 5498 struct md_rdev *rdev0 5499 = list_entry(mddev->disks.next, 5500 struct md_rdev, same_set); 5501 err = super_types[mddev->major_version] 5502 .load_super(rdev, rdev0, mddev->minor_version); 5503 if (err < 0) { 5504 printk(KERN_WARNING 5505 "md: %s has different UUID to %s\n", 5506 bdevname(rdev->bdev,b), 5507 bdevname(rdev0->bdev,b2)); 5508 export_rdev(rdev); 5509 return -EINVAL; 5510 } 5511 } 5512 err = bind_rdev_to_array(rdev, mddev); 5513 if (err) 5514 export_rdev(rdev); 5515 return err; 5516 } 5517 5518 /* 5519 * add_new_disk can be used once the array is assembled 5520 * to add "hot spares". They must already have a superblock 5521 * written 5522 */ 5523 if (mddev->pers) { 5524 int err; 5525 if (!mddev->pers->hot_add_disk) { 5526 printk(KERN_WARNING 5527 "%s: personality does not support diskops!\n", 5528 mdname(mddev)); 5529 return -EINVAL; 5530 } 5531 if (mddev->persistent) 5532 rdev = md_import_device(dev, mddev->major_version, 5533 mddev->minor_version); 5534 else 5535 rdev = md_import_device(dev, -1, -1); 5536 if (IS_ERR(rdev)) { 5537 printk(KERN_WARNING 5538 "md: md_import_device returned %ld\n", 5539 PTR_ERR(rdev)); 5540 return PTR_ERR(rdev); 5541 } 5542 /* set saved_raid_disk if appropriate */ 5543 if (!mddev->persistent) { 5544 if (info->state & (1<<MD_DISK_SYNC) && 5545 info->raid_disk < mddev->raid_disks) { 5546 rdev->raid_disk = info->raid_disk; 5547 set_bit(In_sync, &rdev->flags); 5548 } else 5549 rdev->raid_disk = -1; 5550 } else 5551 super_types[mddev->major_version]. 5552 validate_super(mddev, rdev); 5553 if ((info->state & (1<<MD_DISK_SYNC)) && 5554 (!test_bit(In_sync, &rdev->flags) || 5555 rdev->raid_disk != info->raid_disk)) { 5556 /* This was a hot-add request, but events doesn't 5557 * match, so reject it. 5558 */ 5559 export_rdev(rdev); 5560 return -EINVAL; 5561 } 5562 5563 if (test_bit(In_sync, &rdev->flags)) 5564 rdev->saved_raid_disk = rdev->raid_disk; 5565 else 5566 rdev->saved_raid_disk = -1; 5567 5568 clear_bit(In_sync, &rdev->flags); /* just to be sure */ 5569 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 5570 set_bit(WriteMostly, &rdev->flags); 5571 else 5572 clear_bit(WriteMostly, &rdev->flags); 5573 5574 rdev->raid_disk = -1; 5575 err = bind_rdev_to_array(rdev, mddev); 5576 if (!err && !mddev->pers->hot_remove_disk) { 5577 /* If there is hot_add_disk but no hot_remove_disk 5578 * then added disks for geometry changes, 5579 * and should be added immediately. 5580 */ 5581 super_types[mddev->major_version]. 
5582 validate_super(mddev, rdev); 5583 err = mddev->pers->hot_add_disk(mddev, rdev); 5584 if (err) 5585 unbind_rdev_from_array(rdev); 5586 } 5587 if (err) 5588 export_rdev(rdev); 5589 else 5590 sysfs_notify_dirent_safe(rdev->sysfs_state); 5591 5592 md_update_sb(mddev, 1); 5593 if (mddev->degraded) 5594 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 5595 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5596 if (!err) 5597 md_new_event(mddev); 5598 md_wakeup_thread(mddev->thread); 5599 return err; 5600 } 5601 5602 /* otherwise, add_new_disk is only allowed 5603 * for major_version==0 superblocks 5604 */ 5605 if (mddev->major_version != 0) { 5606 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n", 5607 mdname(mddev)); 5608 return -EINVAL; 5609 } 5610 5611 if (!(info->state & (1<<MD_DISK_FAULTY))) { 5612 int err; 5613 rdev = md_import_device(dev, -1, 0); 5614 if (IS_ERR(rdev)) { 5615 printk(KERN_WARNING 5616 "md: error, md_import_device() returned %ld\n", 5617 PTR_ERR(rdev)); 5618 return PTR_ERR(rdev); 5619 } 5620 rdev->desc_nr = info->number; 5621 if (info->raid_disk < mddev->raid_disks) 5622 rdev->raid_disk = info->raid_disk; 5623 else 5624 rdev->raid_disk = -1; 5625 5626 if (rdev->raid_disk < mddev->raid_disks) 5627 if (info->state & (1<<MD_DISK_SYNC)) 5628 set_bit(In_sync, &rdev->flags); 5629 5630 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 5631 set_bit(WriteMostly, &rdev->flags); 5632 5633 if (!mddev->persistent) { 5634 printk(KERN_INFO "md: nonpersistent superblock ...\n"); 5635 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; 5636 } else 5637 rdev->sb_start = calc_dev_sboffset(rdev); 5638 rdev->sectors = rdev->sb_start; 5639 5640 err = bind_rdev_to_array(rdev, mddev); 5641 if (err) { 5642 export_rdev(rdev); 5643 return err; 5644 } 5645 } 5646 5647 return 0; 5648 } 5649 5650 static int hot_remove_disk(struct mddev * mddev, dev_t dev) 5651 { 5652 char b[BDEVNAME_SIZE]; 5653 struct md_rdev *rdev; 5654 5655 rdev = find_rdev(mddev, dev); 5656 if (!rdev) 5657 return -ENXIO; 5658 5659 if (rdev->raid_disk >= 0) 5660 goto busy; 5661 5662 kick_rdev_from_array(rdev); 5663 md_update_sb(mddev, 1); 5664 md_new_event(mddev); 5665 5666 return 0; 5667 busy: 5668 printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n", 5669 bdevname(rdev->bdev,b), mdname(mddev)); 5670 return -EBUSY; 5671 } 5672 5673 static int hot_add_disk(struct mddev * mddev, dev_t dev) 5674 { 5675 char b[BDEVNAME_SIZE]; 5676 int err; 5677 struct md_rdev *rdev; 5678 5679 if (!mddev->pers) 5680 return -ENODEV; 5681 5682 if (mddev->major_version != 0) { 5683 printk(KERN_WARNING "%s: HOT_ADD may only be used with" 5684 " version-0 superblocks.\n", 5685 mdname(mddev)); 5686 return -EINVAL; 5687 } 5688 if (!mddev->pers->hot_add_disk) { 5689 printk(KERN_WARNING 5690 "%s: personality does not support diskops!\n", 5691 mdname(mddev)); 5692 return -EINVAL; 5693 } 5694 5695 rdev = md_import_device(dev, -1, 0); 5696 if (IS_ERR(rdev)) { 5697 printk(KERN_WARNING 5698 "md: error, md_import_device() returned %ld\n", 5699 PTR_ERR(rdev)); 5700 return -EINVAL; 5701 } 5702 5703 if (mddev->persistent) 5704 rdev->sb_start = calc_dev_sboffset(rdev); 5705 else 5706 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; 5707 5708 rdev->sectors = rdev->sb_start; 5709 5710 if (test_bit(Faulty, &rdev->flags)) { 5711 printk(KERN_WARNING 5712 "md: can not hot-add faulty %s disk to %s!\n", 5713 bdevname(rdev->bdev,b), mdname(mddev)); 5714 err = -EINVAL; 5715 goto abort_export; 5716 } 5717 clear_bit(In_sync, &rdev->flags); 5718 
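	/* The imported device enters the array as a bare spare: no descriptor
	 * slot and no remembered raid_disk (both forced to -1 below), and
	 * In_sync was cleared above.
	 */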
rdev->desc_nr = -1; 5719 rdev->saved_raid_disk = -1; 5720 err = bind_rdev_to_array(rdev, mddev); 5721 if (err) 5722 goto abort_export; 5723 5724 /* 5725 * The rest should better be atomic, we can have disk failures 5726 * noticed in interrupt contexts ... 5727 */ 5728 5729 rdev->raid_disk = -1; 5730 5731 md_update_sb(mddev, 1); 5732 5733 /* 5734 * Kick recovery, maybe this spare has to be added to the 5735 * array immediately. 5736 */ 5737 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5738 md_wakeup_thread(mddev->thread); 5739 md_new_event(mddev); 5740 return 0; 5741 5742 abort_export: 5743 export_rdev(rdev); 5744 return err; 5745 } 5746 5747 static int set_bitmap_file(struct mddev *mddev, int fd) 5748 { 5749 int err; 5750 5751 if (mddev->pers) { 5752 if (!mddev->pers->quiesce) 5753 return -EBUSY; 5754 if (mddev->recovery || mddev->sync_thread) 5755 return -EBUSY; 5756 /* we should be able to change the bitmap.. */ 5757 } 5758 5759 5760 if (fd >= 0) { 5761 if (mddev->bitmap) 5762 return -EEXIST; /* cannot add when bitmap is present */ 5763 mddev->bitmap_info.file = fget(fd); 5764 5765 if (mddev->bitmap_info.file == NULL) { 5766 printk(KERN_ERR "%s: error: failed to get bitmap file\n", 5767 mdname(mddev)); 5768 return -EBADF; 5769 } 5770 5771 err = deny_bitmap_write_access(mddev->bitmap_info.file); 5772 if (err) { 5773 printk(KERN_ERR "%s: error: bitmap file is already in use\n", 5774 mdname(mddev)); 5775 fput(mddev->bitmap_info.file); 5776 mddev->bitmap_info.file = NULL; 5777 return err; 5778 } 5779 mddev->bitmap_info.offset = 0; /* file overrides offset */ 5780 } else if (mddev->bitmap == NULL) 5781 return -ENOENT; /* cannot remove what isn't there */ 5782 err = 0; 5783 if (mddev->pers) { 5784 mddev->pers->quiesce(mddev, 1); 5785 if (fd >= 0) { 5786 err = bitmap_create(mddev); 5787 if (!err) 5788 err = bitmap_load(mddev); 5789 } 5790 if (fd < 0 || err) { 5791 bitmap_destroy(mddev); 5792 fd = -1; /* make sure to put the file */ 5793 } 5794 mddev->pers->quiesce(mddev, 0); 5795 } 5796 if (fd < 0) { 5797 if (mddev->bitmap_info.file) { 5798 restore_bitmap_write_access(mddev->bitmap_info.file); 5799 fput(mddev->bitmap_info.file); 5800 } 5801 mddev->bitmap_info.file = NULL; 5802 } 5803 5804 return err; 5805 } 5806 5807 /* 5808 * set_array_info is used in two different ways 5809 * The original usage is when creating a new array. 5810 * In this usage, raid_disks is > 0 and it together with 5811 * level, size, not_persistent,layout,chunksize determine the 5812 * shape of the array. 5813 * This will always create an array with a type-0.90.0 superblock. 5814 * The newer usage is when assembling an array. 5815 * In this case raid_disks will be 0, and the major_version field is 5816 * used to determine which style super-blocks are to be found on the devices. 5817 * The minor and patch _version numbers are also kept in case the 5818 * super_block handler wishes to interpret them. 5819 */ 5820 static int set_array_info(struct mddev * mddev, mdu_array_info_t *info) 5821 { 5822 5823 if (info->raid_disks == 0) { 5824 /* just setting version number for superblock loading */ 5825 if (info->major_version < 0 || 5826 info->major_version >= ARRAY_SIZE(super_types) || 5827 super_types[info->major_version].name == NULL) { 5828 /* maybe try to auto-load a module?
*/ 5829 printk(KERN_INFO 5830 "md: superblock version %d not known\n", 5831 info->major_version); 5832 return -EINVAL; 5833 } 5834 mddev->major_version = info->major_version; 5835 mddev->minor_version = info->minor_version; 5836 mddev->patch_version = info->patch_version; 5837 mddev->persistent = !info->not_persistent; 5838 /* ensure mddev_put doesn't delete this now that there 5839 * is some minimal configuration. 5840 */ 5841 mddev->ctime = get_seconds(); 5842 return 0; 5843 } 5844 mddev->major_version = MD_MAJOR_VERSION; 5845 mddev->minor_version = MD_MINOR_VERSION; 5846 mddev->patch_version = MD_PATCHLEVEL_VERSION; 5847 mddev->ctime = get_seconds(); 5848 5849 mddev->level = info->level; 5850 mddev->clevel[0] = 0; 5851 mddev->dev_sectors = 2 * (sector_t)info->size; 5852 mddev->raid_disks = info->raid_disks; 5853 /* don't set md_minor, it is determined by which /dev/md* was 5854 * opened 5855 */ 5856 if (info->state & (1<<MD_SB_CLEAN)) 5857 mddev->recovery_cp = MaxSector; 5858 else 5859 mddev->recovery_cp = 0; 5860 mddev->persistent = ! info->not_persistent; 5861 mddev->external = 0; 5862 5863 mddev->layout = info->layout; 5864 mddev->chunk_sectors = info->chunk_size >> 9; 5865 5866 mddev->max_disks = MD_SB_DISKS; 5867 5868 if (mddev->persistent) 5869 mddev->flags = 0; 5870 set_bit(MD_CHANGE_DEVS, &mddev->flags); 5871 5872 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; 5873 mddev->bitmap_info.offset = 0; 5874 5875 mddev->reshape_position = MaxSector; 5876 5877 /* 5878 * Generate a 128 bit UUID 5879 */ 5880 get_random_bytes(mddev->uuid, 16); 5881 5882 mddev->new_level = mddev->level; 5883 mddev->new_chunk_sectors = mddev->chunk_sectors; 5884 mddev->new_layout = mddev->layout; 5885 mddev->delta_disks = 0; 5886 5887 return 0; 5888 } 5889 5890 void md_set_array_sectors(struct mddev *mddev, sector_t array_sectors) 5891 { 5892 WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__); 5893 5894 if (mddev->external_size) 5895 return; 5896 5897 mddev->array_sectors = array_sectors; 5898 } 5899 EXPORT_SYMBOL(md_set_array_sectors); 5900 5901 static int update_size(struct mddev *mddev, sector_t num_sectors) 5902 { 5903 struct md_rdev *rdev; 5904 int rv; 5905 int fit = (num_sectors == 0); 5906 5907 if (mddev->pers->resize == NULL) 5908 return -EINVAL; 5909 /* The "num_sectors" is the number of sectors of each device that 5910 * is used. This can only make sense for arrays with redundancy. 5911 * linear and raid0 always use whatever space is available. We can only 5912 * consider changing this number if no resync or reconstruction is 5913 * happening, and if the new size is acceptable. It must fit before the 5914 * sb_start or, if that is <data_offset, it must fit before the size 5915 * of each device. If num_sectors is zero, we find the largest size 5916 * that fits. 5917 */ 5918 if (mddev->sync_thread) 5919 return -EBUSY; 5920 if (mddev->bitmap) 5921 /* Sorry, cannot grow a bitmap yet, just remove it, 5922 * grow, and re-add.
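 * An illustrative userspace sequence for that (assuming mdadm) is:
 *   mdadm --grow /dev/md0 --bitmap=none
 *   mdadm --grow /dev/md0 --size=max
 *   mdadm --grow /dev/md0 --bitmap=internal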
5923 */ 5924 return -EBUSY; 5925 list_for_each_entry(rdev, &mddev->disks, same_set) { 5926 sector_t avail = rdev->sectors; 5927 5928 if (fit && (num_sectors == 0 || num_sectors > avail)) 5929 num_sectors = avail; 5930 if (avail < num_sectors) 5931 return -ENOSPC; 5932 } 5933 rv = mddev->pers->resize(mddev, num_sectors); 5934 if (!rv) 5935 revalidate_disk(mddev->gendisk); 5936 return rv; 5937 } 5938 5939 static int update_raid_disks(struct mddev *mddev, int raid_disks) 5940 { 5941 int rv; 5942 /* change the number of raid disks */ 5943 if (mddev->pers->check_reshape == NULL) 5944 return -EINVAL; 5945 if (raid_disks <= 0 || 5946 (mddev->max_disks && raid_disks >= mddev->max_disks)) 5947 return -EINVAL; 5948 if (mddev->sync_thread || mddev->reshape_position != MaxSector) 5949 return -EBUSY; 5950 mddev->delta_disks = raid_disks - mddev->raid_disks; 5951 5952 rv = mddev->pers->check_reshape(mddev); 5953 if (rv < 0) 5954 mddev->delta_disks = 0; 5955 return rv; 5956 } 5957 5958 5959 /* 5960 * update_array_info is used to change the configuration of an 5961 * on-line array. 5962 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size 5963 * fields in the info are checked against the array. 5964 * Any differences that cannot be handled will cause an error. 5965 * Normally, only one change can be managed at a time. 5966 */ 5967 static int update_array_info(struct mddev *mddev, mdu_array_info_t *info) 5968 { 5969 int rv = 0; 5970 int cnt = 0; 5971 int state = 0; 5972 5973 /* calculate expected state,ignoring low bits */ 5974 if (mddev->bitmap && mddev->bitmap_info.offset) 5975 state |= (1 << MD_SB_BITMAP_PRESENT); 5976 5977 if (mddev->major_version != info->major_version || 5978 mddev->minor_version != info->minor_version || 5979 /* mddev->patch_version != info->patch_version || */ 5980 mddev->ctime != info->ctime || 5981 mddev->level != info->level || 5982 /* mddev->layout != info->layout || */ 5983 !mddev->persistent != info->not_persistent|| 5984 mddev->chunk_sectors != info->chunk_size >> 9 || 5985 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ 5986 ((state^info->state) & 0xfffffe00) 5987 ) 5988 return -EINVAL; 5989 /* Check there is only one change */ 5990 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 5991 cnt++; 5992 if (mddev->raid_disks != info->raid_disks) 5993 cnt++; 5994 if (mddev->layout != info->layout) 5995 cnt++; 5996 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) 5997 cnt++; 5998 if (cnt == 0) 5999 return 0; 6000 if (cnt > 1) 6001 return -EINVAL; 6002 6003 if (mddev->layout != info->layout) { 6004 /* Change layout 6005 * we don't need to do anything at the md level, the 6006 * personality will take care of it all. 
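 * Note the one-change-at-a-time rule enforced above: a request that,
 * say, alters both 'layout' and 'raid_disks' in a single call has
 * already been rejected with -EINVAL before we get here.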
6007 */ 6008 if (mddev->pers->check_reshape == NULL) 6009 return -EINVAL; 6010 else { 6011 mddev->new_layout = info->layout; 6012 rv = mddev->pers->check_reshape(mddev); 6013 if (rv) 6014 mddev->new_layout = mddev->layout; 6015 return rv; 6016 } 6017 } 6018 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 6019 rv = update_size(mddev, (sector_t)info->size * 2); 6020 6021 if (mddev->raid_disks != info->raid_disks) 6022 rv = update_raid_disks(mddev, info->raid_disks); 6023 6024 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { 6025 if (mddev->pers->quiesce == NULL) 6026 return -EINVAL; 6027 if (mddev->recovery || mddev->sync_thread) 6028 return -EBUSY; 6029 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) { 6030 /* add the bitmap */ 6031 if (mddev->bitmap) 6032 return -EEXIST; 6033 if (mddev->bitmap_info.default_offset == 0) 6034 return -EINVAL; 6035 mddev->bitmap_info.offset = 6036 mddev->bitmap_info.default_offset; 6037 mddev->pers->quiesce(mddev, 1); 6038 rv = bitmap_create(mddev); 6039 if (!rv) 6040 rv = bitmap_load(mddev); 6041 if (rv) 6042 bitmap_destroy(mddev); 6043 mddev->pers->quiesce(mddev, 0); 6044 } else { 6045 /* remove the bitmap */ 6046 if (!mddev->bitmap) 6047 return -ENOENT; 6048 if (mddev->bitmap->file) 6049 return -EINVAL; 6050 mddev->pers->quiesce(mddev, 1); 6051 bitmap_destroy(mddev); 6052 mddev->pers->quiesce(mddev, 0); 6053 mddev->bitmap_info.offset = 0; 6054 } 6055 } 6056 md_update_sb(mddev, 1); 6057 return rv; 6058 } 6059 6060 static int set_disk_faulty(struct mddev *mddev, dev_t dev) 6061 { 6062 struct md_rdev *rdev; 6063 6064 if (mddev->pers == NULL) 6065 return -ENODEV; 6066 6067 rdev = find_rdev(mddev, dev); 6068 if (!rdev) 6069 return -ENODEV; 6070 6071 md_error(mddev, rdev); 6072 if (!test_bit(Faulty, &rdev->flags)) 6073 return -EBUSY; 6074 return 0; 6075 } 6076 6077 /* 6078 * We have a problem here : there is no easy way to give a CHS 6079 * virtual geometry. We currently pretend that we have a 2 heads 6080 * 4 sectors (with a BIG number of cylinders...). This drives 6081 * dosfs just mad... 
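 * As a worked example, a 1TB array (1953525168 sectors) reports
 * cylinders = 1953525168 / (2*4) = 244190646, far more than the
 * unsigned short 'cylinders' field of struct hd_geometry can hold,
 * so HDIO_GETGEO callers see a truncated value anyway.  A minimal
 * user-space probe (sketch only; error handling and includes such
 * as <linux/hdreg.h> and <sys/ioctl.h> omitted) would be:
 *
 *	struct hd_geometry geo;
 *	int fd = open("/dev/md0", O_RDONLY);
 *	if (fd >= 0 && ioctl(fd, HDIO_GETGEO, &geo) == 0)
 *		printf("%u heads, %u sectors/track\n",
 *		       geo.heads, geo.sectors);
 *
 * none of which makes the fake geometry any more meaningful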
;-) 6082 */ 6083 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) 6084 { 6085 struct mddev *mddev = bdev->bd_disk->private_data; 6086 6087 geo->heads = 2; 6088 geo->sectors = 4; 6089 geo->cylinders = mddev->array_sectors / 8; 6090 return 0; 6091 } 6092 6093 static int md_ioctl(struct block_device *bdev, fmode_t mode, 6094 unsigned int cmd, unsigned long arg) 6095 { 6096 int err = 0; 6097 void __user *argp = (void __user *)arg; 6098 struct mddev *mddev = NULL; 6099 int ro; 6100 6101 switch (cmd) { 6102 case RAID_VERSION: 6103 case GET_ARRAY_INFO: 6104 case GET_DISK_INFO: 6105 break; 6106 default: 6107 if (!capable(CAP_SYS_ADMIN)) 6108 return -EACCES; 6109 } 6110 6111 /* 6112 * Commands dealing with the RAID driver but not any 6113 * particular array: 6114 */ 6115 switch (cmd) 6116 { 6117 case RAID_VERSION: 6118 err = get_version(argp); 6119 goto done; 6120 6121 case PRINT_RAID_DEBUG: 6122 err = 0; 6123 md_print_devices(); 6124 goto done; 6125 6126 #ifndef MODULE 6127 case RAID_AUTORUN: 6128 err = 0; 6129 autostart_arrays(arg); 6130 goto done; 6131 #endif 6132 default:; 6133 } 6134 6135 /* 6136 * Commands creating/starting a new array: 6137 */ 6138 6139 mddev = bdev->bd_disk->private_data; 6140 6141 if (!mddev) { 6142 BUG(); 6143 goto abort; 6144 } 6145 6146 err = mddev_lock(mddev); 6147 if (err) { 6148 printk(KERN_INFO 6149 "md: ioctl lock interrupted, reason %d, cmd %d\n", 6150 err, cmd); 6151 goto abort; 6152 } 6153 6154 switch (cmd) 6155 { 6156 case SET_ARRAY_INFO: 6157 { 6158 mdu_array_info_t info; 6159 if (!arg) 6160 memset(&info, 0, sizeof(info)); 6161 else if (copy_from_user(&info, argp, sizeof(info))) { 6162 err = -EFAULT; 6163 goto abort_unlock; 6164 } 6165 if (mddev->pers) { 6166 err = update_array_info(mddev, &info); 6167 if (err) { 6168 printk(KERN_WARNING "md: couldn't update" 6169 " array info. %d\n", err); 6170 goto abort_unlock; 6171 } 6172 goto done_unlock; 6173 } 6174 if (!list_empty(&mddev->disks)) { 6175 printk(KERN_WARNING 6176 "md: array %s already has disks!\n", 6177 mdname(mddev)); 6178 err = -EBUSY; 6179 goto abort_unlock; 6180 } 6181 if (mddev->raid_disks) { 6182 printk(KERN_WARNING 6183 "md: array %s already initialised!\n", 6184 mdname(mddev)); 6185 err = -EBUSY; 6186 goto abort_unlock; 6187 } 6188 err = set_array_info(mddev, &info); 6189 if (err) { 6190 printk(KERN_WARNING "md: couldn't set" 6191 " array info. 
%d\n", err); 6192 goto abort_unlock; 6193 } 6194 } 6195 goto done_unlock; 6196 6197 default:; 6198 } 6199 6200 /* 6201 * Commands querying/configuring an existing array: 6202 */ 6203 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY, 6204 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */ 6205 if ((!mddev->raid_disks && !mddev->external) 6206 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY 6207 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE 6208 && cmd != GET_BITMAP_FILE) { 6209 err = -ENODEV; 6210 goto abort_unlock; 6211 } 6212 6213 /* 6214 * Commands even a read-only array can execute: 6215 */ 6216 switch (cmd) 6217 { 6218 case GET_ARRAY_INFO: 6219 err = get_array_info(mddev, argp); 6220 goto done_unlock; 6221 6222 case GET_BITMAP_FILE: 6223 err = get_bitmap_file(mddev, argp); 6224 goto done_unlock; 6225 6226 case GET_DISK_INFO: 6227 err = get_disk_info(mddev, argp); 6228 goto done_unlock; 6229 6230 case RESTART_ARRAY_RW: 6231 err = restart_array(mddev); 6232 goto done_unlock; 6233 6234 case STOP_ARRAY: 6235 err = do_md_stop(mddev, 0, 1); 6236 goto done_unlock; 6237 6238 case STOP_ARRAY_RO: 6239 err = md_set_readonly(mddev, 1); 6240 goto done_unlock; 6241 6242 case BLKROSET: 6243 if (get_user(ro, (int __user *)(arg))) { 6244 err = -EFAULT; 6245 goto done_unlock; 6246 } 6247 err = -EINVAL; 6248 6249 /* if the bdev is going readonly the value of mddev->ro 6250 * does not matter, no writes are coming 6251 */ 6252 if (ro) 6253 goto done_unlock; 6254 6255 /* are we are already prepared for writes? */ 6256 if (mddev->ro != 1) 6257 goto done_unlock; 6258 6259 /* transitioning to readauto need only happen for 6260 * arrays that call md_write_start 6261 */ 6262 if (mddev->pers) { 6263 err = restart_array(mddev); 6264 if (err == 0) { 6265 mddev->ro = 2; 6266 set_disk_ro(mddev->gendisk, 0); 6267 } 6268 } 6269 goto done_unlock; 6270 } 6271 6272 /* 6273 * The remaining ioctls are changing the state of the 6274 * superblock, so we do not allow them on read-only arrays. 6275 * However non-MD ioctls (e.g. get-size) will still come through 6276 * here and hit the 'default' below, so only disallow 6277 * 'md' ioctls, and switch to rw mode if started auto-readonly. 
6278 */ 6279 if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) { 6280 if (mddev->ro == 2) { 6281 mddev->ro = 0; 6282 sysfs_notify_dirent_safe(mddev->sysfs_state); 6283 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6284 md_wakeup_thread(mddev->thread); 6285 } else { 6286 err = -EROFS; 6287 goto abort_unlock; 6288 } 6289 } 6290 6291 switch (cmd) 6292 { 6293 case ADD_NEW_DISK: 6294 { 6295 mdu_disk_info_t info; 6296 if (copy_from_user(&info, argp, sizeof(info))) 6297 err = -EFAULT; 6298 else 6299 err = add_new_disk(mddev, &info); 6300 goto done_unlock; 6301 } 6302 6303 case HOT_REMOVE_DISK: 6304 err = hot_remove_disk(mddev, new_decode_dev(arg)); 6305 goto done_unlock; 6306 6307 case HOT_ADD_DISK: 6308 err = hot_add_disk(mddev, new_decode_dev(arg)); 6309 goto done_unlock; 6310 6311 case SET_DISK_FAULTY: 6312 err = set_disk_faulty(mddev, new_decode_dev(arg)); 6313 goto done_unlock; 6314 6315 case RUN_ARRAY: 6316 err = do_md_run(mddev); 6317 goto done_unlock; 6318 6319 case SET_BITMAP_FILE: 6320 err = set_bitmap_file(mddev, (int)arg); 6321 goto done_unlock; 6322 6323 default: 6324 err = -EINVAL; 6325 goto abort_unlock; 6326 } 6327 6328 done_unlock: 6329 abort_unlock: 6330 if (mddev->hold_active == UNTIL_IOCTL && 6331 err != -EINVAL) 6332 mddev->hold_active = 0; 6333 mddev_unlock(mddev); 6334 6335 return err; 6336 done: 6337 if (err) 6338 MD_BUG(); 6339 abort: 6340 return err; 6341 } 6342 #ifdef CONFIG_COMPAT 6343 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode, 6344 unsigned int cmd, unsigned long arg) 6345 { 6346 switch (cmd) { 6347 case HOT_REMOVE_DISK: 6348 case HOT_ADD_DISK: 6349 case SET_DISK_FAULTY: 6350 case SET_BITMAP_FILE: 6351 /* These take in integer arg, do not convert */ 6352 break; 6353 default: 6354 arg = (unsigned long)compat_ptr(arg); 6355 break; 6356 } 6357 6358 return md_ioctl(bdev, mode, cmd, arg); 6359 } 6360 #endif /* CONFIG_COMPAT */ 6361 6362 static int md_open(struct block_device *bdev, fmode_t mode) 6363 { 6364 /* 6365 * Succeed if we can lock the mddev, which confirms that 6366 * it isn't being stopped right now. 6367 */ 6368 struct mddev *mddev = mddev_find(bdev->bd_dev); 6369 int err; 6370 6371 if (mddev->gendisk != bdev->bd_disk) { 6372 /* we are racing with mddev_put which is discarding this 6373 * bd_disk. 
6374 */ 6375 mddev_put(mddev); 6376 /* Wait until bdev->bd_disk is definitely gone */ 6377 flush_workqueue(md_misc_wq); 6378 /* Then retry the open from the top */ 6379 return -ERESTARTSYS; 6380 } 6381 BUG_ON(mddev != bdev->bd_disk->private_data); 6382 6383 if ((err = mutex_lock_interruptible(&mddev->open_mutex))) 6384 goto out; 6385 6386 err = 0; 6387 atomic_inc(&mddev->openers); 6388 mutex_unlock(&mddev->open_mutex); 6389 6390 check_disk_change(bdev); 6391 out: 6392 return err; 6393 } 6394 6395 static int md_release(struct gendisk *disk, fmode_t mode) 6396 { 6397 struct mddev *mddev = disk->private_data; 6398 6399 BUG_ON(!mddev); 6400 atomic_dec(&mddev->openers); 6401 mddev_put(mddev); 6402 6403 return 0; 6404 } 6405 6406 static int md_media_changed(struct gendisk *disk) 6407 { 6408 struct mddev *mddev = disk->private_data; 6409 6410 return mddev->changed; 6411 } 6412 6413 static int md_revalidate(struct gendisk *disk) 6414 { 6415 struct mddev *mddev = disk->private_data; 6416 6417 mddev->changed = 0; 6418 return 0; 6419 } 6420 static const struct block_device_operations md_fops = 6421 { 6422 .owner = THIS_MODULE, 6423 .open = md_open, 6424 .release = md_release, 6425 .ioctl = md_ioctl, 6426 #ifdef CONFIG_COMPAT 6427 .compat_ioctl = md_compat_ioctl, 6428 #endif 6429 .getgeo = md_getgeo, 6430 .media_changed = md_media_changed, 6431 .revalidate_disk= md_revalidate, 6432 }; 6433 6434 static int md_thread(void * arg) 6435 { 6436 struct md_thread *thread = arg; 6437 6438 /* 6439 * md_thread is a 'system-thread', it's priority should be very 6440 * high. We avoid resource deadlocks individually in each 6441 * raid personality. (RAID5 does preallocation) We also use RR and 6442 * the very same RT priority as kswapd, thus we will never get 6443 * into a priority inversion deadlock. 6444 * 6445 * we definitely have to have equal or higher priority than 6446 * bdflush, otherwise bdflush will deadlock if there are too 6447 * many dirty RAID5 blocks. 6448 */ 6449 6450 allow_signal(SIGKILL); 6451 while (!kthread_should_stop()) { 6452 6453 /* We need to wait INTERRUPTIBLE so that 6454 * we don't add to the load-average. 
6455 * That means we need to be sure no signals are 6456 * pending 6457 */ 6458 if (signal_pending(current)) 6459 flush_signals(current); 6460 6461 wait_event_interruptible_timeout 6462 (thread->wqueue, 6463 test_bit(THREAD_WAKEUP, &thread->flags) 6464 || kthread_should_stop(), 6465 thread->timeout); 6466 6467 clear_bit(THREAD_WAKEUP, &thread->flags); 6468 if (!kthread_should_stop()) 6469 thread->run(thread->mddev); 6470 } 6471 6472 return 0; 6473 } 6474 6475 void md_wakeup_thread(struct md_thread *thread) 6476 { 6477 if (thread) { 6478 pr_debug("md: waking up MD thread %s.\n", thread->tsk->comm); 6479 set_bit(THREAD_WAKEUP, &thread->flags); 6480 wake_up(&thread->wqueue); 6481 } 6482 } 6483 6484 struct md_thread *md_register_thread(void (*run) (struct mddev *), struct mddev *mddev, 6485 const char *name) 6486 { 6487 struct md_thread *thread; 6488 6489 thread = kzalloc(sizeof(struct md_thread), GFP_KERNEL); 6490 if (!thread) 6491 return NULL; 6492 6493 init_waitqueue_head(&thread->wqueue); 6494 6495 thread->run = run; 6496 thread->mddev = mddev; 6497 thread->timeout = MAX_SCHEDULE_TIMEOUT; 6498 thread->tsk = kthread_run(md_thread, thread, 6499 "%s_%s", 6500 mdname(thread->mddev), 6501 name ?: mddev->pers->name); 6502 if (IS_ERR(thread->tsk)) { 6503 kfree(thread); 6504 return NULL; 6505 } 6506 return thread; 6507 } 6508 6509 void md_unregister_thread(struct md_thread **threadp) 6510 { 6511 struct md_thread *thread = *threadp; 6512 if (!thread) 6513 return; 6514 pr_debug("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); 6515 /* Locking ensures that mddev_unlock does not wake_up a 6516 * non-existent thread 6517 */ 6518 spin_lock(&pers_lock); 6519 *threadp = NULL; 6520 spin_unlock(&pers_lock); 6521 6522 kthread_stop(thread->tsk); 6523 kfree(thread); 6524 } 6525 6526 void md_error(struct mddev *mddev, struct md_rdev *rdev) 6527 { 6528 if (!mddev) { 6529 MD_BUG(); 6530 return; 6531 } 6532 6533 if (!rdev || test_bit(Faulty, &rdev->flags)) 6534 return; 6535 6536 if (!mddev->pers || !mddev->pers->error_handler) 6537 return; 6538 mddev->pers->error_handler(mddev,rdev); 6539 if (mddev->degraded) 6540 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 6541 sysfs_notify_dirent_safe(rdev->sysfs_state); 6542 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6543 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6544 md_wakeup_thread(mddev->thread); 6545 if (mddev->event_work.func) 6546 queue_work(md_misc_wq, &mddev->event_work); 6547 md_new_event_inintr(mddev); 6548 } 6549 6550 /* seq_file implementation /proc/mdstat */ 6551 6552 static void status_unused(struct seq_file *seq) 6553 { 6554 int i = 0; 6555 struct md_rdev *rdev; 6556 6557 seq_printf(seq, "unused devices: "); 6558 6559 list_for_each_entry(rdev, &pending_raid_disks, same_set) { 6560 char b[BDEVNAME_SIZE]; 6561 i++; 6562 seq_printf(seq, "%s ", 6563 bdevname(rdev->bdev,b)); 6564 } 6565 if (!i) 6566 seq_printf(seq, "<none>"); 6567 6568 seq_printf(seq, "\n"); 6569 } 6570 6571 6572 static void status_resync(struct seq_file *seq, struct mddev * mddev) 6573 { 6574 sector_t max_sectors, resync, res; 6575 unsigned long dt, db; 6576 sector_t rt; 6577 int scale; 6578 unsigned int per_milli; 6579 6580 resync = mddev->curr_resync - atomic_read(&mddev->recovery_active); 6581 6582 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 6583 max_sectors = mddev->resync_max_sectors; 6584 else 6585 max_sectors = mddev->dev_sectors; 6586 6587 /* 6588 * Should not happen. 
6589 */ 6590 if (!max_sectors) { 6591 MD_BUG(); 6592 return; 6593 } 6594 /* Pick 'scale' such that (resync>>scale)*1000 will fit 6595 * in a sector_t, and (max_sectors>>scale) will fit in a 6596 * u32, as those are the requirements for sector_div. 6597 * Thus 'scale' must be at least 10 6598 */ 6599 scale = 10; 6600 if (sizeof(sector_t) > sizeof(unsigned long)) { 6601 while ( max_sectors/2 > (1ULL<<(scale+32))) 6602 scale++; 6603 } 6604 res = (resync>>scale)*1000; 6605 sector_div(res, (u32)((max_sectors>>scale)+1)); 6606 6607 per_milli = res; 6608 { 6609 int i, x = per_milli/50, y = 20-x; 6610 seq_printf(seq, "["); 6611 for (i = 0; i < x; i++) 6612 seq_printf(seq, "="); 6613 seq_printf(seq, ">"); 6614 for (i = 0; i < y; i++) 6615 seq_printf(seq, "."); 6616 seq_printf(seq, "] "); 6617 } 6618 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)", 6619 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? 6620 "reshape" : 6621 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)? 6622 "check" : 6623 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? 6624 "resync" : "recovery"))), 6625 per_milli/10, per_milli % 10, 6626 (unsigned long long) resync/2, 6627 (unsigned long long) max_sectors/2); 6628 6629 /* 6630 * dt: time from mark until now 6631 * db: blocks written from mark until now 6632 * rt: remaining time 6633 * 6634 * rt is a sector_t, so could be 32bit or 64bit. 6635 * So we divide before multiply in case it is 32bit and close 6636 * to the limit. 6637 * We scale the divisor (db) by 32 to avoid losing precision 6638 * near the end of resync when the number of remaining sectors 6639 * is close to 'db'. 6640 * We then divide rt by 32 after multiplying by db to compensate. 6641 * The '+1' avoids division by zero if db is very small. 6642 */ 6643 dt = ((jiffies - mddev->resync_mark) / HZ); 6644 if (!dt) dt++; 6645 db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active)) 6646 - mddev->resync_mark_cnt; 6647 6648 rt = max_sectors - resync; /* number of remaining sectors */ 6649 sector_div(rt, db/32+1); 6650 rt *= dt; 6651 rt >>= 5; 6652 6653 seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60, 6654 ((unsigned long)rt % 60)/6); 6655 6656 seq_printf(seq, " speed=%ldK/sec", db/2/dt); 6657 } 6658 6659 static void *md_seq_start(struct seq_file *seq, loff_t *pos) 6660 { 6661 struct list_head *tmp; 6662 loff_t l = *pos; 6663 struct mddev *mddev; 6664 6665 if (l >= 0x10000) 6666 return NULL; 6667 if (!l--) 6668 /* header */ 6669 return (void*)1; 6670 6671 spin_lock(&all_mddevs_lock); 6672 list_for_each(tmp,&all_mddevs) 6673 if (!l--) { 6674 mddev = list_entry(tmp, struct mddev, all_mddevs); 6675 mddev_get(mddev); 6676 spin_unlock(&all_mddevs_lock); 6677 return mddev; 6678 } 6679 spin_unlock(&all_mddevs_lock); 6680 if (!l--) 6681 return (void*)2;/* tail */ 6682 return NULL; 6683 } 6684 6685 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) 6686 { 6687 struct list_head *tmp; 6688 struct mddev *next_mddev, *mddev = v; 6689 6690 ++*pos; 6691 if (v == (void*)2) 6692 return NULL; 6693 6694 spin_lock(&all_mddevs_lock); 6695 if (v == (void*)1) 6696 tmp = all_mddevs.next; 6697 else 6698 tmp = mddev->all_mddevs.next; 6699 if (tmp != &all_mddevs) 6700 next_mddev = mddev_get(list_entry(tmp,struct mddev,all_mddevs)); 6701 else { 6702 next_mddev = (void*)2; 6703 *pos = 0x10000; 6704 } 6705 spin_unlock(&all_mddevs_lock); 6706 6707 if (v != (void*)1) 6708 mddev_put(mddev); 6709 return next_mddev; 6710 6711 } 6712 6713 static void md_seq_stop(struct seq_file *seq, void *v) 6714 { 6715 struct mddev 
*mddev = v; 6716 6717 if (mddev && v != (void*)1 && v != (void*)2) 6718 mddev_put(mddev); 6719 } 6720 6721 static int md_seq_show(struct seq_file *seq, void *v) 6722 { 6723 struct mddev *mddev = v; 6724 sector_t sectors; 6725 struct md_rdev *rdev; 6726 struct bitmap *bitmap; 6727 6728 if (v == (void*)1) { 6729 struct md_personality *pers; 6730 seq_printf(seq, "Personalities : "); 6731 spin_lock(&pers_lock); 6732 list_for_each_entry(pers, &pers_list, list) 6733 seq_printf(seq, "[%s] ", pers->name); 6734 6735 spin_unlock(&pers_lock); 6736 seq_printf(seq, "\n"); 6737 seq->poll_event = atomic_read(&md_event_count); 6738 return 0; 6739 } 6740 if (v == (void*)2) { 6741 status_unused(seq); 6742 return 0; 6743 } 6744 6745 if (mddev_lock(mddev) < 0) 6746 return -EINTR; 6747 6748 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { 6749 seq_printf(seq, "%s : %sactive", mdname(mddev), 6750 mddev->pers ? "" : "in"); 6751 if (mddev->pers) { 6752 if (mddev->ro==1) 6753 seq_printf(seq, " (read-only)"); 6754 if (mddev->ro==2) 6755 seq_printf(seq, " (auto-read-only)"); 6756 seq_printf(seq, " %s", mddev->pers->name); 6757 } 6758 6759 sectors = 0; 6760 list_for_each_entry(rdev, &mddev->disks, same_set) { 6761 char b[BDEVNAME_SIZE]; 6762 seq_printf(seq, " %s[%d]", 6763 bdevname(rdev->bdev,b), rdev->desc_nr); 6764 if (test_bit(WriteMostly, &rdev->flags)) 6765 seq_printf(seq, "(W)"); 6766 if (test_bit(Faulty, &rdev->flags)) { 6767 seq_printf(seq, "(F)"); 6768 continue; 6769 } 6770 if (rdev->raid_disk < 0) 6771 seq_printf(seq, "(S)"); /* spare */ 6772 if (test_bit(Replacement, &rdev->flags)) 6773 seq_printf(seq, "(R)"); 6774 sectors += rdev->sectors; 6775 } 6776 6777 if (!list_empty(&mddev->disks)) { 6778 if (mddev->pers) 6779 seq_printf(seq, "\n %llu blocks", 6780 (unsigned long long) 6781 mddev->array_sectors / 2); 6782 else 6783 seq_printf(seq, "\n %llu blocks", 6784 (unsigned long long)sectors / 2); 6785 } 6786 if (mddev->persistent) { 6787 if (mddev->major_version != 0 || 6788 mddev->minor_version != 90) { 6789 seq_printf(seq," super %d.%d", 6790 mddev->major_version, 6791 mddev->minor_version); 6792 } 6793 } else if (mddev->external) 6794 seq_printf(seq, " super external:%s", 6795 mddev->metadata_type); 6796 else 6797 seq_printf(seq, " super non-persistent"); 6798 6799 if (mddev->pers) { 6800 mddev->pers->status(seq, mddev); 6801 seq_printf(seq, "\n "); 6802 if (mddev->pers->sync_request) { 6803 if (mddev->curr_resync > 2) { 6804 status_resync(seq, mddev); 6805 seq_printf(seq, "\n "); 6806 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2) 6807 seq_printf(seq, "\tresync=DELAYED\n "); 6808 else if (mddev->recovery_cp < MaxSector) 6809 seq_printf(seq, "\tresync=PENDING\n "); 6810 } 6811 } else 6812 seq_printf(seq, "\n "); 6813 6814 if ((bitmap = mddev->bitmap)) { 6815 unsigned long chunk_kb; 6816 unsigned long flags; 6817 spin_lock_irqsave(&bitmap->lock, flags); 6818 chunk_kb = mddev->bitmap_info.chunksize >> 10; 6819 seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], " 6820 "%lu%s chunk", 6821 bitmap->pages - bitmap->missing_pages, 6822 bitmap->pages, 6823 (bitmap->pages - bitmap->missing_pages) 6824 << (PAGE_SHIFT - 10), 6825 chunk_kb ? chunk_kb : mddev->bitmap_info.chunksize, 6826 chunk_kb ? 
"KB" : "B"); 6827 if (bitmap->file) { 6828 seq_printf(seq, ", file: "); 6829 seq_path(seq, &bitmap->file->f_path, " \t\n"); 6830 } 6831 6832 seq_printf(seq, "\n"); 6833 spin_unlock_irqrestore(&bitmap->lock, flags); 6834 } 6835 6836 seq_printf(seq, "\n"); 6837 } 6838 mddev_unlock(mddev); 6839 6840 return 0; 6841 } 6842 6843 static const struct seq_operations md_seq_ops = { 6844 .start = md_seq_start, 6845 .next = md_seq_next, 6846 .stop = md_seq_stop, 6847 .show = md_seq_show, 6848 }; 6849 6850 static int md_seq_open(struct inode *inode, struct file *file) 6851 { 6852 struct seq_file *seq; 6853 int error; 6854 6855 error = seq_open(file, &md_seq_ops); 6856 if (error) 6857 return error; 6858 6859 seq = file->private_data; 6860 seq->poll_event = atomic_read(&md_event_count); 6861 return error; 6862 } 6863 6864 static unsigned int mdstat_poll(struct file *filp, poll_table *wait) 6865 { 6866 struct seq_file *seq = filp->private_data; 6867 int mask; 6868 6869 poll_wait(filp, &md_event_waiters, wait); 6870 6871 /* always allow read */ 6872 mask = POLLIN | POLLRDNORM; 6873 6874 if (seq->poll_event != atomic_read(&md_event_count)) 6875 mask |= POLLERR | POLLPRI; 6876 return mask; 6877 } 6878 6879 static const struct file_operations md_seq_fops = { 6880 .owner = THIS_MODULE, 6881 .open = md_seq_open, 6882 .read = seq_read, 6883 .llseek = seq_lseek, 6884 .release = seq_release_private, 6885 .poll = mdstat_poll, 6886 }; 6887 6888 int register_md_personality(struct md_personality *p) 6889 { 6890 spin_lock(&pers_lock); 6891 list_add_tail(&p->list, &pers_list); 6892 printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level); 6893 spin_unlock(&pers_lock); 6894 return 0; 6895 } 6896 6897 int unregister_md_personality(struct md_personality *p) 6898 { 6899 printk(KERN_INFO "md: %s personality unregistered\n", p->name); 6900 spin_lock(&pers_lock); 6901 list_del_init(&p->list); 6902 spin_unlock(&pers_lock); 6903 return 0; 6904 } 6905 6906 static int is_mddev_idle(struct mddev *mddev, int init) 6907 { 6908 struct md_rdev * rdev; 6909 int idle; 6910 int curr_events; 6911 6912 idle = 1; 6913 rcu_read_lock(); 6914 rdev_for_each_rcu(rdev, mddev) { 6915 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; 6916 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + 6917 (int)part_stat_read(&disk->part0, sectors[1]) - 6918 atomic_read(&disk->sync_io); 6919 /* sync IO will cause sync_io to increase before the disk_stats 6920 * as sync_io is counted when a request starts, and 6921 * disk_stats is counted when it completes. 6922 * So resync activity will cause curr_events to be smaller than 6923 * when there was no such activity. 6924 * non-sync IO will cause disk_stat to increase without 6925 * increasing sync_io so curr_events will (eventually) 6926 * be larger than it was before. Once it becomes 6927 * substantially larger, the test below will cause 6928 * the array to appear non-idle, and resync will slow 6929 * down. 6930 * If there is a lot of outstanding resync activity when 6931 * we set last_event to curr_events, then all that activity 6932 * completing might cause the array to appear non-idle 6933 * and resync will be slowed down even though there might 6934 * not have been non-resync activity. This will only 6935 * happen once though. 'last_events' will soon reflect 6936 * the state where there is little or no outstanding 6937 * resync requests, and further resync activity will 6938 * always make curr_events less than last_events. 
6939 * 6940 */ 6941 if (init || curr_events - rdev->last_events > 64) { 6942 rdev->last_events = curr_events; 6943 idle = 0; 6944 } 6945 } 6946 rcu_read_unlock(); 6947 return idle; 6948 } 6949 6950 void md_done_sync(struct mddev *mddev, int blocks, int ok) 6951 { 6952 /* another "blocks" (512byte) blocks have been synced */ 6953 atomic_sub(blocks, &mddev->recovery_active); 6954 wake_up(&mddev->recovery_wait); 6955 if (!ok) { 6956 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6957 md_wakeup_thread(mddev->thread); 6958 // stop recovery, signal do_sync .... 6959 } 6960 } 6961 6962 6963 /* md_write_start(mddev, bi) 6964 * If we need to update some array metadata (e.g. 'active' flag 6965 * in superblock) before writing, schedule a superblock update 6966 * and wait for it to complete. 6967 */ 6968 void md_write_start(struct mddev *mddev, struct bio *bi) 6969 { 6970 int did_change = 0; 6971 if (bio_data_dir(bi) != WRITE) 6972 return; 6973 6974 BUG_ON(mddev->ro == 1); 6975 if (mddev->ro == 2) { 6976 /* need to switch to read/write */ 6977 mddev->ro = 0; 6978 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6979 md_wakeup_thread(mddev->thread); 6980 md_wakeup_thread(mddev->sync_thread); 6981 did_change = 1; 6982 } 6983 atomic_inc(&mddev->writes_pending); 6984 if (mddev->safemode == 1) 6985 mddev->safemode = 0; 6986 if (mddev->in_sync) { 6987 spin_lock_irq(&mddev->write_lock); 6988 if (mddev->in_sync) { 6989 mddev->in_sync = 0; 6990 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 6991 set_bit(MD_CHANGE_PENDING, &mddev->flags); 6992 md_wakeup_thread(mddev->thread); 6993 did_change = 1; 6994 } 6995 spin_unlock_irq(&mddev->write_lock); 6996 } 6997 if (did_change) 6998 sysfs_notify_dirent_safe(mddev->sysfs_state); 6999 wait_event(mddev->sb_wait, 7000 !test_bit(MD_CHANGE_PENDING, &mddev->flags)); 7001 } 7002 7003 void md_write_end(struct mddev *mddev) 7004 { 7005 if (atomic_dec_and_test(&mddev->writes_pending)) { 7006 if (mddev->safemode == 2) 7007 md_wakeup_thread(mddev->thread); 7008 else if (mddev->safemode_delay) 7009 mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay); 7010 } 7011 } 7012 7013 /* md_allow_write(mddev) 7014 * Calling this ensures that the array is marked 'active' so that writes 7015 * may proceed without blocking. It is important to call this before 7016 * attempting a GFP_KERNEL allocation while holding the mddev lock. 7017 * Must be called with mddev_lock held. 7018 * 7019 * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock 7020 * is dropped, so return -EAGAIN after notifying userspace. 
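 * A typical caller therefore looks like this (sketch only, not
 * lifted verbatim from any personality):
 *
 *	err = md_allow_write(mddev);
 *	if (err)
 *		return err;	// -EAGAIN in the ->external case
 *	new = kmalloc(size, GFP_KERNEL);	// safe: array marked active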
7021 */ 7022 int md_allow_write(struct mddev *mddev) 7023 { 7024 if (!mddev->pers) 7025 return 0; 7026 if (mddev->ro) 7027 return 0; 7028 if (!mddev->pers->sync_request) 7029 return 0; 7030 7031 spin_lock_irq(&mddev->write_lock); 7032 if (mddev->in_sync) { 7033 mddev->in_sync = 0; 7034 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 7035 set_bit(MD_CHANGE_PENDING, &mddev->flags); 7036 if (mddev->safemode_delay && 7037 mddev->safemode == 0) 7038 mddev->safemode = 1; 7039 spin_unlock_irq(&mddev->write_lock); 7040 md_update_sb(mddev, 0); 7041 sysfs_notify_dirent_safe(mddev->sysfs_state); 7042 } else 7043 spin_unlock_irq(&mddev->write_lock); 7044 7045 if (test_bit(MD_CHANGE_PENDING, &mddev->flags)) 7046 return -EAGAIN; 7047 else 7048 return 0; 7049 } 7050 EXPORT_SYMBOL_GPL(md_allow_write); 7051 7052 #define SYNC_MARKS 10 7053 #define SYNC_MARK_STEP (3*HZ) 7054 void md_do_sync(struct mddev *mddev) 7055 { 7056 struct mddev *mddev2; 7057 unsigned int currspeed = 0, 7058 window; 7059 sector_t max_sectors,j, io_sectors; 7060 unsigned long mark[SYNC_MARKS]; 7061 sector_t mark_cnt[SYNC_MARKS]; 7062 int last_mark,m; 7063 struct list_head *tmp; 7064 sector_t last_check; 7065 int skipped = 0; 7066 struct md_rdev *rdev; 7067 char *desc; 7068 7069 /* just incase thread restarts... */ 7070 if (test_bit(MD_RECOVERY_DONE, &mddev->recovery)) 7071 return; 7072 if (mddev->ro) /* never try to sync a read-only array */ 7073 return; 7074 7075 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 7076 if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) 7077 desc = "data-check"; 7078 else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 7079 desc = "requested-resync"; 7080 else 7081 desc = "resync"; 7082 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 7083 desc = "reshape"; 7084 else 7085 desc = "recovery"; 7086 7087 /* we overload curr_resync somewhat here. 7088 * 0 == not engaged in resync at all 7089 * 2 == checking that there is no conflict with another sync 7090 * 1 == like 2, but have yielded to allow conflicting resync to 7091 * commense 7092 * other == active in resync - this many blocks 7093 * 7094 * Before starting a resync we must have set curr_resync to 7095 * 2, and then checked that every "conflicting" array has curr_resync 7096 * less than ours. When we find one that is the same or higher 7097 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync 7098 * to 1 if we choose to yield (based arbitrarily on address of mddev structure). 7099 * This will mean we have to start checking from the beginning again. 
7100 * 7101 */ 7102 7103 do { 7104 mddev->curr_resync = 2; 7105 7106 try_again: 7107 if (kthread_should_stop()) 7108 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 7109 7110 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 7111 goto skip; 7112 for_each_mddev(mddev2, tmp) { 7113 if (mddev2 == mddev) 7114 continue; 7115 if (!mddev->parallel_resync 7116 && mddev2->curr_resync 7117 && match_mddev_units(mddev, mddev2)) { 7118 DEFINE_WAIT(wq); 7119 if (mddev < mddev2 && mddev->curr_resync == 2) { 7120 /* arbitrarily yield */ 7121 mddev->curr_resync = 1; 7122 wake_up(&resync_wait); 7123 } 7124 if (mddev > mddev2 && mddev->curr_resync == 1) 7125 /* no need to wait here, we can wait the next 7126 * time 'round when curr_resync == 2 7127 */ 7128 continue; 7129 /* We need to wait 'interruptible' so as not to 7130 * contribute to the load average, and not to 7131 * be caught by 'softlockup' 7132 */ 7133 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE); 7134 if (!kthread_should_stop() && 7135 mddev2->curr_resync >= mddev->curr_resync) { 7136 printk(KERN_INFO "md: delaying %s of %s" 7137 " until %s has finished (they" 7138 " share one or more physical units)\n", 7139 desc, mdname(mddev), mdname(mddev2)); 7140 mddev_put(mddev2); 7141 if (signal_pending(current)) 7142 flush_signals(current); 7143 schedule(); 7144 finish_wait(&resync_wait, &wq); 7145 goto try_again; 7146 } 7147 finish_wait(&resync_wait, &wq); 7148 } 7149 } 7150 } while (mddev->curr_resync < 2); 7151 7152 j = 0; 7153 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 7154 /* resync follows the size requested by the personality, 7155 * which defaults to physical size, but can be virtual size 7156 */ 7157 max_sectors = mddev->resync_max_sectors; 7158 mddev->resync_mismatches = 0; 7159 /* we don't use the checkpoint if there's a bitmap */ 7160 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 7161 j = mddev->resync_min; 7162 else if (!mddev->bitmap) 7163 j = mddev->recovery_cp; 7164 7165 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 7166 max_sectors = mddev->dev_sectors; 7167 else { 7168 /* recovery follows the physical size of devices */ 7169 max_sectors = mddev->dev_sectors; 7170 j = MaxSector; 7171 rcu_read_lock(); 7172 list_for_each_entry_rcu(rdev, &mddev->disks, same_set) 7173 if (rdev->raid_disk >= 0 && 7174 !test_bit(Faulty, &rdev->flags) && 7175 !test_bit(In_sync, &rdev->flags) && 7176 rdev->recovery_offset < j) 7177 j = rdev->recovery_offset; 7178 rcu_read_unlock(); 7179 } 7180 7181 printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev)); 7182 printk(KERN_INFO "md: minimum _guaranteed_ speed:" 7183 " %d KB/sec/disk.\n", speed_min(mddev)); 7184 printk(KERN_INFO "md: using maximum available idle IO bandwidth " 7185 "(but not more than %d KB/sec) for %s.\n", 7186 speed_max(mddev), desc); 7187 7188 is_mddev_idle(mddev, 1); /* this initializes IO event counters */ 7189 7190 io_sectors = 0; 7191 for (m = 0; m < SYNC_MARKS; m++) { 7192 mark[m] = jiffies; 7193 mark_cnt[m] = io_sectors; 7194 } 7195 last_mark = 0; 7196 mddev->resync_mark = mark[last_mark]; 7197 mddev->resync_mark_cnt = mark_cnt[last_mark]; 7198 7199 /* 7200 * Tune reconstruction: 7201 */ 7202 window = 32*(PAGE_SIZE/512); 7203 printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n", 7204 window/2, (unsigned long long)max_sectors/2); 7205 7206 atomic_set(&mddev->recovery_active, 0); 7207 last_check = 0; 7208 7209 if (j>2) { 7210 printk(KERN_INFO 7211 "md: resuming %s of %s from checkpoint.\n", 7212 desc, mdname(mddev)); 7213 
mddev->curr_resync = j; 7214 } 7215 mddev->curr_resync_completed = j; 7216 7217 while (j < max_sectors) { 7218 sector_t sectors; 7219 7220 skipped = 0; 7221 7222 if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 7223 ((mddev->curr_resync > mddev->curr_resync_completed && 7224 (mddev->curr_resync - mddev->curr_resync_completed) 7225 > (max_sectors >> 4)) || 7226 (j - mddev->curr_resync_completed)*2 7227 >= mddev->resync_max - mddev->curr_resync_completed 7228 )) { 7229 /* time to update curr_resync_completed */ 7230 wait_event(mddev->recovery_wait, 7231 atomic_read(&mddev->recovery_active) == 0); 7232 mddev->curr_resync_completed = j; 7233 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 7234 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 7235 } 7236 7237 while (j >= mddev->resync_max && !kthread_should_stop()) { 7238 /* As this condition is controlled by user-space, 7239 * we can block indefinitely, so use '_interruptible' 7240 * to avoid triggering warnings. 7241 */ 7242 flush_signals(current); /* just in case */ 7243 wait_event_interruptible(mddev->recovery_wait, 7244 mddev->resync_max > j 7245 || kthread_should_stop()); 7246 } 7247 7248 if (kthread_should_stop()) 7249 goto interrupted; 7250 7251 sectors = mddev->pers->sync_request(mddev, j, &skipped, 7252 currspeed < speed_min(mddev)); 7253 if (sectors == 0) { 7254 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 7255 goto out; 7256 } 7257 7258 if (!skipped) { /* actual IO requested */ 7259 io_sectors += sectors; 7260 atomic_add(sectors, &mddev->recovery_active); 7261 } 7262 7263 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 7264 break; 7265 7266 j += sectors; 7267 if (j>1) mddev->curr_resync = j; 7268 mddev->curr_mark_cnt = io_sectors; 7269 if (last_check == 0) 7270 /* this is the earliest that rebuild will be 7271 * visible in /proc/mdstat 7272 */ 7273 md_new_event(mddev); 7274 7275 if (last_check + window > io_sectors || j == max_sectors) 7276 continue; 7277 7278 last_check = io_sectors; 7279 repeat: 7280 if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) { 7281 /* step marks */ 7282 int next = (last_mark+1) % SYNC_MARKS; 7283 7284 mddev->resync_mark = mark[next]; 7285 mddev->resync_mark_cnt = mark_cnt[next]; 7286 mark[next] = jiffies; 7287 mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active); 7288 last_mark = next; 7289 } 7290 7291 7292 if (kthread_should_stop()) 7293 goto interrupted; 7294 7295 7296 /* 7297 * this loop exits only if either when we are slower than 7298 * the 'hard' speed limit, or the system was IO-idle for 7299 * a jiffy. 7300 * the system might be non-idle CPU-wise, but we only care 7301 * about not overloading the IO subsystem. 
(things like an 7302 * e2fsck being done on the RAID array should execute fast) 7303 */ 7304 cond_resched(); 7305 7306 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2 7307 /((jiffies-mddev->resync_mark)/HZ +1) +1; 7308 7309 if (currspeed > speed_min(mddev)) { 7310 if ((currspeed > speed_max(mddev)) || 7311 !is_mddev_idle(mddev, 0)) { 7312 msleep(500); 7313 goto repeat; 7314 } 7315 } 7316 } 7317 printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc); 7318 /* 7319 * this also signals 'finished resyncing' to md_stop 7320 */ 7321 out: 7322 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); 7323 7324 /* tell personality that we are finished */ 7325 mddev->pers->sync_request(mddev, max_sectors, &skipped, 1); 7326 7327 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && 7328 mddev->curr_resync > 2) { 7329 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 7330 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 7331 if (mddev->curr_resync >= mddev->recovery_cp) { 7332 printk(KERN_INFO 7333 "md: checkpointing %s of %s.\n", 7334 desc, mdname(mddev)); 7335 mddev->recovery_cp = mddev->curr_resync; 7336 } 7337 } else 7338 mddev->recovery_cp = MaxSector; 7339 } else { 7340 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 7341 mddev->curr_resync = MaxSector; 7342 rcu_read_lock(); 7343 list_for_each_entry_rcu(rdev, &mddev->disks, same_set) 7344 if (rdev->raid_disk >= 0 && 7345 mddev->delta_disks >= 0 && 7346 !test_bit(Faulty, &rdev->flags) && 7347 !test_bit(In_sync, &rdev->flags) && 7348 rdev->recovery_offset < mddev->curr_resync) 7349 rdev->recovery_offset = mddev->curr_resync; 7350 rcu_read_unlock(); 7351 } 7352 } 7353 set_bit(MD_CHANGE_DEVS, &mddev->flags); 7354 7355 skip: 7356 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 7357 /* We completed so min/max setting can be forgotten if used. */ 7358 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 7359 mddev->resync_min = 0; 7360 mddev->resync_max = MaxSector; 7361 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 7362 mddev->resync_min = mddev->curr_resync_completed; 7363 mddev->curr_resync = 0; 7364 wake_up(&resync_wait); 7365 set_bit(MD_RECOVERY_DONE, &mddev->recovery); 7366 md_wakeup_thread(mddev->thread); 7367 return; 7368 7369 interrupted: 7370 /* 7371 * got a signal, exit. 7372 */ 7373 printk(KERN_INFO 7374 "md: md_do_sync() got signal ... exiting\n"); 7375 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 7376 goto out; 7377 7378 } 7379 EXPORT_SYMBOL_GPL(md_do_sync); 7380 7381 static int remove_and_add_spares(struct mddev *mddev) 7382 { 7383 struct md_rdev *rdev; 7384 int spares = 0; 7385 int removed = 0; 7386 7387 mddev->curr_resync_completed = 0; 7388 7389 list_for_each_entry(rdev, &mddev->disks, same_set) 7390 if (rdev->raid_disk >= 0 && 7391 !test_bit(Blocked, &rdev->flags) && 7392 (test_bit(Faulty, &rdev->flags) || 7393 ! 
test_bit(In_sync, &rdev->flags)) && 7394 atomic_read(&rdev->nr_pending)==0) { 7395 if (mddev->pers->hot_remove_disk( 7396 mddev, rdev) == 0) { 7397 sysfs_unlink_rdev(mddev, rdev); 7398 rdev->raid_disk = -1; 7399 removed++; 7400 } 7401 } 7402 if (removed) 7403 sysfs_notify(&mddev->kobj, NULL, 7404 "degraded"); 7405 7406 7407 list_for_each_entry(rdev, &mddev->disks, same_set) { 7408 if (rdev->raid_disk >= 0 && 7409 !test_bit(In_sync, &rdev->flags) && 7410 !test_bit(Faulty, &rdev->flags)) 7411 spares++; 7412 if (rdev->raid_disk < 0 7413 && !test_bit(Faulty, &rdev->flags)) { 7414 rdev->recovery_offset = 0; 7415 if (mddev->pers-> 7416 hot_add_disk(mddev, rdev) == 0) { 7417 if (sysfs_link_rdev(mddev, rdev)) 7418 /* failure here is OK */; 7419 spares++; 7420 md_new_event(mddev); 7421 set_bit(MD_CHANGE_DEVS, &mddev->flags); 7422 } 7423 } 7424 } 7425 return spares; 7426 } 7427 7428 static void reap_sync_thread(struct mddev *mddev) 7429 { 7430 struct md_rdev *rdev; 7431 7432 /* resync has finished, collect result */ 7433 md_unregister_thread(&mddev->sync_thread); 7434 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) && 7435 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) { 7436 /* success...*/ 7437 /* activate any spares */ 7438 if (mddev->pers->spare_active(mddev)) 7439 sysfs_notify(&mddev->kobj, NULL, 7440 "degraded"); 7441 } 7442 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 7443 mddev->pers->finish_reshape) 7444 mddev->pers->finish_reshape(mddev); 7445 7446 /* If array is no-longer degraded, then any saved_raid_disk 7447 * information must be scrapped. Also if any device is now 7448 * In_sync we must scrape the saved_raid_disk for that device 7449 * do the superblock for an incrementally recovered device 7450 * written out. 7451 */ 7452 list_for_each_entry(rdev, &mddev->disks, same_set) 7453 if (!mddev->degraded || 7454 test_bit(In_sync, &rdev->flags)) 7455 rdev->saved_raid_disk = -1; 7456 7457 md_update_sb(mddev, 1); 7458 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7459 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 7460 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 7461 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 7462 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 7463 /* flag recovery needed just to double check */ 7464 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7465 sysfs_notify_dirent_safe(mddev->sysfs_action); 7466 md_new_event(mddev); 7467 if (mddev->event_work.func) 7468 queue_work(md_misc_wq, &mddev->event_work); 7469 } 7470 7471 /* 7472 * This routine is regularly called by all per-raid-array threads to 7473 * deal with generic issues like resync and super-block update. 7474 * Raid personalities that don't have a thread (linear/raid0) do not 7475 * need this as they never do any recovery or update the superblock. 7476 * 7477 * It does not do any resync itself, but rather "forks" off other threads 7478 * to do that as needed. 7479 * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in 7480 * "->recovery" and create a thread at ->sync_thread. 7481 * When the thread finishes it sets MD_RECOVERY_DONE 7482 * and wakeups up this thread which will reap the thread and finish up. 7483 * This thread also removes any faulty devices (with nr_pending == 0). 7484 * 7485 * The overall approach is: 7486 * 1/ if the superblock needs updating, update it. 7487 * 2/ If a recovery thread is running, don't do anything else. 7488 * 3/ If recovery has finished, clean up, possibly marking spares active. 
7489 * 4/ If there are any faulty devices, remove them. 7490 * 5/ If array is degraded, try to add spares devices 7491 * 6/ If array has spares or is not in-sync, start a resync thread. 7492 */ 7493 void md_check_recovery(struct mddev *mddev) 7494 { 7495 if (mddev->suspended) 7496 return; 7497 7498 if (mddev->bitmap) 7499 bitmap_daemon_work(mddev); 7500 7501 if (signal_pending(current)) { 7502 if (mddev->pers->sync_request && !mddev->external) { 7503 printk(KERN_INFO "md: %s in immediate safe mode\n", 7504 mdname(mddev)); 7505 mddev->safemode = 2; 7506 } 7507 flush_signals(current); 7508 } 7509 7510 if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 7511 return; 7512 if ( ! ( 7513 (mddev->flags & ~ (1<<MD_CHANGE_PENDING)) || 7514 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) || 7515 test_bit(MD_RECOVERY_DONE, &mddev->recovery) || 7516 (mddev->external == 0 && mddev->safemode == 1) || 7517 (mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending) 7518 && !mddev->in_sync && mddev->recovery_cp == MaxSector) 7519 )) 7520 return; 7521 7522 if (mddev_trylock(mddev)) { 7523 int spares = 0; 7524 7525 if (mddev->ro) { 7526 /* Only thing we do on a ro array is remove 7527 * failed devices. 7528 */ 7529 struct md_rdev *rdev; 7530 list_for_each_entry(rdev, &mddev->disks, same_set) 7531 if (rdev->raid_disk >= 0 && 7532 !test_bit(Blocked, &rdev->flags) && 7533 test_bit(Faulty, &rdev->flags) && 7534 atomic_read(&rdev->nr_pending)==0) { 7535 if (mddev->pers->hot_remove_disk( 7536 mddev, rdev) == 0) { 7537 sysfs_unlink_rdev(mddev, rdev); 7538 rdev->raid_disk = -1; 7539 } 7540 } 7541 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7542 goto unlock; 7543 } 7544 7545 if (!mddev->external) { 7546 int did_change = 0; 7547 spin_lock_irq(&mddev->write_lock); 7548 if (mddev->safemode && 7549 !atomic_read(&mddev->writes_pending) && 7550 !mddev->in_sync && 7551 mddev->recovery_cp == MaxSector) { 7552 mddev->in_sync = 1; 7553 did_change = 1; 7554 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 7555 } 7556 if (mddev->safemode == 1) 7557 mddev->safemode = 0; 7558 spin_unlock_irq(&mddev->write_lock); 7559 if (did_change) 7560 sysfs_notify_dirent_safe(mddev->sysfs_state); 7561 } 7562 7563 if (mddev->flags) 7564 md_update_sb(mddev, 0); 7565 7566 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) && 7567 !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) { 7568 /* resync/recovery still happening */ 7569 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7570 goto unlock; 7571 } 7572 if (mddev->sync_thread) { 7573 reap_sync_thread(mddev); 7574 goto unlock; 7575 } 7576 /* Set RUNNING before clearing NEEDED to avoid 7577 * any transients in the value of "sync_action". 7578 */ 7579 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7580 clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7581 /* Clear some bits that don't mean anything, but 7582 * might be left set 7583 */ 7584 clear_bit(MD_RECOVERY_INTR, &mddev->recovery); 7585 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 7586 7587 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 7588 goto unlock; 7589 /* no recovery is running. 7590 * remove any failed drives, then 7591 * add spares if possible. 7592 * Spare are also removed and re-added, to allow 7593 * the personality to fail the re-add. 
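 * The ladder below picks exactly one action, in priority order:
 * finish a pending reshape; otherwise rebuild onto spares if
 * remove_and_add_spares() found any; otherwise start a plain resync
 * if recovery_cp says the array is not clean; otherwise there is
 * nothing to do and we fall through to 'unlock'.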
7594 */ 7595 7596 if (mddev->reshape_position != MaxSector) { 7597 if (mddev->pers->check_reshape == NULL || 7598 mddev->pers->check_reshape(mddev) != 0) 7599 /* Cannot proceed */ 7600 goto unlock; 7601 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 7602 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 7603 } else if ((spares = remove_and_add_spares(mddev))) { 7604 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 7605 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 7606 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 7607 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 7608 } else if (mddev->recovery_cp < MaxSector) { 7609 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 7610 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 7611 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 7612 /* nothing to be done ... */ 7613 goto unlock; 7614 7615 if (mddev->pers->sync_request) { 7616 if (spares && mddev->bitmap && ! mddev->bitmap->file) { 7617 /* We are adding a device or devices to an array 7618 * which has the bitmap stored on all devices. 7619 * So make sure all bitmap pages get written 7620 */ 7621 bitmap_write_all(mddev->bitmap); 7622 } 7623 mddev->sync_thread = md_register_thread(md_do_sync, 7624 mddev, 7625 "resync"); 7626 if (!mddev->sync_thread) { 7627 printk(KERN_ERR "%s: could not start resync" 7628 " thread...\n", 7629 mdname(mddev)); 7630 /* leave the spares where they are, it shouldn't hurt */ 7631 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7632 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 7633 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 7634 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 7635 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 7636 } else 7637 md_wakeup_thread(mddev->sync_thread); 7638 sysfs_notify_dirent_safe(mddev->sysfs_action); 7639 md_new_event(mddev); 7640 } 7641 unlock: 7642 if (!mddev->sync_thread) { 7643 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7644 if (test_and_clear_bit(MD_RECOVERY_RECOVER, 7645 &mddev->recovery)) 7646 if (mddev->sysfs_action) 7647 sysfs_notify_dirent_safe(mddev->sysfs_action); 7648 } 7649 mddev_unlock(mddev); 7650 } 7651 } 7652 7653 void md_wait_for_blocked_rdev(struct md_rdev *rdev, struct mddev *mddev) 7654 { 7655 sysfs_notify_dirent_safe(rdev->sysfs_state); 7656 wait_event_timeout(rdev->blocked_wait, 7657 !test_bit(Blocked, &rdev->flags) && 7658 !test_bit(BlockedBadBlocks, &rdev->flags), 7659 msecs_to_jiffies(5000)); 7660 rdev_dec_pending(rdev, mddev); 7661 } 7662 EXPORT_SYMBOL(md_wait_for_blocked_rdev); 7663 7664 7665 /* Bad block management. 7666 * We can record which blocks on each device are 'bad' and so just 7667 * fail those blocks, or that stripe, rather than the whole device. 7668 * Entries in the bad-block table are 64bits wide. This comprises: 7669 * Length of bad-range, in sectors: 0-511 for lengths 1-512 7670 * Start of bad-range, sector offset, 54 bits (allows 8 exbibytes) 7671 * A 'shift' can be set so that larger blocks are tracked and 7672 * consequently larger devices can be covered. 7673 * 'Acknowledged' flag - 1 bit. - the most significant bit. 7674 * 7675 * Locking of the bad-block table uses a seqlock so md_is_badblock 7676 * might need to retry if it is very unlucky. 7677 * We will sometimes want to check for bad blocks in a bi_end_io function, 7678 * so we use the write_seqlock_irq variant. 7679 * 7680 * When looking for a bad block we specify a range and want to 7681 * know if any block in the range is bad. 
So we binary-search 7682 * to the last range that starts at-or-before the given endpoint, 7683 * (or "before the sector after the target range") 7684 * then see if it ends after the given start. 7685 * We return 7686 * 0 if there are no known bad blocks in the range 7687 * 1 if there are known bad block which are all acknowledged 7688 * -1 if there are bad blocks which have not yet been acknowledged in metadata. 7689 * plus the start/length of the first bad section we overlap. 7690 */ 7691 int md_is_badblock(struct badblocks *bb, sector_t s, int sectors, 7692 sector_t *first_bad, int *bad_sectors) 7693 { 7694 int hi; 7695 int lo = 0; 7696 u64 *p = bb->page; 7697 int rv = 0; 7698 sector_t target = s + sectors; 7699 unsigned seq; 7700 7701 if (bb->shift > 0) { 7702 /* round the start down, and the end up */ 7703 s >>= bb->shift; 7704 target += (1<<bb->shift) - 1; 7705 target >>= bb->shift; 7706 sectors = target - s; 7707 } 7708 /* 'target' is now the first block after the bad range */ 7709 7710 retry: 7711 seq = read_seqbegin(&bb->lock); 7712 7713 hi = bb->count; 7714 7715 /* Binary search between lo and hi for 'target' 7716 * i.e. for the last range that starts before 'target' 7717 */ 7718 /* INVARIANT: ranges before 'lo' and at-or-after 'hi' 7719 * are known not to be the last range before target. 7720 * VARIANT: hi-lo is the number of possible 7721 * ranges, and decreases until it reaches 1 7722 */ 7723 while (hi - lo > 1) { 7724 int mid = (lo + hi) / 2; 7725 sector_t a = BB_OFFSET(p[mid]); 7726 if (a < target) 7727 /* This could still be the one, earlier ranges 7728 * could not. */ 7729 lo = mid; 7730 else 7731 /* This and later ranges are definitely out. */ 7732 hi = mid; 7733 } 7734 /* 'lo' might be the last that started before target, but 'hi' isn't */ 7735 if (hi > lo) { 7736 /* need to check all range that end after 's' to see if 7737 * any are unacknowledged. 7738 */ 7739 while (lo >= 0 && 7740 BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) { 7741 if (BB_OFFSET(p[lo]) < target) { 7742 /* starts before the end, and finishes after 7743 * the start, so they must overlap 7744 */ 7745 if (rv != -1 && BB_ACK(p[lo])) 7746 rv = 1; 7747 else 7748 rv = -1; 7749 *first_bad = BB_OFFSET(p[lo]); 7750 *bad_sectors = BB_LEN(p[lo]); 7751 } 7752 lo--; 7753 } 7754 } 7755 7756 if (read_seqretry(&bb->lock, seq)) 7757 goto retry; 7758 7759 return rv; 7760 } 7761 EXPORT_SYMBOL_GPL(md_is_badblock); 7762 7763 /* 7764 * Add a range of bad blocks to the table. 7765 * This might extend the table, or might contract it 7766 * if two adjacent ranges can be merged. 7767 * We binary-search to find the 'insertion' point, then 7768 * decide how best to handle it. 
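 * For reference, each table entry packs into a single u64 as
 * described above md_is_badblock(): low 9 bits = length-1, next 54
 * bits = start sector, top bit = acknowledged.  An acknowledged
 * 16-sector range starting at sector 4096 is therefore stored
 * roughly as
 *
 *	BB_MAKE(4096, 16, 1) == (4096ULL << 9) | (16 - 1) | (1ULL << 63)
 *
 * (see the BB_* helpers in md.h for the authoritative encoding).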
7769 */ 7770 static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors, 7771 int acknowledged) 7772 { 7773 u64 *p; 7774 int lo, hi; 7775 int rv = 1; 7776 7777 if (bb->shift < 0) 7778 /* badblocks are disabled */ 7779 return 0; 7780 7781 if (bb->shift) { 7782 /* round the start down, and the end up */ 7783 sector_t next = s + sectors; 7784 s >>= bb->shift; 7785 next += (1<<bb->shift) - 1; 7786 next >>= bb->shift; 7787 sectors = next - s; 7788 } 7789 7790 write_seqlock_irq(&bb->lock); 7791 7792 p = bb->page; 7793 lo = 0; 7794 hi = bb->count; 7795 /* Find the last range that starts at-or-before 's' */ 7796 while (hi - lo > 1) { 7797 int mid = (lo + hi) / 2; 7798 sector_t a = BB_OFFSET(p[mid]); 7799 if (a <= s) 7800 lo = mid; 7801 else 7802 hi = mid; 7803 } 7804 if (hi > lo && BB_OFFSET(p[lo]) > s) 7805 hi = lo; 7806 7807 if (hi > lo) { 7808 /* we found a range that might merge with the start 7809 * of our new range 7810 */ 7811 sector_t a = BB_OFFSET(p[lo]); 7812 sector_t e = a + BB_LEN(p[lo]); 7813 int ack = BB_ACK(p[lo]); 7814 if (e >= s) { 7815 /* Yes, we can merge with a previous range */ 7816 if (s == a && s + sectors >= e) 7817 /* new range covers old */ 7818 ack = acknowledged; 7819 else 7820 ack = ack && acknowledged; 7821 7822 if (e < s + sectors) 7823 e = s + sectors; 7824 if (e - a <= BB_MAX_LEN) { 7825 p[lo] = BB_MAKE(a, e-a, ack); 7826 s = e; 7827 } else { 7828 /* does not all fit in one range, 7829 * make p[lo] maximal 7830 */ 7831 if (BB_LEN(p[lo]) != BB_MAX_LEN) 7832 p[lo] = BB_MAKE(a, BB_MAX_LEN, ack); 7833 s = a + BB_MAX_LEN; 7834 } 7835 sectors = e - s; 7836 } 7837 } 7838 if (sectors && hi < bb->count) { 7839 /* 'hi' points to the first range that starts after 's'. 7840 * Maybe we can merge with the start of that range */ 7841 sector_t a = BB_OFFSET(p[hi]); 7842 sector_t e = a + BB_LEN(p[hi]); 7843 int ack = BB_ACK(p[hi]); 7844 if (a <= s + sectors) { 7845 /* merging is possible */ 7846 if (e <= s + sectors) { 7847 /* full overlap */ 7848 e = s + sectors; 7849 ack = acknowledged; 7850 } else 7851 ack = ack && acknowledged; 7852 7853 a = s; 7854 if (e - a <= BB_MAX_LEN) { 7855 p[hi] = BB_MAKE(a, e-a, ack); 7856 s = e; 7857 } else { 7858 p[hi] = BB_MAKE(a, BB_MAX_LEN, ack); 7859 s = a + BB_MAX_LEN; 7860 } 7861 sectors = e - s; 7862 lo = hi; 7863 hi++; 7864 } 7865 } 7866 if (sectors == 0 && hi < bb->count) { 7867 /* we might be able to combine lo and hi */ 7868 /* Note: 's' is at the end of 'lo' */ 7869 sector_t a = BB_OFFSET(p[hi]); 7870 int lolen = BB_LEN(p[lo]); 7871 int hilen = BB_LEN(p[hi]); 7872 int newlen = lolen + hilen - (s - a); 7873 if (s >= a && newlen < BB_MAX_LEN) { 7874 /* yes, we can combine them */ 7875 int ack = BB_ACK(p[lo]) && BB_ACK(p[hi]); 7876 p[lo] = BB_MAKE(BB_OFFSET(p[lo]), newlen, ack); 7877 memmove(p + hi, p + hi + 1, 7878 (bb->count - hi - 1) * 8); 7879 bb->count--; 7880 } 7881 } 7882 while (sectors) { 7883 /* didn't merge (it all). 

int rdev_set_badblocks(struct md_rdev *rdev, sector_t s, int sectors,
		       int acknowledged)
{
	int rv = md_set_badblocks(&rdev->badblocks,
				  s + rdev->data_offset, sectors, acknowledged);
	if (rv) {
		/* Make sure they get written out promptly */
		sysfs_notify_dirent_safe(rdev->sysfs_state);
		set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
		md_wakeup_thread(rdev->mddev->thread);
	}
	return rv;
}
EXPORT_SYMBOL_GPL(rdev_set_badblocks);
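
/*
 * Illustrative sketch (not from the original source): typical error-path
 * use by a personality when a write to one member device fails.  'rdev',
 * 'sector' and 'sectors' are hypothetical caller-side names.
 *
 *	if (!rdev_set_badblocks(rdev, sector, sectors, 0))
 *		// table is full - we cannot remember the bad range,
 *		// so the whole device has to be failed instead
 *		md_error(rdev->mddev, rdev);
 *
 * Note that rdev_set_badblocks() adds rdev->data_offset itself, so callers
 * pass sectors relative to the start of the data area on the device.
 */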

/*
 * Remove a range of bad blocks from the table.
 * This may involve extending the table if we split a region,
 * but it must not fail.  So if the table becomes full, we just
 * drop the remove request.
 */
static int md_clear_badblocks(struct badblocks *bb, sector_t s, int sectors)
{
	u64 *p;
	int lo, hi;
	sector_t target = s + sectors;
	int rv = 0;

	if (bb->shift > 0) {
		/* When clearing we round the start up and the end down.
		 * This should not matter as the shift should align with
		 * the block size and no rounding should ever be needed.
		 * However it is better to think a block is bad when it
		 * isn't than to think a block is not bad when it is.
		 */
		s += (1<<bb->shift) - 1;
		s >>= bb->shift;
		target >>= bb->shift;
		sectors = target - s;
	}

	write_seqlock_irq(&bb->lock);

	p = bb->page;
	lo = 0;
	hi = bb->count;
	/* Find the last range that starts before 'target' */
	while (hi - lo > 1) {
		int mid = (lo + hi) / 2;
		sector_t a = BB_OFFSET(p[mid]);
		if (a < target)
			lo = mid;
		else
			hi = mid;
	}
	if (hi > lo) {
		/* p[lo] is the last range that could overlap the
		 * current range.  Earlier ranges could also overlap,
		 * but only this one can overlap the end of the range.
		 */
		if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) {
			/* Partial overlap, leave the tail of this range */
			int ack = BB_ACK(p[lo]);
			sector_t a = BB_OFFSET(p[lo]);
			sector_t end = a + BB_LEN(p[lo]);

			if (a < s) {
				/* we need to split this range */
				if (bb->count >= MD_MAX_BADBLOCKS) {
					rv = 0;
					goto out;
				}
				memmove(p+lo+1, p+lo, (bb->count - lo) * 8);
				bb->count++;
				p[lo] = BB_MAKE(a, s-a, ack);
				lo++;
			}
			p[lo] = BB_MAKE(target, end - target, ack);
			/* there is no longer an overlap */
			hi = lo;
			lo--;
		}
		while (lo >= 0 &&
		       BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
			/* This range does overlap */
			if (BB_OFFSET(p[lo]) < s) {
				/* Keep the early parts of this range. */
				int ack = BB_ACK(p[lo]);
				sector_t start = BB_OFFSET(p[lo]);
				p[lo] = BB_MAKE(start, s - start, ack);
				/* now p[lo] doesn't overlap, so we are done */
				break;
			}
			lo--;
		}
		/* 'lo' is strictly before, 'hi' is strictly after,
		 * anything between needs to be discarded
		 */
		if (hi - lo > 1) {
			memmove(p+lo+1, p+hi, (bb->count - hi) * 8);
			bb->count -= (hi - lo - 1);
		}
	}

	bb->changed = 1;
out:
	write_sequnlock_irq(&bb->lock);
	return rv;
}

int rdev_clear_badblocks(struct md_rdev *rdev, sector_t s, int sectors)
{
	return md_clear_badblocks(&rdev->badblocks,
				  s + rdev->data_offset,
				  sectors);
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);

/*
 * Acknowledge all bad blocks in a list.
 * This only succeeds if ->changed is clear.  It is used by
 * in-kernel metadata updates
 */
void md_ack_all_badblocks(struct badblocks *bb)
{
	if (bb->page == NULL || bb->changed)
		/* no point even trying */
		return;
	write_seqlock_irq(&bb->lock);

	if (bb->changed == 0) {
		u64 *p = bb->page;
		int i;
		for (i = 0; i < bb->count ; i++) {
			if (!BB_ACK(p[i])) {
				sector_t start = BB_OFFSET(p[i]);
				int len = BB_LEN(p[i]);
				p[i] = BB_MAKE(start, len, 1);
			}
		}
		bb->unacked_exist = 0;
	}
	write_sequnlock_irq(&bb->lock);
}
EXPORT_SYMBOL_GPL(md_ack_all_badblocks);

/* sysfs access to bad-blocks list.
 * We present two files.
 * 'bad-blocks' lists sector numbers and lengths of ranges that
 *    are recorded as bad.  The list is truncated to fit within
 *    the one-page limit of sysfs.
 *    Writing "sector length" to this file adds an acknowledged
 *    bad block.
 * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
 *    been acknowledged.  Writing to this file adds bad blocks
 *    without acknowledging them.  This is largely for testing.
 */

static ssize_t
badblocks_show(struct badblocks *bb, char *page, int unack)
{
	size_t len;
	int i;
	u64 *p = bb->page;
	unsigned seq;

	if (bb->shift < 0)
		return 0;

retry:
	seq = read_seqbegin(&bb->lock);

	len = 0;
	i = 0;

	while (len < PAGE_SIZE && i < bb->count) {
		sector_t s = BB_OFFSET(p[i]);
		unsigned int length = BB_LEN(p[i]);
		int ack = BB_ACK(p[i]);
		i++;

		if (unack && ack)
			continue;

		len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n",
				(unsigned long long)s << bb->shift,
				length << bb->shift);
	}
	if (unack && len == 0)
		bb->unacked_exist = 0;

	if (read_seqretry(&bb->lock, seq))
		goto retry;

	return len;
}
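
/*
 * Format notes (derived from badblocks_show() above and badblocks_store()
 * below): reading produces one "sector length\n" pair per listed range,
 * shifted back by bb->shift into sectors.  Writing accepts "sector length"
 * or "sector length\n"; with DO_DEBUG defined, a leading '-' (for example
 * "-1000 8") clears the range instead of setting it.
 */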

#define DO_DEBUG 1

static ssize_t
badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack)
{
	unsigned long long sector;
	int length;
	char newline;
#ifdef DO_DEBUG
	/* Allow clearing via sysfs *only* for testing/debugging.
	 * Normally only a successful write may clear a badblock
	 */
	int clear = 0;
	if (page[0] == '-') {
		clear = 1;
		page++;
	}
#endif /* DO_DEBUG */

	switch (sscanf(page, "%llu %d%c", &sector, &length, &newline)) {
	case 3:
		if (newline != '\n')
			return -EINVAL;
		/* fall through */
	case 2:
		if (length <= 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

#ifdef DO_DEBUG
	if (clear) {
		md_clear_badblocks(bb, sector, length);
		return len;
	}
#endif /* DO_DEBUG */
	if (md_set_badblocks(bb, sector, length, !unack))
		return len;
	else
		return -ENOSPC;
}

static int md_notify_reboot(struct notifier_block *this,
			    unsigned long code, void *x)
{
	struct list_head *tmp;
	struct mddev *mddev;
	int need_delay = 0;

	if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {

		printk(KERN_INFO "md: stopping all md devices.\n");

		for_each_mddev(mddev, tmp) {
			if (mddev_trylock(mddev)) {
				/* Force a switch to readonly even if the
				 * array appears to still be in use.  Hence
				 * the '100'.
				 */
				md_set_readonly(mddev, 100);
				mddev_unlock(mddev);
			}
			need_delay = 1;
		}
		/*
		 * certain more exotic SCSI devices are known to be
		 * volatile wrt too early system reboots. While the
		 * right place to handle this issue is the given
		 * driver, we do want to have a safe RAID driver ...
		 */
		if (need_delay)
			mdelay(1000*1);
	}
	return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};

static void md_geninit(void)
{
	pr_debug("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}

static int __init md_init(void)
{
	int ret = -ENOMEM;

	md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
	if (!md_wq)
		goto err_wq;

	md_misc_wq = alloc_workqueue("md_misc", 0, 0);
	if (!md_misc_wq)
		goto err_misc_wq;

	if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
		goto err_md;

	if ((ret = register_blkdev(0, "mdp")) < 0)
		goto err_mdp;
	mdp_major = ret;

	blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);
	blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);

	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table);

	md_geninit();
	return 0;

err_mdp:
	unregister_blkdev(MD_MAJOR, "md");
err_md:
	destroy_workqueue(md_misc_wq);
err_misc_wq:
	destroy_workqueue(md_wq);
err_wq:
	return ret;
}
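
/*
 * Note on the error unwinding above: each err_* label undoes only the
 * registrations that had already succeeded, in reverse order, so a
 * failure part-way through md_init() leaves nothing behind.  md_exit()
 * below tears the same resources down in roughly the reverse of the
 * order md_init() set them up.
 */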
8243 */ 8244 8245 static LIST_HEAD(all_detected_devices); 8246 struct detected_devices_node { 8247 struct list_head list; 8248 dev_t dev; 8249 }; 8250 8251 void md_autodetect_dev(dev_t dev) 8252 { 8253 struct detected_devices_node *node_detected_dev; 8254 8255 node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL); 8256 if (node_detected_dev) { 8257 node_detected_dev->dev = dev; 8258 list_add_tail(&node_detected_dev->list, &all_detected_devices); 8259 } else { 8260 printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed" 8261 ", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev)); 8262 } 8263 } 8264 8265 8266 static void autostart_arrays(int part) 8267 { 8268 struct md_rdev *rdev; 8269 struct detected_devices_node *node_detected_dev; 8270 dev_t dev; 8271 int i_scanned, i_passed; 8272 8273 i_scanned = 0; 8274 i_passed = 0; 8275 8276 printk(KERN_INFO "md: Autodetecting RAID arrays.\n"); 8277 8278 while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) { 8279 i_scanned++; 8280 node_detected_dev = list_entry(all_detected_devices.next, 8281 struct detected_devices_node, list); 8282 list_del(&node_detected_dev->list); 8283 dev = node_detected_dev->dev; 8284 kfree(node_detected_dev); 8285 rdev = md_import_device(dev,0, 90); 8286 if (IS_ERR(rdev)) 8287 continue; 8288 8289 if (test_bit(Faulty, &rdev->flags)) { 8290 MD_BUG(); 8291 continue; 8292 } 8293 set_bit(AutoDetected, &rdev->flags); 8294 list_add(&rdev->same_set, &pending_raid_disks); 8295 i_passed++; 8296 } 8297 8298 printk(KERN_INFO "md: Scanned %d and added %d devices.\n", 8299 i_scanned, i_passed); 8300 8301 autorun_devices(part); 8302 } 8303 8304 #endif /* !MODULE */ 8305 8306 static __exit void md_exit(void) 8307 { 8308 struct mddev *mddev; 8309 struct list_head *tmp; 8310 8311 blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS); 8312 blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS); 8313 8314 unregister_blkdev(MD_MAJOR,"md"); 8315 unregister_blkdev(mdp_major, "mdp"); 8316 unregister_reboot_notifier(&md_notifier); 8317 unregister_sysctl_table(raid_table_header); 8318 remove_proc_entry("mdstat", NULL); 8319 for_each_mddev(mddev, tmp) { 8320 export_array(mddev); 8321 mddev->hold_active = 0; 8322 } 8323 destroy_workqueue(md_misc_wq); 8324 destroy_workqueue(md_wq); 8325 } 8326 8327 subsys_initcall(md_init); 8328 module_exit(md_exit) 8329 8330 static int get_ro(char *buffer, struct kernel_param *kp) 8331 { 8332 return sprintf(buffer, "%d", start_readonly); 8333 } 8334 static int set_ro(const char *val, struct kernel_param *kp) 8335 { 8336 char *e; 8337 int num = simple_strtoul(val, &e, 10); 8338 if (*val && (*e == '\0' || *e == '\n')) { 8339 start_readonly = num; 8340 return 0; 8341 } 8342 return -EINVAL; 8343 } 8344 8345 module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR); 8346 module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR); 8347 8348 module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR); 8349 8350 EXPORT_SYMBOL(register_md_personality); 8351 EXPORT_SYMBOL(unregister_md_personality); 8352 EXPORT_SYMBOL(md_error); 8353 EXPORT_SYMBOL(md_done_sync); 8354 EXPORT_SYMBOL(md_write_start); 8355 EXPORT_SYMBOL(md_write_end); 8356 EXPORT_SYMBOL(md_register_thread); 8357 EXPORT_SYMBOL(md_unregister_thread); 8358 EXPORT_SYMBOL(md_wakeup_thread); 8359 EXPORT_SYMBOL(md_check_recovery); 8360 MODULE_LICENSE("GPL"); 8361 MODULE_DESCRIPTION("MD RAID framework"); 8362 MODULE_ALIAS("md"); 8363 MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR); 8364