/*
   md.c : Multiple Devices driver for Linux
	  Copyright (C) 1998, 1999, 2000 Ingo Molnar

     completely rewritten, based on the MD driver code from Marc Zyngier

   Changes:

   - RAID-1/RAID-5 extensions by Miguel de Icaza, Gadi Oxman, Ingo Molnar
   - RAID-6 extensions by H. Peter Anvin <hpa@zytor.com>
   - boot support for linear and striped mode by Harald Hoyer <HarryH@Royal.Net>
   - kerneld support by Boris Tobotras <boris@xtalk.msk.su>
   - kmod support by: Cyrus Durgin
   - RAID0 bugfixes: Mark Anthony Lisher <markal@iname.com>
   - Devfs support by Richard Gooch <rgooch@atnf.csiro.au>

   - lots of fixes and improvements to the RAID1/RAID5 and generic
     RAID code (such as request based resynchronization):

     Neil Brown <neilb@cse.unsw.edu.au>.

   - persistent bitmap code
     Copyright (C) 2003-2004, Paul Clements, SteelEye Technology, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   You should have received a copy of the GNU General Public License
   (for example /usr/src/linux/COPYING); if not, write to the Free
   Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/

#include <linux/kthread.h>
#include <linux/blkdev.h>
#include <linux/sysctl.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>
#include <linux/buffer_head.h> /* for invalidate_bdev */
#include <linux/poll.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/hdreg.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/file.h>
#include <linux/compat.h>
#include <linux/delay.h>
#include <linux/raid/md_p.h>
#include <linux/raid/md_u.h>
#include <linux/slab.h>
#include "md.h"
#include "bitmap.h"

#define DEBUG 0
#define dprintk(x...) ((void)(DEBUG && printk(x)))

#ifndef MODULE
static void autostart_arrays(int part);
#endif

/* pers_list is a list of registered personalities protected
 * by pers_lock.
 * pers_lock does extra service to protect accesses to
 * mddev->thread when the mutex cannot be held.
 */
static LIST_HEAD(pers_list);
static DEFINE_SPINLOCK(pers_lock);

static void md_print_devices(void);

static DECLARE_WAIT_QUEUE_HEAD(resync_wait);
static struct workqueue_struct *md_wq;
static struct workqueue_struct *md_misc_wq;

#define MD_BUG(x...) { printk("md: bug in file %s, line %d\n", __FILE__, __LINE__); md_print_devices(); }

/*
 * Default number of read corrections we'll attempt on an rdev
 * before ejecting it from the array. We divide the read error
 * count by 2 for every hour elapsed between read errors.
 */
#define MD_DEFAULT_MAX_CORRECTED_READ_ERRORS 20
/*
 * Current RAID-1,4,5 parallel reconstruction 'guaranteed speed limit'
 * is 1000 KB/sec, so the extra system load does not show up that much.
 * Increase it if you want to have more _guaranteed_ speed. Note that
 * the RAID driver will use the maximum available bandwidth if the IO
 * subsystem is idle. There is also an 'absolute maximum' reconstruction
 * speed limit - in case reconstruction slows down your system despite
 * idle IO detection.
 *
 * you can change it via /proc/sys/dev/raid/speed_limit_min and _max.
 * or /sys/block/mdX/md/sync_speed_{min,max}
 */

static int sysctl_speed_limit_min = 1000;
static int sysctl_speed_limit_max = 200000;
static inline int speed_min(mddev_t *mddev)
{
	return mddev->sync_speed_min ?
		mddev->sync_speed_min : sysctl_speed_limit_min;
}

static inline int speed_max(mddev_t *mddev)
{
	return mddev->sync_speed_max ?
		mddev->sync_speed_max : sysctl_speed_limit_max;
}

static struct ctl_table_header *raid_table_header;

static ctl_table raid_table[] = {
	{
		.procname	= "speed_limit_min",
		.data		= &sysctl_speed_limit_min,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "speed_limit_max",
		.data		= &sysctl_speed_limit_max,
		.maxlen		= sizeof(int),
		.mode		= S_IRUGO|S_IWUSR,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

static ctl_table raid_dir_table[] = {
	{
		.procname	= "raid",
		.maxlen		= 0,
		.mode		= S_IRUGO|S_IXUGO,
		.child		= raid_table,
	},
	{ }
};

static ctl_table raid_root_table[] = {
	{
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= raid_dir_table,
	},
	{ }
};

static const struct block_device_operations md_fops;

static int start_readonly;

/* bio_clone_mddev
 * like bio_clone, but with a local bio set
 */

static void mddev_bio_destructor(struct bio *bio)
{
	mddev_t *mddev, **mddevp;

	mddevp = (void*)bio;
	mddev = mddevp[-1];

	bio_free(bio, mddev->bio_set);
}

struct bio *bio_alloc_mddev(gfp_t gfp_mask, int nr_iovecs,
			    mddev_t *mddev)
{
	struct bio *b;
	mddev_t **mddevp;

	if (!mddev || !mddev->bio_set)
		return bio_alloc(gfp_mask, nr_iovecs);

	b = bio_alloc_bioset(gfp_mask, nr_iovecs,
			     mddev->bio_set);
	if (!b)
		return NULL;
	mddevp = (void*)b;
	mddevp[-1] = mddev;
	b->bi_destructor = mddev_bio_destructor;
	return b;
}
EXPORT_SYMBOL_GPL(bio_alloc_mddev);

struct bio *bio_clone_mddev(struct bio *bio, gfp_t gfp_mask,
			    mddev_t *mddev)
{
	struct bio *b;
	mddev_t **mddevp;

	if (!mddev || !mddev->bio_set)
		return bio_clone(bio, gfp_mask);

	b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs,
			     mddev->bio_set);
	if (!b)
		return NULL;
	mddevp = (void*)b;
	mddevp[-1] = mddev;
	b->bi_destructor = mddev_bio_destructor;
	__bio_clone(b, bio);
	if (bio_integrity(bio)) {
		int ret;

		ret = bio_integrity_clone(b, bio, gfp_mask, mddev->bio_set);

		if (ret < 0) {
			bio_put(b);
			return NULL;
		}
	}

	return b;
}
EXPORT_SYMBOL_GPL(bio_clone_mddev);

void md_trim_bio(struct bio *bio, int offset, int size)
{
	/* 'bio' is a cloned bio which we need to trim to match
	 * the given offset and size.
	 * This requires adjusting bi_sector, bi_size, and bi_io_vec
	 */
	int i;
	struct bio_vec *bvec;
	int sofar = 0;

	size <<= 9;
	if (offset == 0 && size == bio->bi_size)
		return;

	bio->bi_sector += offset;
	bio->bi_size = size;
	offset <<= 9;
	clear_bit(BIO_SEG_VALID, &bio->bi_flags);

	while (bio->bi_idx < bio->bi_vcnt &&
	       bio->bi_io_vec[bio->bi_idx].bv_len <= offset) {
		/* remove this whole bio_vec */
		offset -= bio->bi_io_vec[bio->bi_idx].bv_len;
		bio->bi_idx++;
	}
	if (bio->bi_idx < bio->bi_vcnt) {
		bio->bi_io_vec[bio->bi_idx].bv_offset += offset;
		bio->bi_io_vec[bio->bi_idx].bv_len -= offset;
	}
	/* avoid any complications with bi_idx being non-zero */
	if (bio->bi_idx) {
		memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
			(bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec));
		bio->bi_vcnt -= bio->bi_idx;
		bio->bi_idx = 0;
	}
	/* Make sure vcnt and last bv are not too big */
	bio_for_each_segment(bvec, bio, i) {
		if (sofar + bvec->bv_len > size)
			bvec->bv_len = size - sofar;
		if (bvec->bv_len == 0) {
			bio->bi_vcnt = i;
			break;
		}
		sofar += bvec->bv_len;
	}
}
EXPORT_SYMBOL_GPL(md_trim_bio);

/*
 * We have a system wide 'event count' that is incremented
 * on any 'interesting' event, and readers of /proc/mdstat
 * can use 'poll' or 'select' to find out when the event
 * count increases.
 *
 * Events are:
 *  start array, stop array, error, add device, remove device,
 *  start build, activate spare
 */
static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
static atomic_t md_event_count;
void md_new_event(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}
EXPORT_SYMBOL_GPL(md_new_event);

/* Alternate version that can be called from interrupts
 * when calling sysfs_notify isn't needed.
 */
static void md_new_event_inintr(mddev_t *mddev)
{
	atomic_inc(&md_event_count);
	wake_up(&md_event_waiters);
}

/*
 * Enables iteration over all existing md arrays;
 * all_mddevs_lock protects this list.
 */
static LIST_HEAD(all_mddevs);
static DEFINE_SPINLOCK(all_mddevs_lock);


/*
 * iterates through all used mddevs in the system.
 * We take care to grab the all_mddevs_lock whenever navigating
 * the list, and to always hold a refcount when unlocked.
 * Any code which breaks out of this loop while owning
 * a reference to the current mddev must mddev_put it.
 */
#define for_each_mddev(mddev,tmp)					\
									\
	for (({ spin_lock(&all_mddevs_lock);				\
		tmp = all_mddevs.next;					\
		mddev = NULL;});					\
	     ({ if (tmp != &all_mddevs)					\
			mddev_get(list_entry(tmp, mddev_t, all_mddevs));\
		spin_unlock(&all_mddevs_lock);				\
		if (mddev) mddev_put(mddev);				\
		mddev = list_entry(tmp, mddev_t, all_mddevs);		\
		tmp != &all_mddevs;});					\
	     ({ spin_lock(&all_mddevs_lock);				\
		tmp = tmp->next;})					\
		)


/* Rather than calling directly into the personality make_request function,
 * IO requests come here first so that we can check if the device
 * is being suspended pending a reconfiguration.
 * We hold a refcount over the call to ->make_request.  By the time that
 * call has finished, the bio has been linked into some internal structure
 * and so is visible to ->quiesce(), so we don't need the refcount any more.
 */
static int md_make_request(struct request_queue *q, struct bio *bio)
{
	const int rw = bio_data_dir(bio);
	mddev_t *mddev = q->queuedata;
	int rv;
	int cpu;
	unsigned int sectors;

	if (mddev == NULL || mddev->pers == NULL
	    || !mddev->ready) {
		bio_io_error(bio);
		return 0;
	}
	smp_rmb(); /* Ensure implications of 'active' are visible */
	rcu_read_lock();
	if (mddev->suspended) {
		DEFINE_WAIT(__wait);
		for (;;) {
			prepare_to_wait(&mddev->sb_wait, &__wait,
					TASK_UNINTERRUPTIBLE);
			if (!mddev->suspended)
				break;
			rcu_read_unlock();
			schedule();
			rcu_read_lock();
		}
		finish_wait(&mddev->sb_wait, &__wait);
	}
	atomic_inc(&mddev->active_io);
	rcu_read_unlock();

	/*
	 * save the sectors now since our bio can
	 * go away inside make_request
	 */
	sectors = bio_sectors(bio);
	rv = mddev->pers->make_request(mddev, bio);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &mddev->gendisk->part0, ios[rw]);
	part_stat_add(cpu, &mddev->gendisk->part0, sectors[rw], sectors);
	part_stat_unlock();

	if (atomic_dec_and_test(&mddev->active_io) && mddev->suspended)
		wake_up(&mddev->sb_wait);

	return rv;
}

/* mddev_suspend makes sure no new requests are submitted
 * to the device, and that any requests that have been submitted
 * are completely handled.
 * Once ->stop is called and completes, the module will be completely
 * unused.
 */
void mddev_suspend(mddev_t *mddev)
{
	BUG_ON(mddev->suspended);
	mddev->suspended = 1;
	synchronize_rcu();
	wait_event(mddev->sb_wait, atomic_read(&mddev->active_io) == 0);
	mddev->pers->quiesce(mddev, 1);
}
EXPORT_SYMBOL_GPL(mddev_suspend);

void mddev_resume(mddev_t *mddev)
{
	mddev->suspended = 0;
	wake_up(&mddev->sb_wait);
	mddev->pers->quiesce(mddev, 0);

	md_wakeup_thread(mddev->thread);
	md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */
}
EXPORT_SYMBOL_GPL(mddev_resume);

int mddev_congested(mddev_t *mddev, int bits)
{
	return mddev->suspended;
}
EXPORT_SYMBOL(mddev_congested);

/*
 * Generic flush handling for md
 */

static void md_end_flush(struct bio *bio, int err)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;

	rdev_dec_pending(rdev, mddev);

	if (atomic_dec_and_test(&mddev->flush_pending)) {
		/* The pre-request flush has finished */
		queue_work(md_wq, &mddev->flush_work);
	}
	bio_put(bio);
}

static void md_submit_flush_data(struct work_struct *ws);

static void submit_flushes(struct work_struct *ws)
{
	mddev_t *mddev = container_of(ws, mddev_t, flush_work);
	mdk_rdev_t *rdev;

	INIT_WORK(&mddev->flush_work, md_submit_flush_data);
	atomic_set(&mddev->flush_pending, 1);
	rcu_read_lock();
	list_for_each_entry_rcu(rdev, &mddev->disks, same_set)
		if (rdev->raid_disk >= 0 &&
		    !test_bit(Faulty, &rdev->flags)) {
			/* Take two references, one is dropped
			 * when request finishes, one after
			 * we reclaim rcu_read_lock
			 */
			struct bio *bi;
			atomic_inc(&rdev->nr_pending);
			atomic_inc(&rdev->nr_pending);
			rcu_read_unlock();
			bi = bio_alloc_mddev(GFP_KERNEL, 0, mddev);
			bi->bi_end_io = md_end_flush;
			bi->bi_private = rdev;
			bi->bi_bdev = rdev->bdev;
			atomic_inc(&mddev->flush_pending);
			submit_bio(WRITE_FLUSH, bi);
			rcu_read_lock();
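			/* drop the second reference taken above, now that
			 * we are back under rcu_read_lock; the first is
			 * released by md_end_flush when the flush completes
			 */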
			rdev_dec_pending(rdev, mddev);
		}
	rcu_read_unlock();
	if (atomic_dec_and_test(&mddev->flush_pending))
		queue_work(md_wq, &mddev->flush_work);
}

static void md_submit_flush_data(struct work_struct *ws)
{
	mddev_t *mddev = container_of(ws, mddev_t, flush_work);
	struct bio *bio = mddev->flush_bio;

	if (bio->bi_size == 0)
		/* an empty barrier - all done */
		bio_endio(bio, 0);
	else {
		bio->bi_rw &= ~REQ_FLUSH;
		if (mddev->pers->make_request(mddev, bio))
			generic_make_request(bio);
	}

	mddev->flush_bio = NULL;
	wake_up(&mddev->sb_wait);
}

void md_flush_request(mddev_t *mddev, struct bio *bio)
{
	spin_lock_irq(&mddev->write_lock);
	wait_event_lock_irq(mddev->sb_wait,
			    !mddev->flush_bio,
			    mddev->write_lock, /*nothing*/);
	mddev->flush_bio = bio;
	spin_unlock_irq(&mddev->write_lock);

	INIT_WORK(&mddev->flush_work, submit_flushes);
	queue_work(md_wq, &mddev->flush_work);
}
EXPORT_SYMBOL(md_flush_request);

/* Support for plugging.
 * This mirrors the plugging support in request_queue, but does not
 * require having a whole queue or request structures.
 * We allocate an md_plug_cb for each md device and each thread it gets
 * plugged on.  This links to the private plug_handle structure in the
 * personality data where we keep a count of the number of outstanding
 * plugs so other code can see if a plug is active.
 */
struct md_plug_cb {
	struct blk_plug_cb cb;
	mddev_t *mddev;
};

static void plugger_unplug(struct blk_plug_cb *cb)
{
	struct md_plug_cb *mdcb = container_of(cb, struct md_plug_cb, cb);
	if (atomic_dec_and_test(&mdcb->mddev->plug_cnt))
		md_wakeup_thread(mdcb->mddev->thread);
	kfree(mdcb);
}

/* Check that an unplug wakeup will come shortly.
 * If not, wakeup the md thread immediately
 */
int mddev_check_plugged(mddev_t *mddev)
{
	struct blk_plug *plug = current->plug;
	struct md_plug_cb *mdcb;

	if (!plug)
		return 0;

	list_for_each_entry(mdcb, &plug->cb_list, cb.list) {
		if (mdcb->cb.callback == plugger_unplug &&
		    mdcb->mddev == mddev) {
			/* Already on the list, move to top */
			if (mdcb != list_first_entry(&plug->cb_list,
						     struct md_plug_cb,
						     cb.list))
				list_move(&mdcb->cb.list, &plug->cb_list);
			return 1;
		}
	}
	/* Not currently on the callback list */
	mdcb = kmalloc(sizeof(*mdcb), GFP_ATOMIC);
	if (!mdcb)
		return 0;

	mdcb->mddev = mddev;
	mdcb->cb.callback = plugger_unplug;
	atomic_inc(&mddev->plug_cnt);
	list_add(&mdcb->cb.list, &plug->cb_list);
	return 1;
}
EXPORT_SYMBOL_GPL(mddev_check_plugged);

static inline mddev_t *mddev_get(mddev_t *mddev)
{
	atomic_inc(&mddev->active);
	return mddev;
}

static void mddev_delayed_delete(struct work_struct *ws);

static void mddev_put(mddev_t *mddev)
{
	struct bio_set *bs = NULL;

	if (!atomic_dec_and_lock(&mddev->active, &all_mddevs_lock))
		return;
	if (!mddev->raid_disks && list_empty(&mddev->disks) &&
	    mddev->ctime == 0 && !mddev->hold_active) {
		/* Array is not configured at all, and not held active,
		 * so destroy it */
		list_del(&mddev->all_mddevs);
		bs = mddev->bio_set;
		mddev->bio_set = NULL;
		if (mddev->gendisk) {
			/* We did a probe so need to clean up.
			 * Call queue_work inside the spinlock so that
			 * flush_workqueue() after mddev_find will
			 * succeed in waiting for the work to be done.
			 */
			INIT_WORK(&mddev->del_work, mddev_delayed_delete);
			queue_work(md_misc_wq, &mddev->del_work);
		} else
			kfree(mddev);
	}
	spin_unlock(&all_mddevs_lock);
	if (bs)
		bioset_free(bs);
}

void mddev_init(mddev_t *mddev)
{
	mutex_init(&mddev->open_mutex);
	mutex_init(&mddev->reconfig_mutex);
	mutex_init(&mddev->bitmap_info.mutex);
	INIT_LIST_HEAD(&mddev->disks);
	INIT_LIST_HEAD(&mddev->all_mddevs);
	init_timer(&mddev->safemode_timer);
	atomic_set(&mddev->active, 1);
	atomic_set(&mddev->openers, 0);
	atomic_set(&mddev->active_io, 0);
	atomic_set(&mddev->plug_cnt, 0);
	spin_lock_init(&mddev->write_lock);
	atomic_set(&mddev->flush_pending, 0);
	init_waitqueue_head(&mddev->sb_wait);
	init_waitqueue_head(&mddev->recovery_wait);
	mddev->reshape_position = MaxSector;
	mddev->resync_min = 0;
	mddev->resync_max = MaxSector;
	mddev->level = LEVEL_NONE;
}
EXPORT_SYMBOL_GPL(mddev_init);

static mddev_t * mddev_find(dev_t unit)
{
	mddev_t *mddev, *new = NULL;

	if (unit && MAJOR(unit) != MD_MAJOR)
		unit &= ~((1<<MdpMinorShift)-1);

 retry:
	spin_lock(&all_mddevs_lock);

	if (unit) {
		list_for_each_entry(mddev, &all_mddevs, all_mddevs)
			if (mddev->unit == unit) {
				mddev_get(mddev);
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return mddev;
			}

		if (new) {
			list_add(&new->all_mddevs, &all_mddevs);
			spin_unlock(&all_mddevs_lock);
			new->hold_active = UNTIL_IOCTL;
			return new;
		}
	} else if (new) {
		/* find an unused unit number */
		static int next_minor = 512;
		int start = next_minor;
		int is_free = 0;
		int dev = 0;
		while (!is_free) {
			dev = MKDEV(MD_MAJOR, next_minor);
			next_minor++;
			if (next_minor > MINORMASK)
				next_minor = 0;
			if (next_minor == start) {
				/* Oh dear, all in use. */
				spin_unlock(&all_mddevs_lock);
				kfree(new);
				return NULL;
			}

			is_free = 1;
			list_for_each_entry(mddev, &all_mddevs, all_mddevs)
				if (mddev->unit == dev) {
					is_free = 0;
					break;
				}
		}
		new->unit = dev;
		new->md_minor = MINOR(dev);
		new->hold_active = UNTIL_STOP;
		list_add(&new->all_mddevs, &all_mddevs);
		spin_unlock(&all_mddevs_lock);
		return new;
	}
	spin_unlock(&all_mddevs_lock);

	new = kzalloc(sizeof(*new), GFP_KERNEL);
	if (!new)
		return NULL;

	new->unit = unit;
	if (MAJOR(unit) == MD_MAJOR)
		new->md_minor = MINOR(unit);
	else
		new->md_minor = MINOR(unit) >> MdpMinorShift;

	mddev_init(new);

	goto retry;
}

static inline int mddev_lock(mddev_t * mddev)
{
	return mutex_lock_interruptible(&mddev->reconfig_mutex);
}

static inline int mddev_is_locked(mddev_t *mddev)
{
	return mutex_is_locked(&mddev->reconfig_mutex);
}

static inline int mddev_trylock(mddev_t * mddev)
{
	return mutex_trylock(&mddev->reconfig_mutex);
}

static struct attribute_group md_redundancy_group;

static void mddev_unlock(mddev_t * mddev)
{
	if (mddev->to_remove) {
		/* These cannot be removed under reconfig_mutex as
		 * an access to the files will try to take reconfig_mutex
		 * while holding the file unremovable, which leads to
		 * a deadlock.
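		 * (For example, a sysfs 'store' method that ends up
		 * removing the attribute group containing its own file.)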
		 * So we set sysfs_active while the remove is happening,
		 * and anything else which might set ->to_remove or may
		 * otherwise change the sysfs namespace will fail with
		 * -EBUSY if sysfs_active is still set.
		 * We set sysfs_active under reconfig_mutex and elsewhere
		 * test it under the same mutex to ensure its correct value
		 * is seen.
		 */
		struct attribute_group *to_remove = mddev->to_remove;
		mddev->to_remove = NULL;
		mddev->sysfs_active = 1;
		mutex_unlock(&mddev->reconfig_mutex);

		if (mddev->kobj.sd) {
			if (to_remove != &md_redundancy_group)
				sysfs_remove_group(&mddev->kobj, to_remove);
			if (mddev->pers == NULL ||
			    mddev->pers->sync_request == NULL) {
				sysfs_remove_group(&mddev->kobj, &md_redundancy_group);
				if (mddev->sysfs_action)
					sysfs_put(mddev->sysfs_action);
				mddev->sysfs_action = NULL;
			}
		}
		mddev->sysfs_active = 0;
	} else
		mutex_unlock(&mddev->reconfig_mutex);

	/* As we've dropped the mutex we need a spinlock to
	 * make sure the thread doesn't disappear
	 */
	spin_lock(&pers_lock);
	md_wakeup_thread(mddev->thread);
	spin_unlock(&pers_lock);
}

static mdk_rdev_t * find_rdev_nr(mddev_t *mddev, int nr)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->desc_nr == nr)
			return rdev;

	return NULL;
}

static mdk_rdev_t * find_rdev(mddev_t * mddev, dev_t dev)
{
	mdk_rdev_t *rdev;

	list_for_each_entry(rdev, &mddev->disks, same_set)
		if (rdev->bdev->bd_dev == dev)
			return rdev;

	return NULL;
}

static struct mdk_personality *find_pers(int level, char *clevel)
{
	struct mdk_personality *pers;
	list_for_each_entry(pers, &pers_list, list) {
		if (level != LEVEL_NONE && pers->level == level)
			return pers;
		if (strcmp(pers->name, clevel)==0)
			return pers;
	}
	return NULL;
}

/* return the offset of the super block in 512byte sectors */
static inline sector_t calc_dev_sboffset(mdk_rdev_t *rdev)
{
	sector_t num_sectors = i_size_read(rdev->bdev->bd_inode) / 512;
	return MD_NEW_SIZE_SECTORS(num_sectors);
}

static int alloc_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page)
		MD_BUG();

	rdev->sb_page = alloc_page(GFP_KERNEL);
	if (!rdev->sb_page) {
		printk(KERN_ALERT "md: out of memory.\n");
		return -ENOMEM;
	}

	return 0;
}

static void free_disk_sb(mdk_rdev_t * rdev)
{
	if (rdev->sb_page) {
		put_page(rdev->sb_page);
		rdev->sb_loaded = 0;
		rdev->sb_page = NULL;
		rdev->sb_start = 0;
		rdev->sectors = 0;
	}
	if (rdev->bb_page) {
		put_page(rdev->bb_page);
		rdev->bb_page = NULL;
	}
}


static void super_written(struct bio *bio, int error)
{
	mdk_rdev_t *rdev = bio->bi_private;
	mddev_t *mddev = rdev->mddev;

	if (error || !test_bit(BIO_UPTODATE, &bio->bi_flags)) {
		printk("md: super_written gets error=%d, uptodate=%d\n",
		       error, test_bit(BIO_UPTODATE, &bio->bi_flags));
		WARN_ON(test_bit(BIO_UPTODATE, &bio->bi_flags));
		md_error(mddev, rdev);
	}

	if (atomic_dec_and_test(&mddev->pending_writes))
		wake_up(&mddev->sb_wait);
	bio_put(bio);
}

void md_super_write(mddev_t *mddev, mdk_rdev_t *rdev,
		    sector_t sector, int size, struct page *page)
{
	/* write first size bytes of page to sector of rdev
	 * Increment mddev->pending_writes before returning
	 * and
	 * decrement it on completion, waking up sb_wait
	 * if zero is reached.
	 * If an error occurred, call md_error
	 */
	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);

	bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
	bio->bi_sector = sector;
	bio_add_page(bio, page, size, 0);
	bio->bi_private = rdev;
	bio->bi_end_io = super_written;

	atomic_inc(&mddev->pending_writes);
	submit_bio(WRITE_FLUSH_FUA, bio);
}

void md_super_wait(mddev_t *mddev)
{
	/* wait for all superblock writes that were scheduled to complete */
	DEFINE_WAIT(wq);
	for(;;) {
		prepare_to_wait(&mddev->sb_wait, &wq, TASK_UNINTERRUPTIBLE);
		if (atomic_read(&mddev->pending_writes)==0)
			break;
		schedule();
	}
	finish_wait(&mddev->sb_wait, &wq);
}

static void bi_complete(struct bio *bio, int error)
{
	complete((struct completion*)bio->bi_private);
}

int sync_page_io(mdk_rdev_t *rdev, sector_t sector, int size,
		 struct page *page, int rw, bool metadata_op)
{
	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
	struct completion event;
	int ret;

	rw |= REQ_SYNC;

	bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
		rdev->meta_bdev : rdev->bdev;
	if (metadata_op)
		bio->bi_sector = sector + rdev->sb_start;
	else
		bio->bi_sector = sector + rdev->data_offset;
	bio_add_page(bio, page, size, 0);
	init_completion(&event);
	bio->bi_private = &event;
	bio->bi_end_io = bi_complete;
	submit_bio(rw, bio);
	wait_for_completion(&event);

	ret = test_bit(BIO_UPTODATE, &bio->bi_flags);
	bio_put(bio);
	return ret;
}
EXPORT_SYMBOL_GPL(sync_page_io);

static int read_disk_sb(mdk_rdev_t * rdev, int size)
{
	char b[BDEVNAME_SIZE];
	if (!rdev->sb_page) {
		MD_BUG();
		return -EINVAL;
	}
	if (rdev->sb_loaded)
		return 0;


	if (!sync_page_io(rdev, 0, size, rdev->sb_page, READ, true))
		goto fail;
	rdev->sb_loaded = 1;
	return 0;

fail:
	printk(KERN_WARNING "md: disabled device %s, could not read superblock.\n",
	       bdevname(rdev->bdev,b));
	return -EINVAL;
}

static int uuid_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	return	sb1->set_uuid0 == sb2->set_uuid0 &&
		sb1->set_uuid1 == sb2->set_uuid1 &&
		sb1->set_uuid2 == sb2->set_uuid2 &&
		sb1->set_uuid3 == sb2->set_uuid3;
}

static int sb_equal(mdp_super_t *sb1, mdp_super_t *sb2)
{
	int ret;
	mdp_super_t *tmp1, *tmp2;

	tmp1 = kmalloc(sizeof(*tmp1),GFP_KERNEL);
	tmp2 = kmalloc(sizeof(*tmp2),GFP_KERNEL);

	if (!tmp1 || !tmp2) {
		ret = 0;
		printk(KERN_INFO "md.c sb_equal(): failed to allocate memory!\n");
		goto abort;
	}

	*tmp1 = *sb1;
	*tmp2 = *sb2;

	/*
	 * nr_disks is not constant
	 */
	tmp1->nr_disks = 0;
	tmp2->nr_disks = 0;

	ret = (memcmp(tmp1, tmp2, MD_SB_GENERIC_CONSTANT_WORDS * 4) == 0);
abort:
	kfree(tmp1);
	kfree(tmp2);
	return ret;
}


static u32 md_csum_fold(u32 csum)
{
	csum = (csum & 0xffff) + (csum >> 16);
	return (csum & 0xffff) + (csum >> 16);
}

static unsigned int calc_sb_csum(mdp_super_t * sb)
{
	u64 newcsum = 0;
	u32 *sb32 = (u32*)sb;
	int i;
	unsigned int disk_csum, csum;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;

	for (i = 0; i < MD_SB_BYTES/4 ; i++)
		newcsum += sb32[i];
	csum = (newcsum & 0xffffffff) + (newcsum>>32);
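	/* newcsum is a 64bit sum of 32bit words; adding the high half
	 * into the low half above folds it back into 32 bits.
	 */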

#ifdef CONFIG_ALPHA
	/* This used to use csum_partial, which was wrong for several
	 * reasons including that different results are returned on
	 * different architectures.  It isn't critical that we get exactly
	 * the same return value as before (we always csum_fold before
	 * testing, and that removes any differences).  However as we
	 * know that csum_partial always returned a 16bit value on
	 * alphas, do a fold to maximise conformity to previous behaviour.
	 */
	sb->sb_csum = md_csum_fold(disk_csum);
#else
	sb->sb_csum = disk_csum;
#endif
	return csum;
}


/*
 * Handle superblock details.
 * We want to be able to handle multiple superblock formats
 * so we have a common interface to them all, and an array of
 * different handlers.
 * We rely on user-space to write the initial superblock, and support
 * reading and updating of superblocks.
 * Interface methods are:
 *   int load_super(mdk_rdev_t *dev, mdk_rdev_t *refdev, int minor_version)
 *      loads and validates a superblock on dev.
 *      if refdev != NULL, compare superblocks on both devices
 *    Return:
 *      0 - dev has a superblock that is compatible with refdev
 *      1 - dev has a superblock that is compatible and newer than refdev
 *          so dev should be used as the refdev in future
 *     -EINVAL superblock incompatible or invalid
 *     -othererror e.g. -EIO
 *
 *   int validate_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Verify that dev is acceptable into mddev.
 *       The first time, mddev->raid_disks will be 0, and data from
 *       dev should be merged in.  Subsequent calls check that dev
 *       is new enough.  Return 0 or -EINVAL
 *
 *   void sync_super(mddev_t *mddev, mdk_rdev_t *dev)
 *      Update the superblock for rdev with data in mddev
 *      This does not write to disc.
 *
 */

struct super_type  {
	char		    *name;
	struct module	    *owner;
	int		    (*load_super)(mdk_rdev_t *rdev, mdk_rdev_t *refdev,
					  int minor_version);
	int		    (*validate_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	void		    (*sync_super)(mddev_t *mddev, mdk_rdev_t *rdev);
	unsigned long long  (*rdev_size_change)(mdk_rdev_t *rdev,
						sector_t num_sectors);
};

/*
 * Check that the given mddev has no bitmap.
 *
 * This function is called from the run method of all personalities that do not
 * support bitmaps. It prints an error message and returns non-zero if mddev
 * has a bitmap. Otherwise, it returns 0.
 *
 */
int md_check_no_bitmap(mddev_t *mddev)
{
	if (!mddev->bitmap_info.file && !mddev->bitmap_info.offset)
		return 0;
	printk(KERN_ERR "%s: bitmaps are not supported for %s\n",
	       mdname(mddev), mddev->pers->name);
	return 1;
}
EXPORT_SYMBOL(md_check_no_bitmap);

/*
 * load_super for 0.90.0
 */
static int super_90_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	mdp_super_t *sb;
	int ret;

	/*
	 * Calculate the position of the superblock (512byte sectors),
	 * it's at the end of the disk.
	 *
	 * It also happens to be a multiple of 4Kb.
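	 * For example, assuming MD_NEW_SIZE_SECTORS() rounds the device
	 * size down to a 64KiB boundary and then steps back one 64KiB
	 * block (128 sectors), a 1000000-sector device would place the
	 * superblock at sector 999808.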
	 */
	rdev->sb_start = calc_dev_sboffset(rdev);

	ret = read_disk_sb(rdev, MD_SB_BYTES);
	if (ret) return ret;

	ret = -EINVAL;

	bdevname(rdev->bdev, b);
	sb = page_address(rdev->sb_page);

	if (sb->md_magic != MD_SB_MAGIC) {
		printk(KERN_ERR "md: invalid raid superblock magic on %s\n",
		       b);
		goto abort;
	}

	if (sb->major_version != 0 ||
	    sb->minor_version < 90 ||
	    sb->minor_version > 91) {
		printk(KERN_WARNING "Bad version number %d.%d on %s\n",
			sb->major_version, sb->minor_version,
			b);
		goto abort;
	}

	if (sb->raid_disks <= 0)
		goto abort;

	if (md_csum_fold(calc_sb_csum(sb)) != md_csum_fold(sb->sb_csum)) {
		printk(KERN_WARNING "md: invalid superblock checksum on %s\n",
			b);
		goto abort;
	}

	rdev->preferred_minor = sb->md_minor;
	rdev->data_offset = 0;
	rdev->sb_size = MD_SB_BYTES;
	rdev->badblocks.shift = -1;

	if (sb->level == LEVEL_MULTIPATH)
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = sb->this_disk.number;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		mdp_super_t *refsb = page_address(refdev->sb_page);
		if (!uuid_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has different UUID to %s\n",
				b, bdevname(refdev->bdev,b2));
			goto abort;
		}
		if (!sb_equal(refsb, sb)) {
			printk(KERN_WARNING "md: %s has same UUID"
			       " but different superblock to %s\n",
			       b, bdevname(refdev->bdev, b2));
			goto abort;
		}
		ev1 = md_event(sb);
		ev2 = md_event(refsb);
		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	rdev->sectors = rdev->sb_start;
	/* Limit to 4TB as metadata cannot record more than that */
	if (rdev->sectors >= (2ULL << 32))
		rdev->sectors = (2ULL << 32) - 2;

	if (rdev->sectors < ((sector_t)sb->size) * 2 && sb->level >= 1)
		/* "this cannot possibly happen" ...
		 */
		ret = -EINVAL;

 abort:
	return ret;
}

/*
 * validate_super for 0.90.0
 */
static int super_90_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_disk_t *desc;
	mdp_super_t *sb = page_address(rdev->sb_page);
	__u64 ev1 = md_event(sb);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 0;
		mddev->minor_version = sb->minor_version;
		mddev->patch_version = sb->patch_version;
		mddev->external = 0;
		mddev->chunk_sectors = sb->chunk_size >> 9;
		mddev->ctime = sb->ctime;
		mddev->utime = sb->utime;
		mddev->level = sb->level;
		mddev->clevel[0] = 0;
		mddev->layout = sb->layout;
		mddev->raid_disks = sb->raid_disks;
		mddev->dev_sectors = ((sector_t)sb->size) * 2;
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9;

		if (mddev->minor_version >= 91) {
			mddev->reshape_position = sb->reshape_position;
			mddev->delta_disks = sb->delta_disks;
			mddev->new_level = sb->new_level;
			mddev->new_layout = sb->new_layout;
			mddev->new_chunk_sectors = sb->new_chunk >> 9;
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

		if (sb->state & (1<<MD_SB_CLEAN))
			mddev->recovery_cp = MaxSector;
		else {
			if (sb->events_hi == sb->cp_events_hi &&
			    sb->events_lo == sb->cp_events_lo) {
				mddev->recovery_cp = sb->recovery_cp;
			} else
				mddev->recovery_cp = 0;
		}

		memcpy(mddev->uuid+0, &sb->set_uuid0, 4);
		memcpy(mddev->uuid+4, &sb->set_uuid1, 4);
		memcpy(mddev->uuid+8, &sb->set_uuid2, 4);
		memcpy(mddev->uuid+12,&sb->set_uuid3, 4);

		mddev->max_disks = MD_SB_DISKS;

		if (sb->state & (1<<MD_SB_BITMAP_PRESENT) &&
		    mddev->bitmap_info.file == NULL)
			mddev->bitmap_info.offset =
				mddev->bitmap_info.default_offset;

	} else if (mddev->pers == NULL) {
		/* Insist on good event counter while assembling, except
		 * for spares (which don't need an event count) */
		++ev1;
		if (sb->disks[rdev->desc_nr].state & (
			(1<<MD_DISK_SYNC) | (1 << MD_DISK_ACTIVE)))
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* if adding to array with a bitmap, then we can accept an
		 * older device ... but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}

	if (mddev->level != LEVEL_MULTIPATH) {
		desc = sb->disks + rdev->desc_nr;

		if (desc->state & (1<<MD_DISK_FAULTY))
			set_bit(Faulty, &rdev->flags);
		else if (desc->state & (1<<MD_DISK_SYNC) /* &&
			    desc->raid_disk < mddev->raid_disks */) {
			set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = desc->raid_disk;
		} else if (desc->state & (1<<MD_DISK_ACTIVE)) {
			/* active but not in sync implies recovery up to
			 * reshape position.
			 * We don't know exactly where
			 * that is, so set to zero for now */
			if (mddev->minor_version >= 91) {
				rdev->recovery_offset = 0;
				rdev->raid_disk = desc->raid_disk;
			}
		}
		if (desc->state & (1<<MD_DISK_WRITEMOSTLY))
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);
	return 0;
}

/*
 * sync_super for 0.90.0
 */
static void super_90_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	mdp_super_t *sb;
	mdk_rdev_t *rdev2;
	int next_spare = mddev->raid_disks;


	/* make rdev->sb match mddev data..
	 *
	 * 1/ zero out disks
	 * 2/ Add info for each disk, keeping track of highest desc_nr (next_spare);
	 * 3/ any empty disks < next_spare become removed
	 *
	 * disks[0] gets initialised to REMOVED because
	 * we cannot be sure from other fields if it has
	 * been initialised or not.
	 */
	int i;
	int active=0, working=0,failed=0,spare=0,nr_disks=0;

	rdev->sb_size = MD_SB_BYTES;

	sb = page_address(rdev->sb_page);

	memset(sb, 0, sizeof(*sb));

	sb->md_magic = MD_SB_MAGIC;
	sb->major_version = mddev->major_version;
	sb->patch_version = mddev->patch_version;
	sb->gvalid_words  = 0; /* ignored */
	memcpy(&sb->set_uuid0, mddev->uuid+0, 4);
	memcpy(&sb->set_uuid1, mddev->uuid+4, 4);
	memcpy(&sb->set_uuid2, mddev->uuid+8, 4);
	memcpy(&sb->set_uuid3, mddev->uuid+12,4);

	sb->ctime = mddev->ctime;
	sb->level = mddev->level;
	sb->size = mddev->dev_sectors / 2;
	sb->raid_disks = mddev->raid_disks;
	sb->md_minor = mddev->md_minor;
	sb->not_persistent = 0;
	sb->utime = mddev->utime;
	sb->state = 0;
	sb->events_hi = (mddev->events>>32);
	sb->events_lo = (u32)mddev->events;

	if (mddev->reshape_position == MaxSector)
		sb->minor_version = 90;
	else {
		sb->minor_version = 91;
		sb->reshape_position = mddev->reshape_position;
		sb->new_level = mddev->new_level;
		sb->delta_disks = mddev->delta_disks;
		sb->new_layout = mddev->new_layout;
		sb->new_chunk = mddev->new_chunk_sectors << 9;
	}
	mddev->minor_version = sb->minor_version;
	if (mddev->in_sync)
	{
		sb->recovery_cp = mddev->recovery_cp;
		sb->cp_events_hi = (mddev->events>>32);
		sb->cp_events_lo = (u32)mddev->events;
		if (mddev->recovery_cp == MaxSector)
			sb->state = (1<< MD_SB_CLEAN);
	} else
		sb->recovery_cp = 0;

	sb->layout = mddev->layout;
	sb->chunk_size = mddev->chunk_sectors << 9;

	if (mddev->bitmap && mddev->bitmap_info.file == NULL)
		sb->state |= (1<<MD_SB_BITMAP_PRESENT);

	sb->disks[0].state = (1<<MD_DISK_REMOVED);
	list_for_each_entry(rdev2, &mddev->disks, same_set) {
		mdp_disk_t *d;
		int desc_nr;
		int is_active = test_bit(In_sync, &rdev2->flags);

		if (rdev2->raid_disk >= 0 &&
		    sb->minor_version >= 91)
			/* we have nowhere to store the recovery_offset,
			 * but if it is not below the reshape_position,
			 * we can piggy-back on that.
			 */
			is_active = 1;
		if (rdev2->raid_disk < 0 ||
		    test_bit(Faulty, &rdev2->flags))
			is_active = 0;
		if (is_active)
			desc_nr = rdev2->raid_disk;
		else
			desc_nr = next_spare++;
		rdev2->desc_nr = desc_nr;
		d = &sb->disks[rdev2->desc_nr];
		nr_disks++;
		d->number = rdev2->desc_nr;
		d->major = MAJOR(rdev2->bdev->bd_dev);
		d->minor = MINOR(rdev2->bdev->bd_dev);
		if (is_active)
			d->raid_disk = rdev2->raid_disk;
		else
			d->raid_disk = rdev2->desc_nr; /* compatibility */
		if (test_bit(Faulty, &rdev2->flags))
			d->state = (1<<MD_DISK_FAULTY);
		else if (is_active) {
			d->state = (1<<MD_DISK_ACTIVE);
			if (test_bit(In_sync, &rdev2->flags))
				d->state |= (1<<MD_DISK_SYNC);
			active++;
			working++;
		} else {
			d->state = 0;
			spare++;
			working++;
		}
		if (test_bit(WriteMostly, &rdev2->flags))
			d->state |= (1<<MD_DISK_WRITEMOSTLY);
	}
	/* now set the "removed" and "faulty" bits on any missing devices */
	for (i=0 ; i < mddev->raid_disks ; i++) {
		mdp_disk_t *d = &sb->disks[i];
		if (d->state == 0 && d->number == 0) {
			d->number = i;
			d->raid_disk = i;
			d->state = (1<<MD_DISK_REMOVED);
			d->state |= (1<<MD_DISK_FAULTY);
			failed++;
		}
	}
	sb->nr_disks = nr_disks;
	sb->active_disks = active;
	sb->working_disks = working;
	sb->failed_disks = failed;
	sb->spare_disks = spare;

	sb->this_disk = sb->disks[rdev->desc_nr];
	sb->sb_csum = calc_sb_csum(sb);
}

/*
 * rdev_size_change for 0.90.0
 */
static unsigned long long
super_90_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->mddev->bitmap_info.offset)
		return 0; /* can't move bitmap */
	rdev->sb_start = calc_dev_sboffset(rdev);
	if (!num_sectors || num_sectors > rdev->sb_start)
		num_sectors = rdev->sb_start;
	/* Limit to 4TB as metadata cannot record more than that.
	 * 4TB == 2^32 KB, or 2*2^32 sectors.
	 */
	if (num_sectors >= (2ULL << 32))
		num_sectors = (2ULL << 32) - 2;
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors;
}


/*
 * version 1 superblock
 */

static __le32 calc_sb_1_csum(struct mdp_superblock_1 * sb)
{
	__le32 disk_csum;
	u32 csum;
	unsigned long long newcsum;
	int size = 256 + le32_to_cpu(sb->max_dev)*2;
	__le32 *isuper = (__le32*)sb;
	int i;

	disk_csum = sb->sb_csum;
	sb->sb_csum = 0;
	newcsum = 0;
	for (i=0; size>=4; size -= 4 )
		newcsum += le32_to_cpu(*isuper++);

	if (size == 2)
		newcsum += le16_to_cpu(*(__le16*) isuper);

	csum = (newcsum & 0xffffffff) + (newcsum >> 32);
	sb->sb_csum = disk_csum;
	return cpu_to_le32(csum);
}

static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors,
			    int acknowledged);
static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
{
	struct mdp_superblock_1 *sb;
	int ret;
	sector_t sb_start;
	char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE];
	int bmask;

	/*
	 * Calculate the position of the superblock in 512byte sectors.
	 * It is always aligned to a 4K boundary and
	 * depending on minor_version, it can be:
	 * 0: At least 8K, but less than 12K, from end of device
	 * 1: At start of device
	 * 2: 4K from start of device.
	 */
	switch(minor_version) {
	case 0:
		sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
		sb_start -= 8*2;
		sb_start &= ~(sector_t)(4*2-1);
		break;
	case 1:
		sb_start = 0;
		break;
	case 2:
		sb_start = 8;
		break;
	default:
		return -EINVAL;
	}
	rdev->sb_start = sb_start;

	/* superblock is rarely larger than 1K, but it can be larger,
	 * and it is safe to read 4k, so we do that
	 */
	ret = read_disk_sb(rdev, 4096);
	if (ret) return ret;


	sb = page_address(rdev->sb_page);

	if (sb->magic != cpu_to_le32(MD_SB_MAGIC) ||
	    sb->major_version != cpu_to_le32(1) ||
	    le32_to_cpu(sb->max_dev) > (4096-256)/2 ||
	    le64_to_cpu(sb->super_offset) != rdev->sb_start ||
	    (le32_to_cpu(sb->feature_map) & ~MD_FEATURE_ALL) != 0)
		return -EINVAL;

	if (calc_sb_1_csum(sb) != sb->sb_csum) {
		printk("md: invalid superblock checksum on %s\n",
			bdevname(rdev->bdev,b));
		return -EINVAL;
	}
	if (le64_to_cpu(sb->data_size) < 10) {
		printk("md: data_size too small on %s\n",
		       bdevname(rdev->bdev,b));
		return -EINVAL;
	}

	rdev->preferred_minor = 0xffff;
	rdev->data_offset = le64_to_cpu(sb->data_offset);
	atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));

	rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
	bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
	if (rdev->sb_size & bmask)
		rdev->sb_size = (rdev->sb_size | bmask) + 1;

	if (minor_version
	    && rdev->data_offset < sb_start + (rdev->sb_size/512))
		return -EINVAL;

	if (sb->level == cpu_to_le32(LEVEL_MULTIPATH))
		rdev->desc_nr = -1;
	else
		rdev->desc_nr = le32_to_cpu(sb->dev_number);

	if (!rdev->bb_page) {
		rdev->bb_page = alloc_page(GFP_KERNEL);
		if (!rdev->bb_page)
			return -ENOMEM;
	}
	if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BAD_BLOCKS) &&
	    rdev->badblocks.count == 0) {
		/* need to load the bad block list.
		 * Currently we limit it to one page.
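		 * Each on-disk entry is a 64bit value: the low 10 bits
		 * hold the length and the high 54 bits the start sector,
		 * both scaled by bblog_shift (see the decode loop below).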
		 */
		s32 offset;
		sector_t bb_sector;
		u64 *bbp;
		int i;
		int sectors = le16_to_cpu(sb->bblog_size);
		if (sectors > (PAGE_SIZE / 512))
			return -EINVAL;
		offset = le32_to_cpu(sb->bblog_offset);
		if (offset == 0)
			return -EINVAL;
		bb_sector = (long long)offset;
		if (!sync_page_io(rdev, bb_sector, sectors << 9,
				  rdev->bb_page, READ, true))
			return -EIO;
		bbp = (u64 *)page_address(rdev->bb_page);
		rdev->badblocks.shift = sb->bblog_shift;
		for (i = 0 ; i < (sectors << (9-3)) ; i++, bbp++) {
			u64 bb = le64_to_cpu(*bbp);
			int count = bb & (0x3ff);
			u64 sector = bb >> 10;
			sector <<= sb->bblog_shift;
			count <<= sb->bblog_shift;
			if (bb + 1 == 0)
				break;
			if (md_set_badblocks(&rdev->badblocks,
					     sector, count, 1) == 0)
				return -EINVAL;
		}
	} else if (sb->bblog_offset == 0)
		rdev->badblocks.shift = -1;

	if (!refdev) {
		ret = 1;
	} else {
		__u64 ev1, ev2;
		struct mdp_superblock_1 *refsb = page_address(refdev->sb_page);

		if (memcmp(sb->set_uuid, refsb->set_uuid, 16) != 0 ||
		    sb->level != refsb->level ||
		    sb->layout != refsb->layout ||
		    sb->chunksize != refsb->chunksize) {
			printk(KERN_WARNING "md: %s has strangely different"
				" superblock to %s\n",
				bdevname(rdev->bdev,b),
				bdevname(refdev->bdev,b2));
			return -EINVAL;
		}
		ev1 = le64_to_cpu(sb->events);
		ev2 = le64_to_cpu(refsb->events);

		if (ev1 > ev2)
			ret = 1;
		else
			ret = 0;
	}
	if (minor_version)
		rdev->sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
			le64_to_cpu(sb->data_offset);
	else
		rdev->sectors = rdev->sb_start;
	if (rdev->sectors < le64_to_cpu(sb->data_size))
		return -EINVAL;
	rdev->sectors = le64_to_cpu(sb->data_size);
	if (le64_to_cpu(sb->size) > rdev->sectors)
		return -EINVAL;
	return ret;
}

static int super_1_validate(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb = page_address(rdev->sb_page);
	__u64 ev1 = le64_to_cpu(sb->events);

	rdev->raid_disk = -1;
	clear_bit(Faulty, &rdev->flags);
	clear_bit(In_sync, &rdev->flags);
	clear_bit(WriteMostly, &rdev->flags);

	if (mddev->raid_disks == 0) {
		mddev->major_version = 1;
		mddev->patch_version = 0;
		mddev->external = 0;
		mddev->chunk_sectors = le32_to_cpu(sb->chunksize);
		mddev->ctime = le64_to_cpu(sb->ctime) & ((1ULL << 32)-1);
		mddev->utime = le64_to_cpu(sb->utime) & ((1ULL << 32)-1);
		mddev->level = le32_to_cpu(sb->level);
		mddev->clevel[0] = 0;
		mddev->layout = le32_to_cpu(sb->layout);
		mddev->raid_disks = le32_to_cpu(sb->raid_disks);
		mddev->dev_sectors = le64_to_cpu(sb->size);
		mddev->events = ev1;
		mddev->bitmap_info.offset = 0;
		mddev->bitmap_info.default_offset = 1024 >> 9;

		mddev->recovery_cp = le64_to_cpu(sb->resync_offset);
		memcpy(mddev->uuid, sb->set_uuid, 16);

		mddev->max_disks =  (4096-256)/2;

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_BITMAP_OFFSET) &&
		    mddev->bitmap_info.file == NULL )
			mddev->bitmap_info.offset =
				(__s32)le32_to_cpu(sb->bitmap_offset);

		if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE)) {
			mddev->reshape_position = le64_to_cpu(sb->reshape_position);
			mddev->delta_disks = le32_to_cpu(sb->delta_disks);
			mddev->new_level = le32_to_cpu(sb->new_level);
			mddev->new_layout =
				le32_to_cpu(sb->new_layout);
			mddev->new_chunk_sectors = le32_to_cpu(sb->new_chunk);
		} else {
			mddev->reshape_position = MaxSector;
			mddev->delta_disks = 0;
			mddev->new_level = mddev->level;
			mddev->new_layout = mddev->layout;
			mddev->new_chunk_sectors = mddev->chunk_sectors;
		}

	} else if (mddev->pers == NULL) {
		/* Insist on a good event counter while assembling, except for
		 * spares (which don't need an event count) */
		++ev1;
		if (rdev->desc_nr >= 0 &&
		    rdev->desc_nr < le32_to_cpu(sb->max_dev) &&
		    le16_to_cpu(sb->dev_roles[rdev->desc_nr]) < 0xfffe)
			if (ev1 < mddev->events)
				return -EINVAL;
	} else if (mddev->bitmap) {
		/* If adding to array with a bitmap, then we can accept an
		 * older device, but not too old.
		 */
		if (ev1 < mddev->bitmap->events_cleared)
			return 0;
	} else {
		if (ev1 < mddev->events)
			/* just a hot-add of a new device, leave raid_disk at -1 */
			return 0;
	}
	if (mddev->level != LEVEL_MULTIPATH) {
		int role;
		if (rdev->desc_nr < 0 ||
		    rdev->desc_nr >= le32_to_cpu(sb->max_dev)) {
			role = 0xffff;
			rdev->desc_nr = -1;
		} else
			role = le16_to_cpu(sb->dev_roles[rdev->desc_nr]);
		switch(role) {
		case 0xffff: /* spare */
			break;
		case 0xfffe: /* faulty */
			set_bit(Faulty, &rdev->flags);
			break;
		default:
			if ((le32_to_cpu(sb->feature_map) &
			     MD_FEATURE_RECOVERY_OFFSET))
				rdev->recovery_offset = le64_to_cpu(sb->recovery_offset);
			else
				set_bit(In_sync, &rdev->flags);
			rdev->raid_disk = role;
			break;
		}
		if (sb->devflags & WriteMostly1)
			set_bit(WriteMostly, &rdev->flags);
	} else /* MULTIPATH are always insync */
		set_bit(In_sync, &rdev->flags);

	return 0;
}

static void super_1_sync(mddev_t *mddev, mdk_rdev_t *rdev)
{
	struct mdp_superblock_1 *sb;
	mdk_rdev_t *rdev2;
	int max_dev, i;
	/* make rdev->sb match mddev and rdev data.
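	 * Unlike super_90_sync() this updates the loaded superblock in
	 * place rather than rebuilding it from scratch, so fields we do
	 * not set here keep their on-disk values.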
*/ 1735 1736 sb = page_address(rdev->sb_page); 1737 1738 sb->feature_map = 0; 1739 sb->pad0 = 0; 1740 sb->recovery_offset = cpu_to_le64(0); 1741 memset(sb->pad1, 0, sizeof(sb->pad1)); 1742 memset(sb->pad3, 0, sizeof(sb->pad3)); 1743 1744 sb->utime = cpu_to_le64((__u64)mddev->utime); 1745 sb->events = cpu_to_le64(mddev->events); 1746 if (mddev->in_sync) 1747 sb->resync_offset = cpu_to_le64(mddev->recovery_cp); 1748 else 1749 sb->resync_offset = cpu_to_le64(0); 1750 1751 sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors)); 1752 1753 sb->raid_disks = cpu_to_le32(mddev->raid_disks); 1754 sb->size = cpu_to_le64(mddev->dev_sectors); 1755 sb->chunksize = cpu_to_le32(mddev->chunk_sectors); 1756 sb->level = cpu_to_le32(mddev->level); 1757 sb->layout = cpu_to_le32(mddev->layout); 1758 1759 if (test_bit(WriteMostly, &rdev->flags)) 1760 sb->devflags |= WriteMostly1; 1761 else 1762 sb->devflags &= ~WriteMostly1; 1763 1764 if (mddev->bitmap && mddev->bitmap_info.file == NULL) { 1765 sb->bitmap_offset = cpu_to_le32((__u32)mddev->bitmap_info.offset); 1766 sb->feature_map = cpu_to_le32(MD_FEATURE_BITMAP_OFFSET); 1767 } 1768 1769 if (rdev->raid_disk >= 0 && 1770 !test_bit(In_sync, &rdev->flags)) { 1771 sb->feature_map |= 1772 cpu_to_le32(MD_FEATURE_RECOVERY_OFFSET); 1773 sb->recovery_offset = 1774 cpu_to_le64(rdev->recovery_offset); 1775 } 1776 1777 if (mddev->reshape_position != MaxSector) { 1778 sb->feature_map |= cpu_to_le32(MD_FEATURE_RESHAPE_ACTIVE); 1779 sb->reshape_position = cpu_to_le64(mddev->reshape_position); 1780 sb->new_layout = cpu_to_le32(mddev->new_layout); 1781 sb->delta_disks = cpu_to_le32(mddev->delta_disks); 1782 sb->new_level = cpu_to_le32(mddev->new_level); 1783 sb->new_chunk = cpu_to_le32(mddev->new_chunk_sectors); 1784 } 1785 1786 if (rdev->badblocks.count == 0) 1787 /* Nothing to do for bad blocks*/ ; 1788 else if (sb->bblog_offset == 0) 1789 /* Cannot record bad blocks on this device */ 1790 md_error(mddev, rdev); 1791 else { 1792 struct badblocks *bb = &rdev->badblocks; 1793 u64 *bbp = (u64 *)page_address(rdev->bb_page); 1794 u64 *p = bb->page; 1795 sb->feature_map |= cpu_to_le32(MD_FEATURE_BAD_BLOCKS); 1796 if (bb->changed) { 1797 unsigned seq; 1798 1799 retry: 1800 seq = read_seqbegin(&bb->lock); 1801 1802 memset(bbp, 0xff, PAGE_SIZE); 1803 1804 for (i = 0 ; i < bb->count ; i++) { 1805 u64 internal_bb = *p++; 1806 u64 store_bb = ((BB_OFFSET(internal_bb) << 10) 1807 | BB_LEN(internal_bb)); 1808 *bbp++ = cpu_to_le64(store_bb); 1809 } 1810 if (read_seqretry(&bb->lock, seq)) 1811 goto retry; 1812 1813 bb->sector = (rdev->sb_start + 1814 (int)le32_to_cpu(sb->bblog_offset)); 1815 bb->size = le16_to_cpu(sb->bblog_size); 1816 bb->changed = 0; 1817 } 1818 } 1819 1820 max_dev = 0; 1821 list_for_each_entry(rdev2, &mddev->disks, same_set) 1822 if (rdev2->desc_nr+1 > max_dev) 1823 max_dev = rdev2->desc_nr+1; 1824 1825 if (max_dev > le32_to_cpu(sb->max_dev)) { 1826 int bmask; 1827 sb->max_dev = cpu_to_le32(max_dev); 1828 rdev->sb_size = max_dev * 2 + 256; 1829 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1; 1830 if (rdev->sb_size & bmask) 1831 rdev->sb_size = (rdev->sb_size | bmask) + 1; 1832 } else 1833 max_dev = le32_to_cpu(sb->max_dev); 1834 1835 for (i=0; i<max_dev;i++) 1836 sb->dev_roles[i] = cpu_to_le16(0xfffe); 1837 1838 list_for_each_entry(rdev2, &mddev->disks, same_set) { 1839 i = rdev2->desc_nr; 1840 if (test_bit(Faulty, &rdev2->flags)) 1841 sb->dev_roles[i] = cpu_to_le16(0xfffe); 1842 else if (test_bit(In_sync, &rdev2->flags)) 1843 
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else if (rdev2->raid_disk >= 0)
			sb->dev_roles[i] = cpu_to_le16(rdev2->raid_disk);
		else
			sb->dev_roles[i] = cpu_to_le16(0xffff);
	}

	sb->sb_csum = calc_sb_1_csum(sb);
}

static unsigned long long
super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
{
	struct mdp_superblock_1 *sb;
	sector_t max_sectors;
	if (num_sectors && num_sectors < rdev->mddev->dev_sectors)
		return 0; /* component must fit device */
	if (rdev->sb_start < rdev->data_offset) {
		/* minor versions 1 and 2; superblock before data */
		max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
		max_sectors -= rdev->data_offset;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
	} else if (rdev->mddev->bitmap_info.offset) {
		/* minor version 0 with bitmap we can't move */
		return 0;
	} else {
		/* minor version 0; superblock after data */
		sector_t sb_start;
		sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
		sb_start &= ~(sector_t)(4*2 - 1);
		max_sectors = rdev->sectors + sb_start - rdev->sb_start;
		if (!num_sectors || num_sectors > max_sectors)
			num_sectors = max_sectors;
		rdev->sb_start = sb_start;
	}
	sb = page_address(rdev->sb_page);
	sb->data_size = cpu_to_le64(num_sectors);
	sb->super_offset = rdev->sb_start;
	sb->sb_csum = calc_sb_1_csum(sb);
	md_super_write(rdev->mddev, rdev, rdev->sb_start, rdev->sb_size,
		       rdev->sb_page);
	md_super_wait(rdev->mddev);
	return num_sectors;
}

static struct super_type super_types[] = {
	[0] = {
		.name	= "0.90.0",
		.owner	= THIS_MODULE,
		.load_super	    = super_90_load,
		.validate_super	    = super_90_validate,
		.sync_super	    = super_90_sync,
		.rdev_size_change   = super_90_rdev_size_change,
	},
	[1] = {
		.name	= "md-1",
		.owner	= THIS_MODULE,
		.load_super	    = super_1_load,
		.validate_super	    = super_1_validate,
		.sync_super	    = super_1_sync,
		.rdev_size_change   = super_1_rdev_size_change,
	},
};

static void sync_super(mddev_t *mddev, mdk_rdev_t *rdev)
{
	if (mddev->sync_super) {
		mddev->sync_super(mddev, rdev);
		return;
	}

	BUG_ON(mddev->major_version >= ARRAY_SIZE(super_types));

	super_types[mddev->major_version].sync_super(mddev, rdev);
}

static int match_mddev_units(mddev_t *mddev1, mddev_t *mddev2)
{
	mdk_rdev_t *rdev, *rdev2;

	rcu_read_lock();
	rdev_for_each_rcu(rdev, mddev1)
		rdev_for_each_rcu(rdev2, mddev2)
			if (rdev->bdev->bd_contains ==
			    rdev2->bdev->bd_contains) {
				rcu_read_unlock();
				return 1;
			}
	rcu_read_unlock();
	return 0;
}

static LIST_HEAD(pending_raid_disks);

/*
 * Try to register data integrity profile for an mddev
 *
 * This is called when an array is started and after a disk has been kicked
 * from the array. It only succeeds if all working and active component devices
 * are integrity capable with matching profiles.
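 * Returns 0 on success (including when there is nothing to do) and
 * -EINVAL on a profile mismatch or registration failure.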
/*
 * Try to register data integrity profile for an mddev
 *
 * This is called when an array is started and after a disk has been kicked
 * from the array. It only succeeds if all working and active component devices
 * are integrity capable with matching profiles.
 */
int md_integrity_register(mddev_t *mddev)
{
    mdk_rdev_t *rdev, *reference = NULL;

    if (list_empty(&mddev->disks))
        return 0; /* nothing to do */
    if (!mddev->gendisk || blk_get_integrity(mddev->gendisk))
        return 0; /* shouldn't register, or already is */
    list_for_each_entry(rdev, &mddev->disks, same_set) {
        /* skip spares and non-functional disks */
        if (test_bit(Faulty, &rdev->flags))
            continue;
        if (rdev->raid_disk < 0)
            continue;
        if (!reference) {
            /* Use the first rdev as the reference */
            reference = rdev;
            continue;
        }
        /* does this rdev's profile match the reference profile? */
        if (blk_integrity_compare(reference->bdev->bd_disk,
                rdev->bdev->bd_disk) < 0)
            return -EINVAL;
    }
    if (!reference || !bdev_get_integrity(reference->bdev))
        return 0;
    /*
     * All component devices are integrity capable and have matching
     * profiles, register the common profile for the md device.
     */
    if (blk_integrity_register(mddev->gendisk,
            bdev_get_integrity(reference->bdev)) != 0) {
        printk(KERN_ERR "md: failed to register integrity for %s\n",
            mdname(mddev));
        return -EINVAL;
    }
    printk(KERN_NOTICE "md: data integrity enabled on %s\n", mdname(mddev));
    if (bioset_integrity_create(mddev->bio_set, BIO_POOL_SIZE)) {
        printk(KERN_ERR "md: failed to create integrity pool for %s\n",
               mdname(mddev));
        return -EINVAL;
    }
    return 0;
}
EXPORT_SYMBOL(md_integrity_register);

/* Disable data integrity if non-capable/non-matching disk is being added */
void md_integrity_add_rdev(mdk_rdev_t *rdev, mddev_t *mddev)
{
    struct blk_integrity *bi_rdev = bdev_get_integrity(rdev->bdev);
    struct blk_integrity *bi_mddev = blk_get_integrity(mddev->gendisk);

    if (!bi_mddev) /* nothing to do */
        return;
    if (rdev->raid_disk < 0) /* skip spares */
        return;
    if (bi_rdev && blk_integrity_compare(mddev->gendisk,
                         rdev->bdev->bd_disk) >= 0)
        return;
    printk(KERN_NOTICE "disabling data integrity on %s\n", mdname(mddev));
    blk_integrity_unregister(mddev->gendisk);
}
EXPORT_SYMBOL(md_integrity_add_rdev);
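/*
 * Attach an rdev to an array: reject duplicates, make sure the device
 * is large enough, pick (or validate) a descriptor number, then create
 * the sysfs "dev-<name>" directory and its "block" link.
 */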
static int bind_rdev_to_array(mdk_rdev_t *rdev, mddev_t *mddev)
{
    char b[BDEVNAME_SIZE];
    struct kobject *ko;
    char *s;
    int err;

    if (rdev->mddev) {
        MD_BUG();
        return -EINVAL;
    }

    /* prevent duplicates */
    if (find_rdev(mddev, rdev->bdev->bd_dev))
        return -EEXIST;

    /* make sure rdev->sectors exceeds mddev->dev_sectors */
    if (rdev->sectors && (mddev->dev_sectors == 0 ||
            rdev->sectors < mddev->dev_sectors)) {
        if (mddev->pers) {
            /* Cannot change size, so fail
             * If mddev->level <= 0, then we don't care
             * about aligning sizes (e.g. linear)
             */
            if (mddev->level > 0)
                return -ENOSPC;
        } else
            mddev->dev_sectors = rdev->sectors;
    }

    /* Verify rdev->desc_nr is unique.
     * If it is -1, assign a free number, else
     * check number is not in use
     */
    if (rdev->desc_nr < 0) {
        int choice = 0;
        if (mddev->pers)
            choice = mddev->raid_disks;
        while (find_rdev_nr(mddev, choice))
            choice++;
        rdev->desc_nr = choice;
    } else {
        if (find_rdev_nr(mddev, rdev->desc_nr))
            return -EBUSY;
    }
    if (mddev->max_disks && rdev->desc_nr >= mddev->max_disks) {
        printk(KERN_WARNING "md: %s: array is limited to %d devices\n",
               mdname(mddev), mddev->max_disks);
        return -EBUSY;
    }
    bdevname(rdev->bdev, b);
    while ((s = strchr(b, '/')) != NULL)
        *s = '!';

    rdev->mddev = mddev;
    printk(KERN_INFO "md: bind<%s>\n", b);

    if ((err = kobject_add(&rdev->kobj, &mddev->kobj, "dev-%s", b)))
        goto fail;

    ko = &part_to_dev(rdev->bdev->bd_part)->kobj;
    if (sysfs_create_link(&rdev->kobj, ko, "block"))
        /* failure here is OK */;
    rdev->sysfs_state = sysfs_get_dirent_safe(rdev->kobj.sd, "state");

    list_add_rcu(&rdev->same_set, &mddev->disks);
    bd_link_disk_holder(rdev->bdev, mddev->gendisk);

    /* May as well allow recovery to be retried once */
    mddev->recovery_disabled++;

    return 0;

 fail:
    printk(KERN_WARNING "md: failed to register dev-%s for %s\n",
           b, mdname(mddev));
    return err;
}

static void md_delayed_delete(struct work_struct *ws)
{
    mdk_rdev_t *rdev = container_of(ws, mdk_rdev_t, del_work);
    kobject_del(&rdev->kobj);
    kobject_put(&rdev->kobj);
}

static void unbind_rdev_from_array(mdk_rdev_t *rdev)
{
    char b[BDEVNAME_SIZE];
    if (!rdev->mddev) {
        MD_BUG();
        return;
    }
    bd_unlink_disk_holder(rdev->bdev, rdev->mddev->gendisk);
    list_del_rcu(&rdev->same_set);
    printk(KERN_INFO "md: unbind<%s>\n", bdevname(rdev->bdev, b));
    rdev->mddev = NULL;
    sysfs_remove_link(&rdev->kobj, "block");
    sysfs_put(rdev->sysfs_state);
    rdev->sysfs_state = NULL;
    kfree(rdev->badblocks.page);
    rdev->badblocks.count = 0;
    rdev->badblocks.page = NULL;
    /* We need to delay this, otherwise we can deadlock when
     * 'remove' is written to "dev/state".  We also need
     * to delay it due to rcu usage.
     */
    synchronize_rcu();
    INIT_WORK(&rdev->del_work, md_delayed_delete);
    kobject_get(&rdev->kobj);
    queue_work(md_misc_wq, &rdev->del_work);
}
/*
 * prevent the device from being mounted, repartitioned or
 * otherwise reused by a RAID array (or any other kernel
 * subsystem), by bd_claiming the device.
 */
static int lock_rdev(mdk_rdev_t *rdev, dev_t dev, int shared)
{
    int err = 0;
    struct block_device *bdev;
    char b[BDEVNAME_SIZE];

    bdev = blkdev_get_by_dev(dev, FMODE_READ|FMODE_WRITE|FMODE_EXCL,
                 shared ? (mdk_rdev_t *)lock_rdev : rdev);
    if (IS_ERR(bdev)) {
        printk(KERN_ERR "md: could not open %s.\n",
            __bdevname(dev, b));
        return PTR_ERR(bdev);
    }
    rdev->bdev = bdev;
    return err;
}

static void unlock_rdev(mdk_rdev_t *rdev)
{
    struct block_device *bdev = rdev->bdev;
    rdev->bdev = NULL;
    if (!bdev)
        MD_BUG();
    blkdev_put(bdev, FMODE_READ|FMODE_WRITE|FMODE_EXCL);
}

void md_autodetect_dev(dev_t dev);

static void export_rdev(mdk_rdev_t *rdev)
{
    char b[BDEVNAME_SIZE];
    printk(KERN_INFO "md: export_rdev(%s)\n",
        bdevname(rdev->bdev, b));
    if (rdev->mddev)
        MD_BUG();
    free_disk_sb(rdev);
#ifndef MODULE
    if (test_bit(AutoDetected, &rdev->flags))
        md_autodetect_dev(rdev->bdev->bd_dev);
#endif
    unlock_rdev(rdev);
    kobject_put(&rdev->kobj);
}

static void kick_rdev_from_array(mdk_rdev_t *rdev)
{
    unbind_rdev_from_array(rdev);
    export_rdev(rdev);
}

static void export_array(mddev_t *mddev)
{
    mdk_rdev_t *rdev, *tmp;

    rdev_for_each(rdev, tmp, mddev) {
        if (!rdev->mddev) {
            MD_BUG();
            continue;
        }
        kick_rdev_from_array(rdev);
    }
    if (!list_empty(&mddev->disks))
        MD_BUG();
    mddev->raid_disks = 0;
    mddev->major_version = 0;
}
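/*
 * Debug helpers: pretty-print a 0.90 or 1.x superblock and per-device
 * state.  Used by md_print_devices() below to emit a complete RAID
 * state printout.
 */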
static void print_desc(mdp_disk_t *desc)
{
    printk(" DISK<N:%d,(%d,%d),R:%d,S:%d>\n", desc->number,
        desc->major, desc->minor, desc->raid_disk, desc->state);
}

static void print_sb_90(mdp_super_t *sb)
{
    int i;

    printk(KERN_INFO
        "md: SB: (V:%d.%d.%d) ID:<%08x.%08x.%08x.%08x> CT:%08x\n",
        sb->major_version, sb->minor_version, sb->patch_version,
        sb->set_uuid0, sb->set_uuid1, sb->set_uuid2, sb->set_uuid3,
        sb->ctime);
    printk(KERN_INFO "md: L%d S%08d ND:%d RD:%d md%d LO:%d CS:%d\n",
        sb->level, sb->size, sb->nr_disks, sb->raid_disks,
        sb->md_minor, sb->layout, sb->chunk_size);
    printk(KERN_INFO "md: UT:%08x ST:%d AD:%d WD:%d"
        " FD:%d SD:%d CSUM:%08x E:%08lx\n",
        sb->utime, sb->state, sb->active_disks, sb->working_disks,
        sb->failed_disks, sb->spare_disks,
        sb->sb_csum, (unsigned long)sb->events_lo);

    printk(KERN_INFO);
    for (i = 0; i < MD_SB_DISKS; i++) {
        mdp_disk_t *desc;

        desc = sb->disks + i;
        if (desc->number || desc->major || desc->minor ||
            desc->raid_disk || (desc->state && (desc->state != 4))) {
            printk(" D %2d: ", i);
            print_desc(desc);
        }
    }
    printk(KERN_INFO "md: THIS: ");
    print_desc(&sb->this_disk);
}

static void print_sb_1(struct mdp_superblock_1 *sb)
{
    __u8 *uuid;

    uuid = sb->set_uuid;
    printk(KERN_INFO
        "md: SB: (V:%u) (F:0x%08x) Array-ID:<%pU>\n"
        "md: Name: \"%s\" CT:%llu\n",
        le32_to_cpu(sb->major_version),
        le32_to_cpu(sb->feature_map),
        uuid,
        sb->set_name,
        (unsigned long long)le64_to_cpu(sb->ctime)
            & MD_SUPERBLOCK_1_TIME_SEC_MASK);

    uuid = sb->device_uuid;
    printk(KERN_INFO
        "md: L%u SZ%llu RD:%u LO:%u CS:%u DO:%llu DS:%llu SO:%llu"
            " RO:%llu\n"
        "md: Dev:%08x UUID: %pU\n"
        "md: (F:0x%08x) UT:%llu Events:%llu ResyncOffset:%llu CSUM:0x%08x\n"
        "md: (MaxDev:%u) \n",
        le32_to_cpu(sb->level),
        (unsigned long long)le64_to_cpu(sb->size),
        le32_to_cpu(sb->raid_disks),
        le32_to_cpu(sb->layout),
        le32_to_cpu(sb->chunksize),
        (unsigned long long)le64_to_cpu(sb->data_offset),
        (unsigned long long)le64_to_cpu(sb->data_size),
        (unsigned long long)le64_to_cpu(sb->super_offset),
        (unsigned long long)le64_to_cpu(sb->recovery_offset),
        le32_to_cpu(sb->dev_number),
        uuid,
        sb->devflags,
        (unsigned long long)le64_to_cpu(sb->utime) & MD_SUPERBLOCK_1_TIME_SEC_MASK,
        (unsigned long long)le64_to_cpu(sb->events),
        (unsigned long long)le64_to_cpu(sb->resync_offset),
        le32_to_cpu(sb->sb_csum),
        le32_to_cpu(sb->max_dev)
        );
}

static void print_rdev(mdk_rdev_t *rdev, int major_version)
{
    char b[BDEVNAME_SIZE];
    printk(KERN_INFO "md: rdev %s, Sect:%08llu F:%d S:%d DN:%u\n",
        bdevname(rdev->bdev, b), (unsigned long long)rdev->sectors,
        test_bit(Faulty, &rdev->flags), test_bit(In_sync, &rdev->flags),
        rdev->desc_nr);
    if (rdev->sb_loaded) {
        printk(KERN_INFO "md: rdev superblock (MJ:%d):\n", major_version);
        switch (major_version) {
        case 0:
            print_sb_90(page_address(rdev->sb_page));
            break;
        case 1:
            print_sb_1(page_address(rdev->sb_page));
            break;
        }
    } else
        printk(KERN_INFO "md: no rdev superblock!\n");
}

static void md_print_devices(void)
{
    struct list_head *tmp;
    mdk_rdev_t *rdev;
    mddev_t *mddev;
    char b[BDEVNAME_SIZE];

    printk("\n");
    printk("md: **********************************\n");
    printk("md: * <COMPLETE RAID STATE PRINTOUT> *\n");
    printk("md: **********************************\n");
    for_each_mddev(mddev, tmp) {

        if (mddev->bitmap)
            bitmap_print_sb(mddev->bitmap);
        else
            printk("%s: ", mdname(mddev));
        list_for_each_entry(rdev, &mddev->disks, same_set)
            printk("<%s>", bdevname(rdev->bdev, b));
        printk("\n");

        list_for_each_entry(rdev, &mddev->disks, same_set)
            print_rdev(rdev, mddev->major_version);
    }
    printk("md: **********************************\n");
    printk("\n");
}


static void sync_sbs(mddev_t *mddev, int nospares)
{
    /* Update each superblock (in-memory image), but
     * if we are allowed to, skip spares which already
     * have the right event counter, or have one earlier
     * (which would mean they aren't being marked as dirty
     * with the rest of the array)
     */
    mdk_rdev_t *rdev;
    list_for_each_entry(rdev, &mddev->disks, same_set) {
        if (rdev->sb_events == mddev->events ||
            (nospares &&
             rdev->raid_disk < 0 &&
             rdev->sb_events+1 == mddev->events)) {
            /* Don't update this superblock */
            rdev->sb_loaded = 2;
        } else {
            sync_super(mddev, rdev);
            rdev->sb_loaded = 1;
        }
    }
}
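/*
 * Write out all dirty superblocks (and the bitmap superblock) and wait
 * for the writes to finish.  The event counter normally moves forward;
 * for a pure clean<->dirty transition it may instead be rolled back one
 * step so spare superblocks need not be rewritten.  If anything changes
 * while the writes are in flight, the whole pass is repeated.
 */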
static void md_update_sb(mddev_t *mddev, int force_change)
{
    mdk_rdev_t *rdev;
    int sync_req;
    int nospares = 0;
    int any_badblocks_changed = 0;

repeat:
    /* First make sure individual recovery_offsets are correct */
    list_for_each_entry(rdev, &mddev->disks, same_set) {
        if (rdev->raid_disk >= 0 &&
            mddev->delta_disks >= 0 &&
            !test_bit(In_sync, &rdev->flags) &&
            mddev->curr_resync_completed > rdev->recovery_offset)
            rdev->recovery_offset = mddev->curr_resync_completed;

    }
    if (!mddev->persistent) {
        clear_bit(MD_CHANGE_CLEAN, &mddev->flags);
        clear_bit(MD_CHANGE_DEVS, &mddev->flags);
        if (!mddev->external) {
            clear_bit(MD_CHANGE_PENDING, &mddev->flags);
            list_for_each_entry(rdev, &mddev->disks, same_set) {
                if (rdev->badblocks.changed) {
                    md_ack_all_badblocks(&rdev->badblocks);
                    md_error(mddev, rdev);
                }
                clear_bit(Blocked, &rdev->flags);
                clear_bit(BlockedBadBlocks, &rdev->flags);
                wake_up(&rdev->blocked_wait);
            }
        }
        wake_up(&mddev->sb_wait);
        return;
    }

    spin_lock_irq(&mddev->write_lock);

    mddev->utime = get_seconds();

    if (test_and_clear_bit(MD_CHANGE_DEVS, &mddev->flags))
        force_change = 1;
    if (test_and_clear_bit(MD_CHANGE_CLEAN, &mddev->flags))
        /* just a clean<->dirty transition, possibly leave spares alone,
         * though if events isn't the right even/odd, we will have to do
         * spares after all
         */
        nospares = 1;
    if (force_change)
        nospares = 0;
    if (mddev->degraded)
        /* If the array is degraded, then skipping spares is both
         * dangerous and fairly pointless.
         * Dangerous because a device that was removed from the array
         * might have an event count that still looks up-to-date,
         * so it can be re-added without a resync.
         * Pointless because if there are any spares to skip,
         * then a recovery will happen and soon that array won't
         * be degraded any more and the spare can go back to sleep then.
         */
        nospares = 0;

    sync_req = mddev->in_sync;

    /* If this is just a dirty<->clean transition, and the array is clean
     * and 'events' is odd, we can roll back to the previous clean state */
    if (nospares
        && (mddev->in_sync && mddev->recovery_cp == MaxSector)
        && mddev->can_decrease_events
        && mddev->events != 1) {
        mddev->events--;
        mddev->can_decrease_events = 0;
    } else {
        /* otherwise we have to go forward and ... */
        mddev->events++;
        mddev->can_decrease_events = nospares;
    }

    if (!mddev->events) {
        /*
         * oops, this 64-bit counter should never wrap.
         * Either we are in around ~1 trillion A.C., assuming
         * 1 reboot per second, or we have a bug:
         */
        MD_BUG();
        mddev->events--;
    }

    list_for_each_entry(rdev, &mddev->disks, same_set) {
        if (rdev->badblocks.changed)
            any_badblocks_changed++;
        if (test_bit(Faulty, &rdev->flags))
            set_bit(FaultRecorded, &rdev->flags);
    }

    sync_sbs(mddev, nospares);
    spin_unlock_irq(&mddev->write_lock);

    dprintk(KERN_INFO
        "md: updating %s RAID superblock on device (in sync %d)\n",
        mdname(mddev), mddev->in_sync);

    bitmap_update_sb(mddev->bitmap);
    list_for_each_entry(rdev, &mddev->disks, same_set) {
        char b[BDEVNAME_SIZE];
        dprintk(KERN_INFO "md: ");
        if (rdev->sb_loaded != 1)
            continue; /* no noise on spare devices */
        if (test_bit(Faulty, &rdev->flags))
            dprintk("(skipping faulty ");

        dprintk("%s ", bdevname(rdev->bdev, b));
        if (!test_bit(Faulty, &rdev->flags)) {
            md_super_write(mddev, rdev,
                       rdev->sb_start, rdev->sb_size,
                       rdev->sb_page);
            dprintk(KERN_INFO "(write) %s's sb offset: %llu\n",
                bdevname(rdev->bdev, b),
                (unsigned long long)rdev->sb_start);
            rdev->sb_events = mddev->events;
            if (rdev->badblocks.size) {
                md_super_write(mddev, rdev,
                           rdev->badblocks.sector,
                           rdev->badblocks.size << 9,
                           rdev->bb_page);
                rdev->badblocks.size = 0;
            }

        } else
            dprintk(")\n");
        if (mddev->level == LEVEL_MULTIPATH)
            /* only need to write one superblock... */
            break;
    }
    md_super_wait(mddev);
    /* if there was a failure, MD_CHANGE_DEVS was set, and we re-write super */

    spin_lock_irq(&mddev->write_lock);
    if (mddev->in_sync != sync_req ||
        test_bit(MD_CHANGE_DEVS, &mddev->flags)) {
        /* have to write it out again */
        spin_unlock_irq(&mddev->write_lock);
        goto repeat;
    }
    clear_bit(MD_CHANGE_PENDING, &mddev->flags);
    spin_unlock_irq(&mddev->write_lock);
    wake_up(&mddev->sb_wait);
    if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery))
        sysfs_notify(&mddev->kobj, NULL, "sync_completed");

    list_for_each_entry(rdev, &mddev->disks, same_set) {
        if (test_and_clear_bit(FaultRecorded, &rdev->flags))
            clear_bit(Blocked, &rdev->flags);

        if (any_badblocks_changed)
            md_ack_all_badblocks(&rdev->badblocks);
        clear_bit(BlockedBadBlocks, &rdev->flags);
        wake_up(&rdev->blocked_wait);
    }
}
/* words written to sysfs files may, or may not, be \n terminated.
 * We want to accept either form.  For this we use cmd_match.
 */
static int cmd_match(const char *cmd, const char *str)
{
    /* See if cmd, written into a sysfs file, matches
     * str.  They must either be the same, or cmd can
     * have a trailing newline
     */
    while (*cmd && *str && *cmd == *str) {
        cmd++;
        str++;
    }
    if (*cmd == '\n')
        cmd++;
    if (*str || *cmd)
        return 0;
    return 1;
}
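/*
 * For example, both "idle" and "idle\n" match "idle", while "idle2"
 * and a bare "idl" do not.
 */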
struct rdev_sysfs_entry {
    struct attribute attr;
    ssize_t (*show)(mdk_rdev_t *, char *);
    ssize_t (*store)(mdk_rdev_t *, const char *, size_t);
};

static ssize_t
state_show(mdk_rdev_t *rdev, char *page)
{
    char *sep = "";
    size_t len = 0;

    if (test_bit(Faulty, &rdev->flags) ||
        rdev->badblocks.unacked_exist) {
        len += sprintf(page+len, "%sfaulty", sep);
        sep = ",";
    }
    if (test_bit(In_sync, &rdev->flags)) {
        len += sprintf(page+len, "%sin_sync", sep);
        sep = ",";
    }
    if (test_bit(WriteMostly, &rdev->flags)) {
        len += sprintf(page+len, "%swrite_mostly", sep);
        sep = ",";
    }
    if (test_bit(Blocked, &rdev->flags) ||
        rdev->badblocks.unacked_exist) {
        len += sprintf(page+len, "%sblocked", sep);
        sep = ",";
    }
    if (!test_bit(Faulty, &rdev->flags) &&
        !test_bit(In_sync, &rdev->flags)) {
        len += sprintf(page+len, "%sspare", sep);
        sep = ",";
    }
    if (test_bit(WriteErrorSeen, &rdev->flags)) {
        len += sprintf(page+len, "%swrite_error", sep);
        sep = ",";
    }
    return len + sprintf(page+len, "\n");
}

static ssize_t
state_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
    /* can write
     *  faulty  - simulates an error
     *  remove  - disconnects the device
     *  writemostly - sets write_mostly
     *  -writemostly - clears write_mostly
     *  blocked - sets the Blocked flag
     *  -blocked - clears the Blocked flag and possibly simulates an error
     *  insync - sets In_sync providing device isn't active
     *  write_error - sets WriteErrorSeen
     *  -write_error - clears WriteErrorSeen
     */
    int err = -EINVAL;
    if (cmd_match(buf, "faulty") && rdev->mddev->pers) {
        md_error(rdev->mddev, rdev);
        if (test_bit(Faulty, &rdev->flags))
            err = 0;
        else
            err = -EBUSY;
    } else if (cmd_match(buf, "remove")) {
        if (rdev->raid_disk >= 0)
            err = -EBUSY;
        else {
            mddev_t *mddev = rdev->mddev;
            kick_rdev_from_array(rdev);
            if (mddev->pers)
                md_update_sb(mddev, 1);
            md_new_event(mddev);
            err = 0;
        }
    } else if (cmd_match(buf, "writemostly")) {
        set_bit(WriteMostly, &rdev->flags);
        err = 0;
    } else if (cmd_match(buf, "-writemostly")) {
        clear_bit(WriteMostly, &rdev->flags);
        err = 0;
    } else if (cmd_match(buf, "blocked")) {
        set_bit(Blocked, &rdev->flags);
        err = 0;
    } else if (cmd_match(buf, "-blocked")) {
        if (!test_bit(Faulty, &rdev->flags) &&
            rdev->badblocks.unacked_exist) {
            /* metadata handler doesn't understand badblocks,
             * so we need to fail the device
             */
            md_error(rdev->mddev, rdev);
        }
        clear_bit(Blocked, &rdev->flags);
        clear_bit(BlockedBadBlocks, &rdev->flags);
        wake_up(&rdev->blocked_wait);
        set_bit(MD_RECOVERY_NEEDED, &rdev->mddev->recovery);
        md_wakeup_thread(rdev->mddev->thread);

        err = 0;
    } else if (cmd_match(buf, "insync") && rdev->raid_disk == -1) {
        set_bit(In_sync, &rdev->flags);
        err = 0;
    } else if (cmd_match(buf, "write_error")) {
        set_bit(WriteErrorSeen, &rdev->flags);
        err = 0;
    } else if (cmd_match(buf, "-write_error")) {
        clear_bit(WriteErrorSeen, &rdev->flags);
        err = 0;
    }
    if (!err)
        sysfs_notify_dirent_safe(rdev->sysfs_state);
    return err ? err : len;
}
static struct rdev_sysfs_entry rdev_state =
__ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);

static ssize_t
errors_show(mdk_rdev_t *rdev, char *page)
{
    return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
}

static ssize_t
errors_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
    char *e;
    unsigned long n = simple_strtoul(buf, &e, 10);
    if (*buf && (*e == 0 || *e == '\n')) {
        atomic_set(&rdev->corrected_errors, n);
        return len;
    }
    return -EINVAL;
}
static struct rdev_sysfs_entry rdev_errors =
__ATTR(errors, S_IRUGO|S_IWUSR, errors_show, errors_store);

static ssize_t
slot_show(mdk_rdev_t *rdev, char *page)
{
    if (rdev->raid_disk < 0)
        return sprintf(page, "none\n");
    else
        return sprintf(page, "%d\n", rdev->raid_disk);
}
2707 */ 2708 2709 if (rdev->raid_disk != -1) 2710 return -EBUSY; 2711 2712 if (test_bit(MD_RECOVERY_RUNNING, &rdev->mddev->recovery)) 2713 return -EBUSY; 2714 2715 if (rdev->mddev->pers->hot_add_disk == NULL) 2716 return -EINVAL; 2717 2718 list_for_each_entry(rdev2, &rdev->mddev->disks, same_set) 2719 if (rdev2->raid_disk == slot) 2720 return -EEXIST; 2721 2722 if (slot >= rdev->mddev->raid_disks && 2723 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) 2724 return -ENOSPC; 2725 2726 rdev->raid_disk = slot; 2727 if (test_bit(In_sync, &rdev->flags)) 2728 rdev->saved_raid_disk = slot; 2729 else 2730 rdev->saved_raid_disk = -1; 2731 err = rdev->mddev->pers-> 2732 hot_add_disk(rdev->mddev, rdev); 2733 if (err) { 2734 rdev->raid_disk = -1; 2735 return err; 2736 } else 2737 sysfs_notify_dirent_safe(rdev->sysfs_state); 2738 if (sysfs_link_rdev(rdev->mddev, rdev)) 2739 /* failure here is OK */; 2740 /* don't wakeup anyone, leave that to userspace. */ 2741 } else { 2742 if (slot >= rdev->mddev->raid_disks && 2743 slot >= rdev->mddev->raid_disks + rdev->mddev->delta_disks) 2744 return -ENOSPC; 2745 rdev->raid_disk = slot; 2746 /* assume it is working */ 2747 clear_bit(Faulty, &rdev->flags); 2748 clear_bit(WriteMostly, &rdev->flags); 2749 set_bit(In_sync, &rdev->flags); 2750 sysfs_notify_dirent_safe(rdev->sysfs_state); 2751 } 2752 return len; 2753 } 2754 2755 2756 static struct rdev_sysfs_entry rdev_slot = 2757 __ATTR(slot, S_IRUGO|S_IWUSR, slot_show, slot_store); 2758 2759 static ssize_t 2760 offset_show(mdk_rdev_t *rdev, char *page) 2761 { 2762 return sprintf(page, "%llu\n", (unsigned long long)rdev->data_offset); 2763 } 2764 2765 static ssize_t 2766 offset_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2767 { 2768 char *e; 2769 unsigned long long offset = simple_strtoull(buf, &e, 10); 2770 if (e==buf || (*e && *e != '\n')) 2771 return -EINVAL; 2772 if (rdev->mddev->pers && rdev->raid_disk >= 0) 2773 return -EBUSY; 2774 if (rdev->sectors && rdev->mddev->external) 2775 /* Must set offset before size, so overlap checks 2776 * can be sane */ 2777 return -EBUSY; 2778 rdev->data_offset = offset; 2779 return len; 2780 } 2781 2782 static struct rdev_sysfs_entry rdev_offset = 2783 __ATTR(offset, S_IRUGO|S_IWUSR, offset_show, offset_store); 2784 2785 static ssize_t 2786 rdev_size_show(mdk_rdev_t *rdev, char *page) 2787 { 2788 return sprintf(page, "%llu\n", (unsigned long long)rdev->sectors / 2); 2789 } 2790 2791 static int overlaps(sector_t s1, sector_t l1, sector_t s2, sector_t l2) 2792 { 2793 /* check if two start/length pairs overlap */ 2794 if (s1+l1 <= s2) 2795 return 0; 2796 if (s2+l2 <= s1) 2797 return 0; 2798 return 1; 2799 } 2800 2801 static int strict_blocks_to_sectors(const char *buf, sector_t *sectors) 2802 { 2803 unsigned long long blocks; 2804 sector_t new; 2805 2806 if (strict_strtoull(buf, 10, &blocks) < 0) 2807 return -EINVAL; 2808 2809 if (blocks & 1ULL << (8 * sizeof(blocks) - 1)) 2810 return -EINVAL; /* sector conversion overflow */ 2811 2812 new = blocks * 2; 2813 if (new != blocks * 2) 2814 return -EINVAL; /* unsigned long long to sector_t overflow */ 2815 2816 *sectors = new; 2817 return 0; 2818 } 2819 2820 static ssize_t 2821 rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len) 2822 { 2823 mddev_t *my_mddev = rdev->mddev; 2824 sector_t oldsectors = rdev->sectors; 2825 sector_t sectors; 2826 2827 if (strict_blocks_to_sectors(buf, §ors) < 0) 2828 return -EINVAL; 2829 if (my_mddev->pers && rdev->raid_disk >= 0) { 2830 if (my_mddev->persistent) { 2831 sectors = 
static ssize_t
rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
    mddev_t *my_mddev = rdev->mddev;
    sector_t oldsectors = rdev->sectors;
    sector_t sectors;

    if (strict_blocks_to_sectors(buf, &sectors) < 0)
        return -EINVAL;
    if (my_mddev->pers && rdev->raid_disk >= 0) {
        if (my_mddev->persistent) {
            sectors = super_types[my_mddev->major_version].
                rdev_size_change(rdev, sectors);
            if (!sectors)
                return -EBUSY;
        } else if (!sectors)
            sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
                rdev->data_offset;
    }
    if (sectors < my_mddev->dev_sectors)
        return -EINVAL; /* component must fit device */

    rdev->sectors = sectors;
    if (sectors > oldsectors && my_mddev->external) {
        /* need to check that all other rdevs with the same ->bdev
         * do not overlap.  We need to unlock the mddev to avoid
         * a deadlock.  We have already changed rdev->sectors, and if
         * we have to change it back, we will have the lock again.
         */
        mddev_t *mddev;
        int overlap = 0;
        struct list_head *tmp;

        mddev_unlock(my_mddev);
        for_each_mddev(mddev, tmp) {
            mdk_rdev_t *rdev2;

            mddev_lock(mddev);
            list_for_each_entry(rdev2, &mddev->disks, same_set)
                if (rdev->bdev == rdev2->bdev &&
                    rdev != rdev2 &&
                    overlaps(rdev->data_offset, rdev->sectors,
                         rdev2->data_offset,
                         rdev2->sectors)) {
                    overlap = 1;
                    break;
                }
            mddev_unlock(mddev);
            if (overlap) {
                mddev_put(mddev);
                break;
            }
        }
        mddev_lock(my_mddev);
        if (overlap) {
            /* Someone else could have slipped in a size
             * change here, but doing so is just silly.
             * We put oldsectors back because we *know* it is
             * safe, and trust userspace not to race with
             * itself
             */
            rdev->sectors = oldsectors;
            return -EBUSY;
        }
    }
    return len;
}

static struct rdev_sysfs_entry rdev_size =
__ATTR(size, S_IRUGO|S_IWUSR, rdev_size_show, rdev_size_store);


static ssize_t recovery_start_show(mdk_rdev_t *rdev, char *page)
{
    unsigned long long recovery_start = rdev->recovery_offset;

    if (test_bit(In_sync, &rdev->flags) ||
        recovery_start == MaxSector)
        return sprintf(page, "none\n");

    return sprintf(page, "%llu\n", recovery_start);
}

static ssize_t recovery_start_store(mdk_rdev_t *rdev, const char *buf, size_t len)
{
    unsigned long long recovery_start;

    if (cmd_match(buf, "none"))
        recovery_start = MaxSector;
    else if (strict_strtoull(buf, 10, &recovery_start))
        return -EINVAL;

    if (rdev->mddev->pers &&
        rdev->raid_disk >= 0)
        return -EBUSY;

    rdev->recovery_offset = recovery_start;
    if (recovery_start == MaxSector)
        set_bit(In_sync, &rdev->flags);
    else
        clear_bit(In_sync, &rdev->flags);
    return len;
}

static struct rdev_sysfs_entry rdev_recovery_start =
__ATTR(recovery_start, S_IRUGO|S_IWUSR, recovery_start_show, recovery_start_store);


static ssize_t
badblocks_show(struct badblocks *bb, char *page, int unack);
static ssize_t
badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack);

static ssize_t bb_show(mdk_rdev_t *rdev, char *page)
{
    return badblocks_show(&rdev->badblocks, page, 0);
}
static ssize_t bb_store(mdk_rdev_t *rdev, const char *page, size_t len)
{
    int rv = badblocks_store(&rdev->badblocks, page, len, 0);
    /* Maybe that ack was all we needed */
    if (test_and_clear_bit(BlockedBadBlocks, &rdev->flags))
        wake_up(&rdev->blocked_wait);
    return rv;
}
static struct rdev_sysfs_entry rdev_bad_blocks =
__ATTR(bad_blocks, S_IRUGO|S_IWUSR, bb_show, bb_store);
static ssize_t ubb_show(mdk_rdev_t *rdev, char *page)
{
    return badblocks_show(&rdev->badblocks, page, 1);
}
static ssize_t ubb_store(mdk_rdev_t *rdev, const char *page, size_t len)
{
    return badblocks_store(&rdev->badblocks, page, len, 1);
}
static struct rdev_sysfs_entry rdev_unack_bad_blocks =
__ATTR(unacknowledged_bad_blocks, S_IRUGO|S_IWUSR, ubb_show, ubb_store);

static struct attribute *rdev_default_attrs[] = {
    &rdev_state.attr,
    &rdev_errors.attr,
    &rdev_slot.attr,
    &rdev_offset.attr,
    &rdev_size.attr,
    &rdev_recovery_start.attr,
    &rdev_bad_blocks.attr,
    &rdev_unack_bad_blocks.attr,
    NULL,
};
static ssize_t
rdev_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
{
    struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
    mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
    mddev_t *mddev = rdev->mddev;
    ssize_t rv;

    if (!entry->show)
        return -EIO;

    rv = mddev ? mddev_lock(mddev) : -EBUSY;
    if (!rv) {
        if (rdev->mddev == NULL)
            rv = -EBUSY;
        else
            rv = entry->show(rdev, page);
        mddev_unlock(mddev);
    }
    return rv;
}

static ssize_t
rdev_attr_store(struct kobject *kobj, struct attribute *attr,
        const char *page, size_t length)
{
    struct rdev_sysfs_entry *entry = container_of(attr, struct rdev_sysfs_entry, attr);
    mdk_rdev_t *rdev = container_of(kobj, mdk_rdev_t, kobj);
    ssize_t rv;
    mddev_t *mddev = rdev->mddev;

    if (!entry->store)
        return -EIO;
    if (!capable(CAP_SYS_ADMIN))
        return -EACCES;
    rv = mddev ? mddev_lock(mddev) : -EBUSY;
    if (!rv) {
        if (rdev->mddev == NULL)
            rv = -EBUSY;
        else
            rv = entry->store(rdev, page, length);
        mddev_unlock(mddev);
    }
    return rv;
}

static void rdev_free(struct kobject *ko)
{
    mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
    kfree(rdev);
}
static const struct sysfs_ops rdev_sysfs_ops = {
    .show  = rdev_attr_show,
    .store = rdev_attr_store,
};
static struct kobj_type rdev_ktype = {
    .release       = rdev_free,
    .sysfs_ops     = &rdev_sysfs_ops,
    .default_attrs = rdev_default_attrs,
};

int md_rdev_init(mdk_rdev_t *rdev)
{
    rdev->desc_nr = -1;
    rdev->saved_raid_disk = -1;
    rdev->raid_disk = -1;
    rdev->flags = 0;
    rdev->data_offset = 0;
    rdev->sb_events = 0;
    rdev->last_read_error.tv_sec  = 0;
    rdev->last_read_error.tv_nsec = 0;
    rdev->sb_loaded = 0;
    rdev->bb_page = NULL;
    atomic_set(&rdev->nr_pending, 0);
    atomic_set(&rdev->read_errors, 0);
    atomic_set(&rdev->corrected_errors, 0);

    INIT_LIST_HEAD(&rdev->same_set);
    init_waitqueue_head(&rdev->blocked_wait);

    /* Add space to store bad block list.
     * This reserves the space even on arrays where it cannot
     * be used - I wonder if that matters
     */
    rdev->badblocks.count = 0;
    rdev->badblocks.shift = 0;
    rdev->badblocks.page = kmalloc(PAGE_SIZE, GFP_KERNEL);
    seqlock_init(&rdev->badblocks.lock);
    if (rdev->badblocks.page == NULL)
        return -ENOMEM;

    return 0;
}
EXPORT_SYMBOL_GPL(md_rdev_init);
/*
 * Import a device. If 'super_format' >= 0, then sanity check the superblock
 *
 * mark the device faulty if:
 *
 *   - the device is nonexistent (zero size)
 *   - the device has no valid superblock
 *
 * a faulty rdev _never_ has rdev->sb set.
 */
static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
{
    char b[BDEVNAME_SIZE];
    int err;
    mdk_rdev_t *rdev;
    sector_t size;

    rdev = kzalloc(sizeof(*rdev), GFP_KERNEL);
    if (!rdev) {
        printk(KERN_ERR "md: could not alloc mem for new device!\n");
        return ERR_PTR(-ENOMEM);
    }

    err = md_rdev_init(rdev);
    if (err)
        goto abort_free;
    err = alloc_disk_sb(rdev);
    if (err)
        goto abort_free;

    err = lock_rdev(rdev, newdev, super_format == -2);
    if (err)
        goto abort_free;

    kobject_init(&rdev->kobj, &rdev_ktype);

    size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
    if (!size) {
        printk(KERN_WARNING
            "md: %s has zero or unknown size, marking faulty!\n",
            bdevname(rdev->bdev, b));
        err = -EINVAL;
        goto abort_free;
    }

    if (super_format >= 0) {
        err = super_types[super_format].
            load_super(rdev, NULL, super_minor);
        if (err == -EINVAL) {
            printk(KERN_WARNING
                "md: %s does not have a valid v%d.%d "
                "superblock, not importing!\n",
                bdevname(rdev->bdev, b),
                super_format, super_minor);
            goto abort_free;
        }
        if (err < 0) {
            printk(KERN_WARNING
                "md: could not read %s's sb, not importing!\n",
                bdevname(rdev->bdev, b));
            goto abort_free;
        }
    }
    if (super_format == -1)
        /* hot-add for 0.90, or non-persistent: so no badblocks */
        rdev->badblocks.shift = -1;

    return rdev;

abort_free:
    if (rdev->bdev)
        unlock_rdev(rdev);
    free_disk_sb(rdev);
    kfree(rdev->badblocks.page);
    kfree(rdev);
    return ERR_PTR(err);
}
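/*
 * analyze_sbs() below picks the device with the highest event count as
 * the "freshest" superblock, validates the array-wide state against it,
 * and kicks any device whose superblock is stale or inconsistent.
 */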
/*
 * Check a full RAID array for plausibility
 */


static void analyze_sbs(mddev_t *mddev)
{
    int i;
    mdk_rdev_t *rdev, *freshest, *tmp;
    char b[BDEVNAME_SIZE];

    freshest = NULL;
    rdev_for_each(rdev, tmp, mddev)
        switch (super_types[mddev->major_version].
            load_super(rdev, freshest, mddev->minor_version)) {
        case 1:
            freshest = rdev;
            break;
        case 0:
            break;
        default:
            printk(KERN_ERR
                "md: fatal superblock inconsistency in %s"
                " -- removing from array\n",
                bdevname(rdev->bdev, b));
            kick_rdev_from_array(rdev);
        }


    super_types[mddev->major_version].
        validate_super(mddev, freshest);

    i = 0;
    rdev_for_each(rdev, tmp, mddev) {
        if (mddev->max_disks &&
            (rdev->desc_nr >= mddev->max_disks ||
             i > mddev->max_disks)) {
            printk(KERN_WARNING
                   "md: %s: %s: only %d devices permitted\n",
                   mdname(mddev), bdevname(rdev->bdev, b),
                   mddev->max_disks);
            kick_rdev_from_array(rdev);
            continue;
        }
        if (rdev != freshest)
            if (super_types[mddev->major_version].
                validate_super(mddev, rdev)) {
                printk(KERN_WARNING "md: kicking non-fresh %s"
                    " from array!\n",
                    bdevname(rdev->bdev, b));
                kick_rdev_from_array(rdev);
                continue;
            }
        if (mddev->level == LEVEL_MULTIPATH) {
            rdev->desc_nr = i++;
            rdev->raid_disk = rdev->desc_nr;
            set_bit(In_sync, &rdev->flags);
        } else if (rdev->raid_disk >= (mddev->raid_disks - min(0, mddev->delta_disks))) {
            rdev->raid_disk = -1;
            clear_bit(In_sync, &rdev->flags);
        }
    }
}

/* Read a fixed-point number.
 * Numbers in sysfs attributes should be in "standard" units where
 * possible, so time should be in seconds.
 * However we internally use a much smaller unit such as
 * milliseconds or jiffies.
 * This function takes a decimal number with a possible fractional
 * component, and produces an integer which is the result of
 * multiplying that number by 10^'scale'.
 * all without any floating-point arithmetic.
 */
int strict_strtoul_scaled(const char *cp, unsigned long *res, int scale)
{
    unsigned long result = 0;
    long decimals = -1;
    while (isdigit(*cp) || (*cp == '.' && decimals < 0)) {
        if (*cp == '.')
            decimals = 0;
        else if (decimals < scale) {
            unsigned int value;
            value = *cp - '0';
            result = result * 10 + value;
            if (decimals >= 0)
                decimals++;
        }
        cp++;
    }
    if (*cp == '\n')
        cp++;
    if (*cp)
        return -EINVAL;
    if (decimals < 0)
        decimals = 0;
    while (decimals < scale) {
        result *= 10;
        decimals++;
    }
    *res = result;
    return 0;
}
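/*
 * For example, with scale == 3 the string "1.5" yields 1500 and "0.03"
 * yields 30; safe_delay_store() below uses this to parse seconds into
 * milliseconds.
 */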

static void md_safemode_timeout(unsigned long data);

static ssize_t
safe_delay_show(mddev_t *mddev, char *page)
{
    int msec = (mddev->safemode_delay*1000)/HZ;
    return sprintf(page, "%d.%03d\n", msec/1000, msec%1000);
}
static ssize_t
safe_delay_store(mddev_t *mddev, const char *cbuf, size_t len)
{
    unsigned long msec;

    if (strict_strtoul_scaled(cbuf, &msec, 3) < 0)
        return -EINVAL;
    if (msec == 0)
        mddev->safemode_delay = 0;
    else {
        unsigned long old_delay = mddev->safemode_delay;
        mddev->safemode_delay = (msec*HZ)/1000;
        if (mddev->safemode_delay == 0)
            mddev->safemode_delay = 1;
        if (mddev->safemode_delay < old_delay)
            md_safemode_timeout((unsigned long)mddev);
    }
    return len;
}
static struct md_sysfs_entry md_safe_delay =
__ATTR(safe_mode_delay, S_IRUGO|S_IWUSR, safe_delay_show, safe_delay_store);

static ssize_t
level_show(mddev_t *mddev, char *page)
{
    struct mdk_personality *p = mddev->pers;
    if (p)
        return sprintf(page, "%s\n", p->name);
    else if (mddev->clevel[0])
        return sprintf(page, "%s\n", mddev->clevel);
    else if (mddev->level != LEVEL_NONE)
        return sprintf(page, "%d\n", mddev->level);
    else
        return 0;
}

static ssize_t
level_store(mddev_t *mddev, const char *buf, size_t len)
{
    char clevel[16];
    ssize_t rv = len;
    struct mdk_personality *pers;
    long level;
    void *priv;
    mdk_rdev_t *rdev;

    if (mddev->pers == NULL) {
        if (len == 0)
            return 0;
        if (len >= sizeof(mddev->clevel))
            return -ENOSPC;
        strncpy(mddev->clevel, buf, len);
        if (mddev->clevel[len-1] == '\n')
            len--;
        mddev->clevel[len] = 0;
        mddev->level = LEVEL_NONE;
        return rv;
    }

    /* request to change the personality.  Need to ensure:
     *  - array is not engaged in resync/recovery/reshape
     *  - old personality can be suspended
     *  - new personality can take over the array
     */

    if (mddev->sync_thread ||
        mddev->reshape_position != MaxSector ||
        mddev->sysfs_active)
        return -EBUSY;

    if (!mddev->pers->quiesce) {
        printk(KERN_WARNING "md: %s: %s does not support online personality change\n",
               mdname(mddev), mddev->pers->name);
        return -EINVAL;
    }

    /* Now find the new personality */
    if (len == 0 || len >= sizeof(clevel))
        return -EINVAL;
    strncpy(clevel, buf, len);
    if (clevel[len-1] == '\n')
        len--;
    clevel[len] = 0;
    if (strict_strtol(clevel, 10, &level))
        level = LEVEL_NONE;

    if (request_module("md-%s", clevel) != 0)
        request_module("md-level-%s", clevel);
    spin_lock(&pers_lock);
    pers = find_pers(level, clevel);
    if (!pers || !try_module_get(pers->owner)) {
        spin_unlock(&pers_lock);
        printk(KERN_WARNING "md: personality %s not loaded\n", clevel);
        return -EINVAL;
    }
    spin_unlock(&pers_lock);

    if (pers == mddev->pers) {
        /* Nothing to do! */
        module_put(pers->owner);
        return rv;
    }
    if (!pers->takeover) {
        module_put(pers->owner);
        printk(KERN_WARNING "md: %s: %s does not support personality takeover\n",
               mdname(mddev), clevel);
        return -EINVAL;
    }

    list_for_each_entry(rdev, &mddev->disks, same_set)
        rdev->new_raid_disk = rdev->raid_disk;

    /* ->takeover must set new_* and/or delta_disks
     * if it succeeds, and may set them when it fails.
     */
    priv = pers->takeover(mddev);
    if (IS_ERR(priv)) {
        mddev->new_level = mddev->level;
        mddev->new_layout = mddev->layout;
        mddev->new_chunk_sectors = mddev->chunk_sectors;
        mddev->raid_disks -= mddev->delta_disks;
        mddev->delta_disks = 0;
        module_put(pers->owner);
        printk(KERN_WARNING "md: %s: %s would not accept array\n",
               mdname(mddev), clevel);
        return PTR_ERR(priv);
    }

    /* Looks like we have a winner */
    mddev_suspend(mddev);
    mddev->pers->stop(mddev);

    if (mddev->pers->sync_request == NULL &&
        pers->sync_request != NULL) {
        /* need to add the md_redundancy_group */
        if (sysfs_create_group(&mddev->kobj, &md_redundancy_group))
            printk(KERN_WARNING
                   "md: cannot register extra attributes for %s\n",
                   mdname(mddev));
        mddev->sysfs_action = sysfs_get_dirent(mddev->kobj.sd, NULL, "sync_action");
    }
    if (mddev->pers->sync_request != NULL &&
        pers->sync_request == NULL) {
        /* need to remove the md_redundancy_group */
        if (mddev->to_remove == NULL)
            mddev->to_remove = &md_redundancy_group;
    }

    if (mddev->pers->sync_request == NULL &&
        mddev->external) {
        /* We are converting from a no-redundancy array
         * to a redundancy array and metadata is managed
         * externally so we need to be sure that writes
         * won't block due to a need to transition
         *      clean->dirty
         * until external management is started.
3412 */ 3413 mddev->in_sync = 0; 3414 mddev->safemode_delay = 0; 3415 mddev->safemode = 0; 3416 } 3417 3418 list_for_each_entry(rdev, &mddev->disks, same_set) { 3419 if (rdev->raid_disk < 0) 3420 continue; 3421 if (rdev->new_raid_disk >= mddev->raid_disks) 3422 rdev->new_raid_disk = -1; 3423 if (rdev->new_raid_disk == rdev->raid_disk) 3424 continue; 3425 sysfs_unlink_rdev(mddev, rdev); 3426 } 3427 list_for_each_entry(rdev, &mddev->disks, same_set) { 3428 if (rdev->raid_disk < 0) 3429 continue; 3430 if (rdev->new_raid_disk == rdev->raid_disk) 3431 continue; 3432 rdev->raid_disk = rdev->new_raid_disk; 3433 if (rdev->raid_disk < 0) 3434 clear_bit(In_sync, &rdev->flags); 3435 else { 3436 if (sysfs_link_rdev(mddev, rdev)) 3437 printk(KERN_WARNING "md: cannot register rd%d" 3438 " for %s after level change\n", 3439 rdev->raid_disk, mdname(mddev)); 3440 } 3441 } 3442 3443 module_put(mddev->pers->owner); 3444 mddev->pers = pers; 3445 mddev->private = priv; 3446 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 3447 mddev->level = mddev->new_level; 3448 mddev->layout = mddev->new_layout; 3449 mddev->chunk_sectors = mddev->new_chunk_sectors; 3450 mddev->delta_disks = 0; 3451 mddev->degraded = 0; 3452 if (mddev->pers->sync_request == NULL) { 3453 /* this is now an array without redundancy, so 3454 * it must always be in_sync 3455 */ 3456 mddev->in_sync = 1; 3457 del_timer_sync(&mddev->safemode_timer); 3458 } 3459 pers->run(mddev); 3460 mddev_resume(mddev); 3461 set_bit(MD_CHANGE_DEVS, &mddev->flags); 3462 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 3463 md_wakeup_thread(mddev->thread); 3464 sysfs_notify(&mddev->kobj, NULL, "level"); 3465 md_new_event(mddev); 3466 return rv; 3467 } 3468 3469 static struct md_sysfs_entry md_level = 3470 __ATTR(level, S_IRUGO|S_IWUSR, level_show, level_store); 3471 3472 3473 static ssize_t 3474 layout_show(mddev_t *mddev, char *page) 3475 { 3476 /* just a number, not meaningful for all levels */ 3477 if (mddev->reshape_position != MaxSector && 3478 mddev->layout != mddev->new_layout) 3479 return sprintf(page, "%d (%d)\n", 3480 mddev->new_layout, mddev->layout); 3481 return sprintf(page, "%d\n", mddev->layout); 3482 } 3483 3484 static ssize_t 3485 layout_store(mddev_t *mddev, const char *buf, size_t len) 3486 { 3487 char *e; 3488 unsigned long n = simple_strtoul(buf, &e, 10); 3489 3490 if (!*buf || (*e && *e != '\n')) 3491 return -EINVAL; 3492 3493 if (mddev->pers) { 3494 int err; 3495 if (mddev->pers->check_reshape == NULL) 3496 return -EBUSY; 3497 mddev->new_layout = n; 3498 err = mddev->pers->check_reshape(mddev); 3499 if (err) { 3500 mddev->new_layout = mddev->layout; 3501 return err; 3502 } 3503 } else { 3504 mddev->new_layout = n; 3505 if (mddev->reshape_position == MaxSector) 3506 mddev->layout = n; 3507 } 3508 return len; 3509 } 3510 static struct md_sysfs_entry md_layout = 3511 __ATTR(layout, S_IRUGO|S_IWUSR, layout_show, layout_store); 3512 3513 3514 static ssize_t 3515 raid_disks_show(mddev_t *mddev, char *page) 3516 { 3517 if (mddev->raid_disks == 0) 3518 return 0; 3519 if (mddev->reshape_position != MaxSector && 3520 mddev->delta_disks != 0) 3521 return sprintf(page, "%d (%d)\n", mddev->raid_disks, 3522 mddev->raid_disks - mddev->delta_disks); 3523 return sprintf(page, "%d\n", mddev->raid_disks); 3524 } 3525 3526 static int update_raid_disks(mddev_t *mddev, int raid_disks); 3527 3528 static ssize_t 3529 raid_disks_store(mddev_t *mddev, const char *buf, size_t len) 3530 { 3531 char *e; 3532 int rv = 0; 3533 unsigned long n = simple_strtoul(buf, 
static ssize_t
raid_disks_show(mddev_t *mddev, char *page)
{
    if (mddev->raid_disks == 0)
        return 0;
    if (mddev->reshape_position != MaxSector &&
        mddev->delta_disks != 0)
        return sprintf(page, "%d (%d)\n", mddev->raid_disks,
                   mddev->raid_disks - mddev->delta_disks);
    return sprintf(page, "%d\n", mddev->raid_disks);
}

static int update_raid_disks(mddev_t *mddev, int raid_disks);

static ssize_t
raid_disks_store(mddev_t *mddev, const char *buf, size_t len)
{
    char *e;
    int rv = 0;
    unsigned long n = simple_strtoul(buf, &e, 10);

    if (!*buf || (*e && *e != '\n'))
        return -EINVAL;

    if (mddev->pers)
        rv = update_raid_disks(mddev, n);
    else if (mddev->reshape_position != MaxSector) {
        int olddisks = mddev->raid_disks - mddev->delta_disks;
        mddev->delta_disks = n - olddisks;
        mddev->raid_disks = n;
    } else
        mddev->raid_disks = n;
    return rv ? rv : len;
}
static struct md_sysfs_entry md_raid_disks =
__ATTR(raid_disks, S_IRUGO|S_IWUSR, raid_disks_show, raid_disks_store);

static ssize_t
chunk_size_show(mddev_t *mddev, char *page)
{
    if (mddev->reshape_position != MaxSector &&
        mddev->chunk_sectors != mddev->new_chunk_sectors)
        return sprintf(page, "%d (%d)\n",
                   mddev->new_chunk_sectors << 9,
                   mddev->chunk_sectors << 9);
    return sprintf(page, "%d\n", mddev->chunk_sectors << 9);
}

static ssize_t
chunk_size_store(mddev_t *mddev, const char *buf, size_t len)
{
    char *e;
    unsigned long n = simple_strtoul(buf, &e, 10);

    if (!*buf || (*e && *e != '\n'))
        return -EINVAL;

    if (mddev->pers) {
        int err;
        if (mddev->pers->check_reshape == NULL)
            return -EBUSY;
        mddev->new_chunk_sectors = n >> 9;
        err = mddev->pers->check_reshape(mddev);
        if (err) {
            mddev->new_chunk_sectors = mddev->chunk_sectors;
            return err;
        }
    } else {
        mddev->new_chunk_sectors = n >> 9;
        if (mddev->reshape_position == MaxSector)
            mddev->chunk_sectors = n >> 9;
    }
    return len;
}
static struct md_sysfs_entry md_chunk_size =
__ATTR(chunk_size, S_IRUGO|S_IWUSR, chunk_size_show, chunk_size_store);

static ssize_t
resync_start_show(mddev_t *mddev, char *page)
{
    if (mddev->recovery_cp == MaxSector)
        return sprintf(page, "none\n");
    return sprintf(page, "%llu\n", (unsigned long long)mddev->recovery_cp);
}

static ssize_t
resync_start_store(mddev_t *mddev, const char *buf, size_t len)
{
    char *e;
    unsigned long long n = simple_strtoull(buf, &e, 10);

    if (mddev->pers && !test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
        return -EBUSY;
    if (cmd_match(buf, "none"))
        n = MaxSector;
    else if (!*buf || (*e && *e != '\n'))
        return -EINVAL;

    mddev->recovery_cp = n;
    return len;
}
static struct md_sysfs_entry md_resync_start =
__ATTR(resync_start, S_IRUGO|S_IWUSR, resync_start_show, resync_start_store);

/*
 * The array state can be:
 *
 * clear
 *     No devices, no size, no level
 *     Equivalent to STOP_ARRAY ioctl
 * inactive
 *     May have some settings, but array is not active
 *     all IO results in error
 *     When written, doesn't tear down array, but just stops it
 * suspended (not supported yet)
 *     All IO requests will block. The array can be reconfigured.
 *     Writing this, if accepted, will block until array is quiescent
 * readonly
 *     no resync can happen.  no superblocks get written.
 *     write requests fail
 * read-auto
 *     like readonly, but behaves like 'clean' on a write request.
 *
 * clean - no pending writes, but otherwise active.
 *     When written to inactive array, starts without resync
 *     If a write request arrives then
 *       if metadata is known, mark 'dirty' and switch to 'active'.
 *       if not known, block and switch to write-pending
 *     If written to an active array that has pending writes, then fails.
 * active
 *     fully active: IO and resync can be happening.
 *     When written to inactive array, starts with resync
 *
 * write-pending
 *     clean, but writes are blocked waiting for 'active' to be written.
 *
 * active-idle
 *     like active, but no writes have been seen for a while (100msec).
 *
 */
enum array_state { clear, inactive, suspended, readonly, read_auto, clean, active,
           write_pending, active_idle, bad_word};
static char *array_states[] = {
    "clear", "inactive", "suspended", "readonly", "read-auto", "clean", "active",
    "write-pending", "active-idle", NULL };

static int match_word(const char *word, char **list)
{
    int n;
    for (n = 0; list[n]; n++)
        if (cmd_match(word, list[n]))
            break;
    return n;
}

static ssize_t
array_state_show(mddev_t *mddev, char *page)
{
    enum array_state st = inactive;

    if (mddev->pers)
        switch(mddev->ro) {
        case 1:
            st = readonly;
            break;
        case 2:
            st = read_auto;
            break;
        case 0:
            if (mddev->in_sync)
                st = clean;
            else if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
                st = write_pending;
            else if (mddev->safemode)
                st = active_idle;
            else
                st = active;
        }
    else {
        if (list_empty(&mddev->disks) &&
            mddev->raid_disks == 0 &&
            mddev->dev_sectors == 0)
            st = clear;
        else
            st = inactive;
    }
    return sprintf(page, "%s\n", array_states[st]);
}

static int do_md_stop(mddev_t *mddev, int ro, int is_open);
static int md_set_readonly(mddev_t *mddev, int is_open);
static int do_md_run(mddev_t *mddev);
static int restart_array(mddev_t *mddev);

static ssize_t
array_state_store(mddev_t *mddev, const char *buf, size_t len)
{
    int err = -EINVAL;
    enum array_state st = match_word(buf, array_states);
    switch(st) {
    case bad_word:
        break;
    case clear:
        /* stopping an active array */
        if (atomic_read(&mddev->openers) > 0)
            return -EBUSY;
        err = do_md_stop(mddev, 0, 0);
        break;
    case inactive:
        /* stopping an active array */
        if (mddev->pers) {
            if (atomic_read(&mddev->openers) > 0)
                return -EBUSY;
            err = do_md_stop(mddev, 2, 0);
        } else
            err = 0; /* already inactive */
        break;
    case suspended:
        break; /* not supported yet */
    case readonly:
        if (mddev->pers)
            err = md_set_readonly(mddev, 0);
        else {
            mddev->ro = 1;
            set_disk_ro(mddev->gendisk, 1);
            err = do_md_run(mddev);
        }
        break;
    case read_auto:
        if (mddev->pers) {
            if (mddev->ro == 0)
                err = md_set_readonly(mddev, 0);
            else if (mddev->ro == 1)
                err = restart_array(mddev);
            if (err == 0) {
                mddev->ro = 2;
                set_disk_ro(mddev->gendisk, 0);
            }
        } else {
            mddev->ro = 2;
            err = do_md_run(mddev);
        }
        break;
    case clean:
        if (mddev->pers) {
            restart_array(mddev);
            spin_lock_irq(&mddev->write_lock);
            if (atomic_read(&mddev->writes_pending) == 0) {
                if (mddev->in_sync == 0) {
                    mddev->in_sync = 1;
                    if (mddev->safemode == 1)
                        mddev->safemode = 0;
                    set_bit(MD_CHANGE_CLEAN, &mddev->flags);
                }
                err = 0;
            } else
                err = -EBUSY;
            spin_unlock_irq(&mddev->write_lock);
        } else
            err = -EINVAL;
        break;
    case active:
        if (mddev->pers) {
            restart_array(mddev);
            clear_bit(MD_CHANGE_PENDING, &mddev->flags);
            wake_up(&mddev->sb_wait);
            err = 0;
        } else {
            mddev->ro = 0;
            set_disk_ro(mddev->gendisk, 0);
            err = do_md_run(mddev);
        }
        break;
    case write_pending:
    case active_idle:
        /* these cannot be set */
        break;
    }
    if (err)
        return err;
    else {
        sysfs_notify_dirent_safe(mddev->sysfs_state);
        return len;
    }
}
static struct md_sysfs_entry md_array_state =
__ATTR(array_state, S_IRUGO|S_IWUSR, array_state_show, array_state_store);

static ssize_t
max_corrected_read_errors_show(mddev_t *mddev, char *page) {
    return sprintf(page, "%d\n",
               atomic_read(&mddev->max_corr_read_errors));
}

static ssize_t
max_corrected_read_errors_store(mddev_t *mddev, const char *buf, size_t len)
{
    char *e;
    unsigned long n = simple_strtoul(buf, &e, 10);

    if (*buf && (*e == 0 || *e == '\n')) {
        atomic_set(&mddev->max_corr_read_errors, n);
        return len;
    }
    return -EINVAL;
}

static struct md_sysfs_entry max_corr_read_errors =
__ATTR(max_read_errors, S_IRUGO|S_IWUSR, max_corrected_read_errors_show,
    max_corrected_read_errors_store);

static ssize_t
null_show(mddev_t *mddev, char *page)
{
    return -EINVAL;
}

static ssize_t
new_dev_store(mddev_t *mddev, const char *buf, size_t len)
{
    /* buf must be %d:%d\n? giving major and minor numbers */
    /* The new device is added to the array.
     * If the array has a persistent superblock, we read the
     * superblock to initialise info and check validity.
     * Otherwise, the only checking done is that in
     * bind_rdev_to_array, which mainly checks size.
     */
    char *e;
    int major = simple_strtoul(buf, &e, 10);
    int minor;
    dev_t dev;
    mdk_rdev_t *rdev;
    int err;

    if (!*buf || *e != ':' || !e[1] || e[1] == '\n')
        return -EINVAL;
    minor = simple_strtoul(e+1, &e, 10);
    if (*e && *e != '\n')
        return -EINVAL;
    dev = MKDEV(major, minor);
    if (major != MAJOR(dev) ||
        minor != MINOR(dev))
        return -EOVERFLOW;


    if (mddev->persistent) {
        rdev = md_import_device(dev, mddev->major_version,
                    mddev->minor_version);
        if (!IS_ERR(rdev) && !list_empty(&mddev->disks)) {
            mdk_rdev_t *rdev0 = list_entry(mddev->disks.next,
                               mdk_rdev_t, same_set);
            err = super_types[mddev->major_version]
                .load_super(rdev, rdev0, mddev->minor_version);
            if (err < 0)
                goto out;
        }
    } else if (mddev->external)
        rdev = md_import_device(dev, -2, -1);
    else
        rdev = md_import_device(dev, -1, -1);

    if (IS_ERR(rdev))
        return PTR_ERR(rdev);
    err = bind_rdev_to_array(rdev, mddev);
 out:
    if (err)
        export_rdev(rdev);
    return err ? err : len;
}

static struct md_sysfs_entry md_new_device =
__ATTR(new_dev, S_IWUSR, null_show, new_dev_store);
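/*
 * bitmap_set_bits accepts a list of chunk numbers and/or ranges, e.g.
 * writing "100-200 250" dirties chunks 100 through 200 and chunk 250.
 */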
(range) */ 3896 while (*buf) { 3897 chunk = end_chunk = simple_strtoul(buf, &end, 0); 3898 if (buf == end) break; 3899 if (*end == '-') { /* range */ 3900 buf = end + 1; 3901 end_chunk = simple_strtoul(buf, &end, 0); 3902 if (buf == end) break; 3903 } 3904 if (*end && !isspace(*end)) break; 3905 bitmap_dirty_bits(mddev->bitmap, chunk, end_chunk); 3906 buf = skip_spaces(end); 3907 } 3908 bitmap_unplug(mddev->bitmap); /* flush the bits to disk */ 3909 out: 3910 return len; 3911 } 3912 3913 static struct md_sysfs_entry md_bitmap = 3914 __ATTR(bitmap_set_bits, S_IWUSR, null_show, bitmap_store); 3915 3916 static ssize_t 3917 size_show(mddev_t *mddev, char *page) 3918 { 3919 return sprintf(page, "%llu\n", 3920 (unsigned long long)mddev->dev_sectors / 2); 3921 } 3922 3923 static int update_size(mddev_t *mddev, sector_t num_sectors); 3924 3925 static ssize_t 3926 size_store(mddev_t *mddev, const char *buf, size_t len) 3927 { 3928 /* If array is inactive, we can reduce the component size, but 3929 * not increase it (except from 0). 3930 * If array is active, we can try an on-line resize 3931 */ 3932 sector_t sectors; 3933 int err = strict_blocks_to_sectors(buf, &sectors); 3934 3935 if (err < 0) 3936 return err; 3937 if (mddev->pers) { 3938 err = update_size(mddev, sectors); 3939 md_update_sb(mddev, 1); 3940 } else { 3941 if (mddev->dev_sectors == 0 || 3942 mddev->dev_sectors > sectors) 3943 mddev->dev_sectors = sectors; 3944 else 3945 err = -ENOSPC; 3946 } 3947 return err ? err : len; 3948 } 3949 3950 static struct md_sysfs_entry md_size = 3951 __ATTR(component_size, S_IRUGO|S_IWUSR, size_show, size_store); 3952 3953 3954 /* Metadata version. 3955 * This is one of 3956 * 'none' for arrays with no metadata (good luck...) 3957 * 'external' for arrays with externally managed metadata, 3958 * or N.M for internally known formats 3959 */ 3960 static ssize_t 3961 metadata_show(mddev_t *mddev, char *page) 3962 { 3963 if (mddev->persistent) 3964 return sprintf(page, "%d.%d\n", 3965 mddev->major_version, mddev->minor_version); 3966 else if (mddev->external) 3967 return sprintf(page, "external:%s\n", mddev->metadata_type); 3968 else 3969 return sprintf(page, "none\n"); 3970 } 3971 3972 static ssize_t 3973 metadata_store(mddev_t *mddev, const char *buf, size_t len) 3974 { 3975 int major, minor; 3976 char *e; 3977 /* Changing the details of 'external' metadata is 3978 * always permitted. Otherwise there must be 3979 * no devices attached to the array.
3980 */ 3981 if (mddev->external && strncmp(buf, "external:", 9) == 0) 3982 ; 3983 else if (!list_empty(&mddev->disks)) 3984 return -EBUSY; 3985 3986 if (cmd_match(buf, "none")) { 3987 mddev->persistent = 0; 3988 mddev->external = 0; 3989 mddev->major_version = 0; 3990 mddev->minor_version = 90; 3991 return len; 3992 } 3993 if (strncmp(buf, "external:", 9) == 0) { 3994 size_t namelen = len-9; 3995 if (namelen >= sizeof(mddev->metadata_type)) 3996 namelen = sizeof(mddev->metadata_type)-1; 3997 strncpy(mddev->metadata_type, buf+9, namelen); 3998 mddev->metadata_type[namelen] = 0; 3999 if (namelen && mddev->metadata_type[namelen-1] == '\n') 4000 mddev->metadata_type[--namelen] = 0; 4001 mddev->persistent = 0; 4002 mddev->external = 1; 4003 mddev->major_version = 0; 4004 mddev->minor_version = 90; 4005 return len; 4006 } 4007 major = simple_strtoul(buf, &e, 10); 4008 if (e==buf || *e != '.') 4009 return -EINVAL; 4010 buf = e+1; 4011 minor = simple_strtoul(buf, &e, 10); 4012 if (e==buf || (*e && *e != '\n') ) 4013 return -EINVAL; 4014 if (major >= ARRAY_SIZE(super_types) || super_types[major].name == NULL) 4015 return -ENOENT; 4016 mddev->major_version = major; 4017 mddev->minor_version = minor; 4018 mddev->persistent = 1; 4019 mddev->external = 0; 4020 return len; 4021 } 4022 4023 static struct md_sysfs_entry md_metadata = 4024 __ATTR(metadata_version, S_IRUGO|S_IWUSR, metadata_show, metadata_store); 4025 4026 static ssize_t 4027 action_show(mddev_t *mddev, char *page) 4028 { 4029 char *type = "idle"; 4030 if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery)) 4031 type = "frozen"; 4032 else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 4033 (!mddev->ro && test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))) { 4034 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 4035 type = "reshape"; 4036 else if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 4037 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 4038 type = "resync"; 4039 else if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)) 4040 type = "check"; 4041 else 4042 type = "repair"; 4043 } else if (test_bit(MD_RECOVERY_RECOVER, &mddev->recovery)) 4044 type = "recover"; 4045 } 4046 return sprintf(page, "%s\n", type); 4047 } 4048 4049 static void reap_sync_thread(mddev_t *mddev); 4050 4051 static ssize_t 4052 action_store(mddev_t *mddev, const char *page, size_t len) 4053 { 4054 if (!mddev->pers || !mddev->pers->sync_request) 4055 return -EINVAL; 4056 4057 if (cmd_match(page, "frozen")) 4058 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4059 else 4060 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 4061 4062 if (cmd_match(page, "idle") || cmd_match(page, "frozen")) { 4063 if (mddev->sync_thread) { 4064 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4065 reap_sync_thread(mddev); 4066 } 4067 } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) || 4068 test_bit(MD_RECOVERY_NEEDED, &mddev->recovery)) 4069 return -EBUSY; 4070 else if (cmd_match(page, "resync")) 4071 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4072 else if (cmd_match(page, "recover")) { 4073 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 4074 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4075 } else if (cmd_match(page, "reshape")) { 4076 int err; 4077 if (mddev->pers->start_reshape == NULL) 4078 return -EINVAL; 4079 err = mddev->pers->start_reshape(mddev); 4080 if (err) 4081 return err; 4082 sysfs_notify(&mddev->kobj, NULL, "degraded"); 4083 } else { 4084 if (cmd_match(page, "check")) 4085 set_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4086 else if 
(!cmd_match(page, "repair")) 4087 return -EINVAL; 4088 set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 4089 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4090 } 4091 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4092 md_wakeup_thread(mddev->thread); 4093 sysfs_notify_dirent_safe(mddev->sysfs_action); 4094 return len; 4095 } 4096 4097 static ssize_t 4098 mismatch_cnt_show(mddev_t *mddev, char *page) 4099 { 4100 return sprintf(page, "%llu\n", 4101 (unsigned long long) mddev->resync_mismatches); 4102 } 4103 4104 static struct md_sysfs_entry md_scan_mode = 4105 __ATTR(sync_action, S_IRUGO|S_IWUSR, action_show, action_store); 4106 4107 4108 static struct md_sysfs_entry md_mismatches = __ATTR_RO(mismatch_cnt); 4109 4110 static ssize_t 4111 sync_min_show(mddev_t *mddev, char *page) 4112 { 4113 return sprintf(page, "%d (%s)\n", speed_min(mddev), 4114 mddev->sync_speed_min ? "local": "system"); 4115 } 4116 4117 static ssize_t 4118 sync_min_store(mddev_t *mddev, const char *buf, size_t len) 4119 { 4120 int min; 4121 char *e; 4122 if (strncmp(buf, "system", 6)==0) { 4123 mddev->sync_speed_min = 0; 4124 return len; 4125 } 4126 min = simple_strtoul(buf, &e, 10); 4127 if (buf == e || (*e && *e != '\n') || min <= 0) 4128 return -EINVAL; 4129 mddev->sync_speed_min = min; 4130 return len; 4131 } 4132 4133 static struct md_sysfs_entry md_sync_min = 4134 __ATTR(sync_speed_min, S_IRUGO|S_IWUSR, sync_min_show, sync_min_store); 4135 4136 static ssize_t 4137 sync_max_show(mddev_t *mddev, char *page) 4138 { 4139 return sprintf(page, "%d (%s)\n", speed_max(mddev), 4140 mddev->sync_speed_max ? "local": "system"); 4141 } 4142 4143 static ssize_t 4144 sync_max_store(mddev_t *mddev, const char *buf, size_t len) 4145 { 4146 int max; 4147 char *e; 4148 if (strncmp(buf, "system", 6)==0) { 4149 mddev->sync_speed_max = 0; 4150 return len; 4151 } 4152 max = simple_strtoul(buf, &e, 10); 4153 if (buf == e || (*e && *e != '\n') || max <= 0) 4154 return -EINVAL; 4155 mddev->sync_speed_max = max; 4156 return len; 4157 } 4158 4159 static struct md_sysfs_entry md_sync_max = 4160 __ATTR(sync_speed_max, S_IRUGO|S_IWUSR, sync_max_show, sync_max_store); 4161 4162 static ssize_t 4163 degraded_show(mddev_t *mddev, char *page) 4164 { 4165 return sprintf(page, "%d\n", mddev->degraded); 4166 } 4167 static struct md_sysfs_entry md_degraded = __ATTR_RO(degraded); 4168 4169 static ssize_t 4170 sync_force_parallel_show(mddev_t *mddev, char *page) 4171 { 4172 return sprintf(page, "%d\n", mddev->parallel_resync); 4173 } 4174 4175 static ssize_t 4176 sync_force_parallel_store(mddev_t *mddev, const char *buf, size_t len) 4177 { 4178 long n; 4179 4180 if (strict_strtol(buf, 10, &n)) 4181 return -EINVAL; 4182 4183 if (n != 0 && n != 1) 4184 return -EINVAL; 4185 4186 mddev->parallel_resync = n; 4187 4188 if (mddev->sync_thread) 4189 wake_up(&resync_wait); 4190 4191 return len; 4192 } 4193 4194 /* force parallel resync, even with shared block devices */ 4195 static struct md_sysfs_entry md_sync_force_parallel = 4196 __ATTR(sync_force_parallel, S_IRUGO|S_IWUSR, 4197 sync_force_parallel_show, sync_force_parallel_store); 4198 4199 static ssize_t 4200 sync_speed_show(mddev_t *mddev, char *page) 4201 { 4202 unsigned long resync, dt, db; 4203 if (mddev->curr_resync == 0) 4204 return sprintf(page, "none\n"); 4205 resync = mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active); 4206 dt = (jiffies - mddev->resync_mark) / HZ; 4207 if (!dt) dt++; 4208 db = resync - mddev->resync_mark_cnt; 4209 return sprintf(page, "%lu\n", db/dt/2); /* K/sec */ 4210 } 
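/*
 * Worked example (illustrative only, not part of the driver logic): if
 * 20480 sectors have completed since the last resync mark (db = 20480)
 * and that mark was taken 5 seconds ago (dt = 5), sync_speed reports
 * 20480 / 5 / 2 = 2048 K/sec; the final division by two converts
 * 512-byte sectors to KiB.
 *
 * Userspace sketch (an assumption for illustration, relying only on the
 * /sys/block/mdX/md/ layout created by md_alloc() below):
 *
 *	char buf[32];
 *	int fd = open("/sys/block/md0/md/sync_speed", O_RDONLY);
 *	ssize_t n = (fd >= 0) ? read(fd, buf, sizeof(buf) - 1) : -1;
 *	if (n > 0) {
 *		buf[n] = '\0';
 *		printf("%s", buf);	// e.g. "2048\n", or "none\n" when idle
 *	}
 *
 * The value is a snapshot of the window since the last mark, so
 * repeated reads can fluctuate.
 */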
4211 4212 static struct md_sysfs_entry md_sync_speed = __ATTR_RO(sync_speed); 4213 4214 static ssize_t 4215 sync_completed_show(mddev_t *mddev, char *page) 4216 { 4217 unsigned long long max_sectors, resync; 4218 4219 if (!test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4220 return sprintf(page, "none\n"); 4221 4222 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 4223 max_sectors = mddev->resync_max_sectors; 4224 else 4225 max_sectors = mddev->dev_sectors; 4226 4227 resync = mddev->curr_resync_completed; 4228 return sprintf(page, "%llu / %llu\n", resync, max_sectors); 4229 } 4230 4231 static struct md_sysfs_entry md_sync_completed = __ATTR_RO(sync_completed); 4232 4233 static ssize_t 4234 min_sync_show(mddev_t *mddev, char *page) 4235 { 4236 return sprintf(page, "%llu\n", 4237 (unsigned long long)mddev->resync_min); 4238 } 4239 static ssize_t 4240 min_sync_store(mddev_t *mddev, const char *buf, size_t len) 4241 { 4242 unsigned long long min; 4243 if (strict_strtoull(buf, 10, &min)) 4244 return -EINVAL; 4245 if (min > mddev->resync_max) 4246 return -EINVAL; 4247 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4248 return -EBUSY; 4249 4250 /* Must be a multiple of chunk_size */ 4251 if (mddev->chunk_sectors) { 4252 sector_t temp = min; 4253 if (sector_div(temp, mddev->chunk_sectors)) 4254 return -EINVAL; 4255 } 4256 mddev->resync_min = min; 4257 4258 return len; 4259 } 4260 4261 static struct md_sysfs_entry md_min_sync = 4262 __ATTR(sync_min, S_IRUGO|S_IWUSR, min_sync_show, min_sync_store); 4263 4264 static ssize_t 4265 max_sync_show(mddev_t *mddev, char *page) 4266 { 4267 if (mddev->resync_max == MaxSector) 4268 return sprintf(page, "max\n"); 4269 else 4270 return sprintf(page, "%llu\n", 4271 (unsigned long long)mddev->resync_max); 4272 } 4273 static ssize_t 4274 max_sync_store(mddev_t *mddev, const char *buf, size_t len) 4275 { 4276 if (strncmp(buf, "max", 3) == 0) 4277 mddev->resync_max = MaxSector; 4278 else { 4279 unsigned long long max; 4280 if (strict_strtoull(buf, 10, &max)) 4281 return -EINVAL; 4282 if (max < mddev->resync_min) 4283 return -EINVAL; 4284 if (max < mddev->resync_max && 4285 mddev->ro == 0 && 4286 test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4287 return -EBUSY; 4288 4289 /* Must be a multiple of chunk_size */ 4290 if (mddev->chunk_sectors) { 4291 sector_t temp = max; 4292 if (sector_div(temp, mddev->chunk_sectors)) 4293 return -EINVAL; 4294 } 4295 mddev->resync_max = max; 4296 } 4297 wake_up(&mddev->recovery_wait); 4298 return len; 4299 } 4300 4301 static struct md_sysfs_entry md_max_sync = 4302 __ATTR(sync_max, S_IRUGO|S_IWUSR, max_sync_show, max_sync_store); 4303 4304 static ssize_t 4305 suspend_lo_show(mddev_t *mddev, char *page) 4306 { 4307 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_lo); 4308 } 4309 4310 static ssize_t 4311 suspend_lo_store(mddev_t *mddev, const char *buf, size_t len) 4312 { 4313 char *e; 4314 unsigned long long new = simple_strtoull(buf, &e, 10); 4315 unsigned long long old = mddev->suspend_lo; 4316 4317 if (mddev->pers == NULL || 4318 mddev->pers->quiesce == NULL) 4319 return -EINVAL; 4320 if (buf == e || (*e && *e != '\n')) 4321 return -EINVAL; 4322 4323 mddev->suspend_lo = new; 4324 if (new >= old) 4325 /* Shrinking suspended region */ 4326 mddev->pers->quiesce(mddev, 2); 4327 else { 4328 /* Expanding suspended region - need to wait */ 4329 mddev->pers->quiesce(mddev, 1); 4330 mddev->pers->quiesce(mddev, 0); 4331 } 4332 return len; 4333 } 4334 static struct md_sysfs_entry md_suspend_lo = 4335 
__ATTR(suspend_lo, S_IRUGO|S_IWUSR, suspend_lo_show, suspend_lo_store); 4336 4337 4338 static ssize_t 4339 suspend_hi_show(mddev_t *mddev, char *page) 4340 { 4341 return sprintf(page, "%llu\n", (unsigned long long)mddev->suspend_hi); 4342 } 4343 4344 static ssize_t 4345 suspend_hi_store(mddev_t *mddev, const char *buf, size_t len) 4346 { 4347 char *e; 4348 unsigned long long new = simple_strtoull(buf, &e, 10); 4349 unsigned long long old = mddev->suspend_hi; 4350 4351 if (mddev->pers == NULL || 4352 mddev->pers->quiesce == NULL) 4353 return -EINVAL; 4354 if (buf == e || (*e && *e != '\n')) 4355 return -EINVAL; 4356 4357 mddev->suspend_hi = new; 4358 if (new <= old) 4359 /* Shrinking suspended region */ 4360 mddev->pers->quiesce(mddev, 2); 4361 else { 4362 /* Expanding suspended region - need to wait */ 4363 mddev->pers->quiesce(mddev, 1); 4364 mddev->pers->quiesce(mddev, 0); 4365 } 4366 return len; 4367 } 4368 static struct md_sysfs_entry md_suspend_hi = 4369 __ATTR(suspend_hi, S_IRUGO|S_IWUSR, suspend_hi_show, suspend_hi_store); 4370 4371 static ssize_t 4372 reshape_position_show(mddev_t *mddev, char *page) 4373 { 4374 if (mddev->reshape_position != MaxSector) 4375 return sprintf(page, "%llu\n", 4376 (unsigned long long)mddev->reshape_position); 4377 strcpy(page, "none\n"); 4378 return 5; 4379 } 4380 4381 static ssize_t 4382 reshape_position_store(mddev_t *mddev, const char *buf, size_t len) 4383 { 4384 char *e; 4385 unsigned long long new = simple_strtoull(buf, &e, 10); 4386 if (mddev->pers) 4387 return -EBUSY; 4388 if (buf == e || (*e && *e != '\n')) 4389 return -EINVAL; 4390 mddev->reshape_position = new; 4391 mddev->delta_disks = 0; 4392 mddev->new_level = mddev->level; 4393 mddev->new_layout = mddev->layout; 4394 mddev->new_chunk_sectors = mddev->chunk_sectors; 4395 return len; 4396 } 4397 4398 static struct md_sysfs_entry md_reshape_position = 4399 __ATTR(reshape_position, S_IRUGO|S_IWUSR, reshape_position_show, 4400 reshape_position_store); 4401 4402 static ssize_t 4403 array_size_show(mddev_t *mddev, char *page) 4404 { 4405 if (mddev->external_size) 4406 return sprintf(page, "%llu\n", 4407 (unsigned long long)mddev->array_sectors/2); 4408 else 4409 return sprintf(page, "default\n"); 4410 } 4411 4412 static ssize_t 4413 array_size_store(mddev_t *mddev, const char *buf, size_t len) 4414 { 4415 sector_t sectors; 4416 4417 if (strncmp(buf, "default", 7) == 0) { 4418 if (mddev->pers) 4419 sectors = mddev->pers->size(mddev, 0, 0); 4420 else 4421 sectors = mddev->array_sectors; 4422 4423 mddev->external_size = 0; 4424 } else { 4425 if (strict_blocks_to_sectors(buf, &sectors) < 0) 4426 return -EINVAL; 4427 if (mddev->pers && mddev->pers->size(mddev, 0, 0) < sectors) 4428 return -E2BIG; 4429 4430 mddev->external_size = 1; 4431 } 4432 4433 mddev->array_sectors = sectors; 4434 if (mddev->pers) { 4435 set_capacity(mddev->gendisk, mddev->array_sectors); 4436 revalidate_disk(mddev->gendisk); 4437 } 4438 return len; 4439 } 4440 4441 static struct md_sysfs_entry md_array_size = 4442 __ATTR(array_size, S_IRUGO|S_IWUSR, array_size_show, 4443 array_size_store); 4444 4445 static struct attribute *md_default_attrs[] = { 4446 &md_level.attr, 4447 &md_layout.attr, 4448 &md_raid_disks.attr, 4449 &md_chunk_size.attr, 4450 &md_size.attr, 4451 &md_resync_start.attr, 4452 &md_metadata.attr, 4453 &md_new_device.attr, 4454 &md_safe_delay.attr, 4455 &md_array_state.attr, 4456 &md_reshape_position.attr, 4457 &md_array_size.attr, 4458 &max_corr_read_errors.attr, 4459 NULL, 4460 }; 4461 4462 static struct attribute
*md_redundancy_attrs[] = { 4463 &md_scan_mode.attr, 4464 &md_mismatches.attr, 4465 &md_sync_min.attr, 4466 &md_sync_max.attr, 4467 &md_sync_speed.attr, 4468 &md_sync_force_parallel.attr, 4469 &md_sync_completed.attr, 4470 &md_min_sync.attr, 4471 &md_max_sync.attr, 4472 &md_suspend_lo.attr, 4473 &md_suspend_hi.attr, 4474 &md_bitmap.attr, 4475 &md_degraded.attr, 4476 NULL, 4477 }; 4478 static struct attribute_group md_redundancy_group = { 4479 .name = NULL, 4480 .attrs = md_redundancy_attrs, 4481 }; 4482 4483 4484 static ssize_t 4485 md_attr_show(struct kobject *kobj, struct attribute *attr, char *page) 4486 { 4487 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 4488 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); 4489 ssize_t rv; 4490 4491 if (!entry->show) 4492 return -EIO; 4493 rv = mddev_lock(mddev); 4494 if (!rv) { 4495 rv = entry->show(mddev, page); 4496 mddev_unlock(mddev); 4497 } 4498 return rv; 4499 } 4500 4501 static ssize_t 4502 md_attr_store(struct kobject *kobj, struct attribute *attr, 4503 const char *page, size_t length) 4504 { 4505 struct md_sysfs_entry *entry = container_of(attr, struct md_sysfs_entry, attr); 4506 mddev_t *mddev = container_of(kobj, struct mddev_s, kobj); 4507 ssize_t rv; 4508 4509 if (!entry->store) 4510 return -EIO; 4511 if (!capable(CAP_SYS_ADMIN)) 4512 return -EACCES; 4513 rv = mddev_lock(mddev); 4514 if (mddev->hold_active == UNTIL_IOCTL) 4515 mddev->hold_active = 0; 4516 if (!rv) { 4517 rv = entry->store(mddev, page, length); 4518 mddev_unlock(mddev); 4519 } 4520 return rv; 4521 } 4522 4523 static void md_free(struct kobject *ko) 4524 { 4525 mddev_t *mddev = container_of(ko, mddev_t, kobj); 4526 4527 if (mddev->sysfs_state) 4528 sysfs_put(mddev->sysfs_state); 4529 4530 if (mddev->gendisk) { 4531 del_gendisk(mddev->gendisk); 4532 put_disk(mddev->gendisk); 4533 } 4534 if (mddev->queue) 4535 blk_cleanup_queue(mddev->queue); 4536 4537 kfree(mddev); 4538 } 4539 4540 static const struct sysfs_ops md_sysfs_ops = { 4541 .show = md_attr_show, 4542 .store = md_attr_store, 4543 }; 4544 static struct kobj_type md_ktype = { 4545 .release = md_free, 4546 .sysfs_ops = &md_sysfs_ops, 4547 .default_attrs = md_default_attrs, 4548 }; 4549 4550 int mdp_major = 0; 4551 4552 static void mddev_delayed_delete(struct work_struct *ws) 4553 { 4554 mddev_t *mddev = container_of(ws, mddev_t, del_work); 4555 4556 sysfs_remove_group(&mddev->kobj, &md_bitmap_group); 4557 kobject_del(&mddev->kobj); 4558 kobject_put(&mddev->kobj); 4559 } 4560 4561 static int md_alloc(dev_t dev, char *name) 4562 { 4563 static DEFINE_MUTEX(disks_mutex); 4564 mddev_t *mddev = mddev_find(dev); 4565 struct gendisk *disk; 4566 int partitioned; 4567 int shift; 4568 int unit; 4569 int error; 4570 4571 if (!mddev) 4572 return -ENODEV; 4573 4574 partitioned = (MAJOR(mddev->unit) != MD_MAJOR); 4575 shift = partitioned ? MdpMinorShift : 0; 4576 unit = MINOR(mddev->unit) >> shift; 4577 4578 /* wait for any previous instance of this device to be 4579 * completely removed (mddev_delayed_delete). 4580 */ 4581 flush_workqueue(md_misc_wq); 4582 4583 mutex_lock(&disks_mutex); 4584 error = -EEXIST; 4585 if (mddev->gendisk) 4586 goto abort; 4587 4588 if (name) { 4589 /* Need to ensure that 'name' is not a duplicate. 
4590 */ 4591 mddev_t *mddev2; 4592 spin_lock(&all_mddevs_lock); 4593 4594 list_for_each_entry(mddev2, &all_mddevs, all_mddevs) 4595 if (mddev2->gendisk && 4596 strcmp(mddev2->gendisk->disk_name, name) == 0) { 4597 spin_unlock(&all_mddevs_lock); 4598 goto abort; 4599 } 4600 spin_unlock(&all_mddevs_lock); 4601 } 4602 4603 error = -ENOMEM; 4604 mddev->queue = blk_alloc_queue(GFP_KERNEL); 4605 if (!mddev->queue) 4606 goto abort; 4607 mddev->queue->queuedata = mddev; 4608 4609 blk_queue_make_request(mddev->queue, md_make_request); 4610 4611 disk = alloc_disk(1 << shift); 4612 if (!disk) { 4613 blk_cleanup_queue(mddev->queue); 4614 mddev->queue = NULL; 4615 goto abort; 4616 } 4617 disk->major = MAJOR(mddev->unit); 4618 disk->first_minor = unit << shift; 4619 if (name) 4620 strcpy(disk->disk_name, name); 4621 else if (partitioned) 4622 sprintf(disk->disk_name, "md_d%d", unit); 4623 else 4624 sprintf(disk->disk_name, "md%d", unit); 4625 disk->fops = &md_fops; 4626 disk->private_data = mddev; 4627 disk->queue = mddev->queue; 4628 blk_queue_flush(mddev->queue, REQ_FLUSH | REQ_FUA); 4629 /* Allow extended partitions. This makes the 4630 * 'mdp' device redundant, but we can't really 4631 * remove it now. 4632 */ 4633 disk->flags |= GENHD_FL_EXT_DEVT; 4634 mddev->gendisk = disk; 4635 /* As soon as we call add_disk(), another thread could get 4636 * through to md_open, so make sure it doesn't get too far 4637 */ 4638 mutex_lock(&mddev->open_mutex); 4639 add_disk(disk); 4640 4641 error = kobject_init_and_add(&mddev->kobj, &md_ktype, 4642 &disk_to_dev(disk)->kobj, "%s", "md"); 4643 if (error) { 4644 /* This isn't possible, but as kobject_init_and_add is marked 4645 * __must_check, we must do something with the result 4646 */ 4647 printk(KERN_WARNING "md: cannot register %s/md - name in use\n", 4648 disk->disk_name); 4649 error = 0; 4650 } 4651 if (mddev->kobj.sd && 4652 sysfs_create_group(&mddev->kobj, &md_bitmap_group)) 4653 printk(KERN_DEBUG "pointless warning\n"); 4654 mutex_unlock(&mddev->open_mutex); 4655 abort: 4656 mutex_unlock(&disks_mutex); 4657 if (!error && mddev->kobj.sd) { 4658 kobject_uevent(&mddev->kobj, KOBJ_ADD); 4659 mddev->sysfs_state = sysfs_get_dirent_safe(mddev->kobj.sd, "array_state"); 4660 } 4661 mddev_put(mddev); 4662 return error; 4663 } 4664 4665 static struct kobject *md_probe(dev_t dev, int *part, void *data) 4666 { 4667 md_alloc(dev, NULL); 4668 return NULL; 4669 } 4670 4671 static int add_named_array(const char *val, struct kernel_param *kp) 4672 { 4673 /* val must be "md_*" where * is not all digits. 4674 * We allocate an array with a large free minor number, and 4675 * set the name to val. val must not already be an active name. 
4676 */ 4677 int len = strlen(val); 4678 char buf[DISK_NAME_LEN]; 4679 4680 while (len && val[len-1] == '\n') 4681 len--; 4682 if (len >= DISK_NAME_LEN) 4683 return -E2BIG; 4684 strlcpy(buf, val, len+1); 4685 if (strncmp(buf, "md_", 3) != 0) 4686 return -EINVAL; 4687 return md_alloc(0, buf); 4688 } 4689 4690 static void md_safemode_timeout(unsigned long data) 4691 { 4692 mddev_t *mddev = (mddev_t *) data; 4693 4694 if (!atomic_read(&mddev->writes_pending)) { 4695 mddev->safemode = 1; 4696 if (mddev->external) 4697 sysfs_notify_dirent_safe(mddev->sysfs_state); 4698 } 4699 md_wakeup_thread(mddev->thread); 4700 } 4701 4702 static int start_dirty_degraded; 4703 4704 int md_run(mddev_t *mddev) 4705 { 4706 int err; 4707 mdk_rdev_t *rdev; 4708 struct mdk_personality *pers; 4709 4710 if (list_empty(&mddev->disks)) 4711 /* cannot run an array with no devices.. */ 4712 return -EINVAL; 4713 4714 if (mddev->pers) 4715 return -EBUSY; 4716 /* Cannot run until previous stop completes properly */ 4717 if (mddev->sysfs_active) 4718 return -EBUSY; 4719 4720 /* 4721 * Analyze all RAID superblock(s) 4722 */ 4723 if (!mddev->raid_disks) { 4724 if (!mddev->persistent) 4725 return -EINVAL; 4726 analyze_sbs(mddev); 4727 } 4728 4729 if (mddev->level != LEVEL_NONE) 4730 request_module("md-level-%d", mddev->level); 4731 else if (mddev->clevel[0]) 4732 request_module("md-%s", mddev->clevel); 4733 4734 /* 4735 * Drop all container device buffers, from now on 4736 * the only valid external interface is through the md 4737 * device. 4738 */ 4739 list_for_each_entry(rdev, &mddev->disks, same_set) { 4740 if (test_bit(Faulty, &rdev->flags)) 4741 continue; 4742 sync_blockdev(rdev->bdev); 4743 invalidate_bdev(rdev->bdev); 4744 4745 /* perform some consistency tests on the device. 4746 * We don't want the data to overlap the metadata, 4747 * Internal Bitmap issues have been handled elsewhere. 4748 */ 4749 if (rdev->meta_bdev) { 4750 /* Nothing to check */; 4751 } else if (rdev->data_offset < rdev->sb_start) { 4752 if (mddev->dev_sectors && 4753 rdev->data_offset + mddev->dev_sectors 4754 > rdev->sb_start) { 4755 printk("md: %s: data overlaps metadata\n", 4756 mdname(mddev)); 4757 return -EINVAL; 4758 } 4759 } else { 4760 if (rdev->sb_start + rdev->sb_size/512 4761 > rdev->data_offset) { 4762 printk("md: %s: metadata overlaps data\n", 4763 mdname(mddev)); 4764 return -EINVAL; 4765 } 4766 } 4767 sysfs_notify_dirent_safe(rdev->sysfs_state); 4768 } 4769 4770 if (mddev->bio_set == NULL) 4771 mddev->bio_set = bioset_create(BIO_POOL_SIZE, 4772 sizeof(mddev_t *)); 4773 4774 spin_lock(&pers_lock); 4775 pers = find_pers(mddev->level, mddev->clevel); 4776 if (!pers || !try_module_get(pers->owner)) { 4777 spin_unlock(&pers_lock); 4778 if (mddev->level != LEVEL_NONE) 4779 printk(KERN_WARNING "md: personality for level %d is not loaded!\n", 4780 mddev->level); 4781 else 4782 printk(KERN_WARNING "md: personality for level %s is not loaded!\n", 4783 mddev->clevel); 4784 return -EINVAL; 4785 } 4786 mddev->pers = pers; 4787 spin_unlock(&pers_lock); 4788 if (mddev->level != pers->level) { 4789 mddev->level = pers->level; 4790 mddev->new_level = pers->level; 4791 } 4792 strlcpy(mddev->clevel, pers->name, sizeof(mddev->clevel)); 4793 4794 if (mddev->reshape_position != MaxSector && 4795 pers->start_reshape == NULL) { 4796 /* This personality cannot handle reshaping... 
*/ 4797 mddev->pers = NULL; 4798 module_put(pers->owner); 4799 return -EINVAL; 4800 } 4801 4802 if (pers->sync_request) { 4803 /* Warn if this is a potentially silly 4804 * configuration. 4805 */ 4806 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 4807 mdk_rdev_t *rdev2; 4808 int warned = 0; 4809 4810 list_for_each_entry(rdev, &mddev->disks, same_set) 4811 list_for_each_entry(rdev2, &mddev->disks, same_set) { 4812 if (rdev < rdev2 && 4813 rdev->bdev->bd_contains == 4814 rdev2->bdev->bd_contains) { 4815 printk(KERN_WARNING 4816 "%s: WARNING: %s appears to be" 4817 " on the same physical disk as" 4818 " %s.\n", 4819 mdname(mddev), 4820 bdevname(rdev->bdev,b), 4821 bdevname(rdev2->bdev,b2)); 4822 warned = 1; 4823 } 4824 } 4825 4826 if (warned) 4827 printk(KERN_WARNING 4828 "True protection against single-disk" 4829 " failure might be compromised.\n"); 4830 } 4831 4832 mddev->recovery = 0; 4833 /* may be over-ridden by personality */ 4834 mddev->resync_max_sectors = mddev->dev_sectors; 4835 4836 mddev->ok_start_degraded = start_dirty_degraded; 4837 4838 if (start_readonly && mddev->ro == 0) 4839 mddev->ro = 2; /* read-only, but switch on first write */ 4840 4841 err = mddev->pers->run(mddev); 4842 if (err) 4843 printk(KERN_ERR "md: pers->run() failed ...\n"); 4844 else if (mddev->pers->size(mddev, 0, 0) < mddev->array_sectors) { 4845 WARN_ONCE(!mddev->external_size, "%s: default size too small," 4846 " but 'external_size' not in effect?\n", __func__); 4847 printk(KERN_ERR 4848 "md: invalid array_size %llu > default size %llu\n", 4849 (unsigned long long)mddev->array_sectors / 2, 4850 (unsigned long long)mddev->pers->size(mddev, 0, 0) / 2); 4851 err = -EINVAL; 4852 mddev->pers->stop(mddev); 4853 } 4854 if (err == 0 && mddev->pers->sync_request) { 4855 err = bitmap_create(mddev); 4856 if (err) { 4857 printk(KERN_ERR "%s: failed to create bitmap (%d)\n", 4858 mdname(mddev), err); 4859 mddev->pers->stop(mddev); 4860 } 4861 } 4862 if (err) { 4863 module_put(mddev->pers->owner); 4864 mddev->pers = NULL; 4865 bitmap_destroy(mddev); 4866 return err; 4867 } 4868 if (mddev->pers->sync_request) { 4869 if (mddev->kobj.sd && 4870 sysfs_create_group(&mddev->kobj, &md_redundancy_group)) 4871 printk(KERN_WARNING 4872 "md: cannot register extra attributes for %s\n", 4873 mdname(mddev)); 4874 mddev->sysfs_action = sysfs_get_dirent_safe(mddev->kobj.sd, "sync_action"); 4875 } else if (mddev->ro == 2) /* auto-readonly not meaningful */ 4876 mddev->ro = 0; 4877 4878 atomic_set(&mddev->writes_pending,0); 4879 atomic_set(&mddev->max_corr_read_errors, 4880 MD_DEFAULT_MAX_CORRECTED_READ_ERRORS); 4881 mddev->safemode = 0; 4882 mddev->safemode_timer.function = md_safemode_timeout; 4883 mddev->safemode_timer.data = (unsigned long) mddev; 4884 mddev->safemode_delay = (200 * HZ)/1000 +1; /* 200 msec delay */ 4885 mddev->in_sync = 1; 4886 smp_wmb(); 4887 mddev->ready = 1; 4888 list_for_each_entry(rdev, &mddev->disks, same_set) 4889 if (rdev->raid_disk >= 0) 4890 if (sysfs_link_rdev(mddev, rdev)) 4891 /* failure here is OK */; 4892 4893 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4894 4895 if (mddev->flags) 4896 md_update_sb(mddev, 0); 4897 4898 md_new_event(mddev); 4899 sysfs_notify_dirent_safe(mddev->sysfs_state); 4900 sysfs_notify_dirent_safe(mddev->sysfs_action); 4901 sysfs_notify(&mddev->kobj, NULL, "degraded"); 4902 return 0; 4903 } 4904 EXPORT_SYMBOL_GPL(md_run); 4905 4906 static int do_md_run(mddev_t *mddev) 4907 { 4908 int err; 4909 4910 err = md_run(mddev); 4911 if (err) 4912 goto out; 4913 err = bitmap_load(mddev); 
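/* bitmap_load() can fail even though bitmap_create() succeeded in
 * md_run(); in that case tear the bitmap down again rather than run
 * with a bitmap that does not match the on-disk state:
 */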
4914 if (err) { 4915 bitmap_destroy(mddev); 4916 goto out; 4917 } 4918 4919 md_wakeup_thread(mddev->thread); 4920 md_wakeup_thread(mddev->sync_thread); /* possibly kick off a reshape */ 4921 4922 set_capacity(mddev->gendisk, mddev->array_sectors); 4923 revalidate_disk(mddev->gendisk); 4924 mddev->changed = 1; 4925 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 4926 out: 4927 return err; 4928 } 4929 4930 static int restart_array(mddev_t *mddev) 4931 { 4932 struct gendisk *disk = mddev->gendisk; 4933 4934 /* Complain if it has no devices */ 4935 if (list_empty(&mddev->disks)) 4936 return -ENXIO; 4937 if (!mddev->pers) 4938 return -EINVAL; 4939 if (!mddev->ro) 4940 return -EBUSY; 4941 mddev->safemode = 0; 4942 mddev->ro = 0; 4943 set_disk_ro(disk, 0); 4944 printk(KERN_INFO "md: %s switched to read-write mode.\n", 4945 mdname(mddev)); 4946 /* Kick recovery or resync if necessary */ 4947 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4948 md_wakeup_thread(mddev->thread); 4949 md_wakeup_thread(mddev->sync_thread); 4950 sysfs_notify_dirent_safe(mddev->sysfs_state); 4951 return 0; 4952 } 4953 4954 /* similar to deny_write_access, but accounts for our holding a reference 4955 * to the file ourselves */ 4956 static int deny_bitmap_write_access(struct file * file) 4957 { 4958 struct inode *inode = file->f_mapping->host; 4959 4960 spin_lock(&inode->i_lock); 4961 if (atomic_read(&inode->i_writecount) > 1) { 4962 spin_unlock(&inode->i_lock); 4963 return -ETXTBSY; 4964 } 4965 atomic_set(&inode->i_writecount, -1); 4966 spin_unlock(&inode->i_lock); 4967 4968 return 0; 4969 } 4970 4971 void restore_bitmap_write_access(struct file *file) 4972 { 4973 struct inode *inode = file->f_mapping->host; 4974 4975 spin_lock(&inode->i_lock); 4976 atomic_set(&inode->i_writecount, 1); 4977 spin_unlock(&inode->i_lock); 4978 } 4979 4980 static void md_clean(mddev_t *mddev) 4981 { 4982 mddev->array_sectors = 0; 4983 mddev->external_size = 0; 4984 mddev->dev_sectors = 0; 4985 mddev->raid_disks = 0; 4986 mddev->recovery_cp = 0; 4987 mddev->resync_min = 0; 4988 mddev->resync_max = MaxSector; 4989 mddev->reshape_position = MaxSector; 4990 mddev->external = 0; 4991 mddev->persistent = 0; 4992 mddev->level = LEVEL_NONE; 4993 mddev->clevel[0] = 0; 4994 mddev->flags = 0; 4995 mddev->ro = 0; 4996 mddev->metadata_type[0] = 0; 4997 mddev->chunk_sectors = 0; 4998 mddev->ctime = mddev->utime = 0; 4999 mddev->layout = 0; 5000 mddev->max_disks = 0; 5001 mddev->events = 0; 5002 mddev->can_decrease_events = 0; 5003 mddev->delta_disks = 0; 5004 mddev->new_level = LEVEL_NONE; 5005 mddev->new_layout = 0; 5006 mddev->new_chunk_sectors = 0; 5007 mddev->curr_resync = 0; 5008 mddev->resync_mismatches = 0; 5009 mddev->suspend_lo = mddev->suspend_hi = 0; 5010 mddev->sync_speed_min = mddev->sync_speed_max = 0; 5011 mddev->recovery = 0; 5012 mddev->in_sync = 0; 5013 mddev->changed = 0; 5014 mddev->degraded = 0; 5015 mddev->safemode = 0; 5016 mddev->bitmap_info.offset = 0; 5017 mddev->bitmap_info.default_offset = 0; 5018 mddev->bitmap_info.chunksize = 0; 5019 mddev->bitmap_info.daemon_sleep = 0; 5020 mddev->bitmap_info.max_write_behind = 0; 5021 } 5022 5023 static void __md_stop_writes(mddev_t *mddev) 5024 { 5025 if (mddev->sync_thread) { 5026 set_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5027 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 5028 reap_sync_thread(mddev); 5029 } 5030 5031 del_timer_sync(&mddev->safemode_timer); 5032 5033 bitmap_flush(mddev); 5034 md_super_wait(mddev); 5035 5036 if (!mddev->in_sync || mddev->flags) { 5037 
/* mark array as shutdown cleanly */ 5038 mddev->in_sync = 1; 5039 md_update_sb(mddev, 1); 5040 } 5041 } 5042 5043 void md_stop_writes(mddev_t *mddev) 5044 { 5045 mddev_lock(mddev); 5046 __md_stop_writes(mddev); 5047 mddev_unlock(mddev); 5048 } 5049 EXPORT_SYMBOL_GPL(md_stop_writes); 5050 5051 void md_stop(mddev_t *mddev) 5052 { 5053 mddev->ready = 0; 5054 mddev->pers->stop(mddev); 5055 if (mddev->pers->sync_request && mddev->to_remove == NULL) 5056 mddev->to_remove = &md_redundancy_group; 5057 module_put(mddev->pers->owner); 5058 mddev->pers = NULL; 5059 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5060 } 5061 EXPORT_SYMBOL_GPL(md_stop); 5062 5063 static int md_set_readonly(mddev_t *mddev, int is_open) 5064 { 5065 int err = 0; 5066 mutex_lock(&mddev->open_mutex); 5067 if (atomic_read(&mddev->openers) > is_open) { 5068 printk("md: %s still in use.\n",mdname(mddev)); 5069 err = -EBUSY; 5070 goto out; 5071 } 5072 if (mddev->pers) { 5073 __md_stop_writes(mddev); 5074 5075 err = -ENXIO; 5076 if (mddev->ro==1) 5077 goto out; 5078 mddev->ro = 1; 5079 set_disk_ro(mddev->gendisk, 1); 5080 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5081 sysfs_notify_dirent_safe(mddev->sysfs_state); 5082 err = 0; 5083 } 5084 out: 5085 mutex_unlock(&mddev->open_mutex); 5086 return err; 5087 } 5088 5089 /* mode: 5090 * 0 - completely stop and dis-assemble array 5091 * 2 - stop but do not disassemble array 5092 */ 5093 static int do_md_stop(mddev_t * mddev, int mode, int is_open) 5094 { 5095 struct gendisk *disk = mddev->gendisk; 5096 mdk_rdev_t *rdev; 5097 5098 mutex_lock(&mddev->open_mutex); 5099 if (atomic_read(&mddev->openers) > is_open || 5100 mddev->sysfs_active) { 5101 printk("md: %s still in use.\n",mdname(mddev)); 5102 mutex_unlock(&mddev->open_mutex); 5103 return -EBUSY; 5104 } 5105 5106 if (mddev->pers) { 5107 if (mddev->ro) 5108 set_disk_ro(disk, 0); 5109 5110 __md_stop_writes(mddev); 5111 md_stop(mddev); 5112 mddev->queue->merge_bvec_fn = NULL; 5113 mddev->queue->backing_dev_info.congested_fn = NULL; 5114 5115 /* tell userspace to handle 'inactive' */ 5116 sysfs_notify_dirent_safe(mddev->sysfs_state); 5117 5118 list_for_each_entry(rdev, &mddev->disks, same_set) 5119 if (rdev->raid_disk >= 0) 5120 sysfs_unlink_rdev(mddev, rdev); 5121 5122 set_capacity(disk, 0); 5123 mutex_unlock(&mddev->open_mutex); 5124 mddev->changed = 1; 5125 revalidate_disk(disk); 5126 5127 if (mddev->ro) 5128 mddev->ro = 0; 5129 } else 5130 mutex_unlock(&mddev->open_mutex); 5131 /* 5132 * Free resources if final stop 5133 */ 5134 if (mode == 0) { 5135 printk(KERN_INFO "md: %s stopped.\n", mdname(mddev)); 5136 5137 bitmap_destroy(mddev); 5138 if (mddev->bitmap_info.file) { 5139 restore_bitmap_write_access(mddev->bitmap_info.file); 5140 fput(mddev->bitmap_info.file); 5141 mddev->bitmap_info.file = NULL; 5142 } 5143 mddev->bitmap_info.offset = 0; 5144 5145 export_array(mddev); 5146 5147 md_clean(mddev); 5148 kobject_uevent(&disk_to_dev(mddev->gendisk)->kobj, KOBJ_CHANGE); 5149 if (mddev->hold_active == UNTIL_STOP) 5150 mddev->hold_active = 0; 5151 } 5152 blk_integrity_unregister(disk); 5153 md_new_event(mddev); 5154 sysfs_notify_dirent_safe(mddev->sysfs_state); 5155 return 0; 5156 } 5157 5158 #ifndef MODULE 5159 static void autorun_array(mddev_t *mddev) 5160 { 5161 mdk_rdev_t *rdev; 5162 int err; 5163 5164 if (list_empty(&mddev->disks)) 5165 return; 5166 5167 printk(KERN_INFO "md: running: "); 5168 5169 list_for_each_entry(rdev, &mddev->disks, same_set) { 5170 char b[BDEVNAME_SIZE]; 5171 printk("<%s>", bdevname(rdev->bdev,b)); 
5172 } 5173 printk("\n"); 5174 5175 err = do_md_run(mddev); 5176 if (err) { 5177 printk(KERN_WARNING "md: do_md_run() returned %d\n", err); 5178 do_md_stop(mddev, 0, 0); 5179 } 5180 } 5181 5182 /* 5183 * let's try to run arrays based on all disks that have arrived 5184 * until now. (those are in pending_raid_disks) 5185 * 5186 * the method: pick the first pending disk, collect all disks with 5187 * the same UUID, remove all from the pending list and put them into 5188 * the 'same_array' list. Then order this list based on superblock 5189 * update time (freshest comes first), kick out 'old' disks and 5190 * compare superblocks. If everything's fine then run it. 5191 * 5192 * If "unit" is allocated, then bump its reference count 5193 */ 5194 static void autorun_devices(int part) 5195 { 5196 mdk_rdev_t *rdev0, *rdev, *tmp; 5197 mddev_t *mddev; 5198 char b[BDEVNAME_SIZE]; 5199 5200 printk(KERN_INFO "md: autorun ...\n"); 5201 while (!list_empty(&pending_raid_disks)) { 5202 int unit; 5203 dev_t dev; 5204 LIST_HEAD(candidates); 5205 rdev0 = list_entry(pending_raid_disks.next, 5206 mdk_rdev_t, same_set); 5207 5208 printk(KERN_INFO "md: considering %s ...\n", 5209 bdevname(rdev0->bdev,b)); 5210 INIT_LIST_HEAD(&candidates); 5211 rdev_for_each_list(rdev, tmp, &pending_raid_disks) 5212 if (super_90_load(rdev, rdev0, 0) >= 0) { 5213 printk(KERN_INFO "md: adding %s ...\n", 5214 bdevname(rdev->bdev,b)); 5215 list_move(&rdev->same_set, &candidates); 5216 } 5217 /* 5218 * now we have a set of devices, with all of them having 5219 * mostly sane superblocks. It's time to allocate the 5220 * mddev. 5221 */ 5222 if (part) { 5223 dev = MKDEV(mdp_major, 5224 rdev0->preferred_minor << MdpMinorShift); 5225 unit = MINOR(dev) >> MdpMinorShift; 5226 } else { 5227 dev = MKDEV(MD_MAJOR, rdev0->preferred_minor); 5228 unit = MINOR(dev); 5229 } 5230 if (rdev0->preferred_minor != unit) { 5231 printk(KERN_INFO "md: unit number in %s is bad: %d\n", 5232 bdevname(rdev0->bdev, b), rdev0->preferred_minor); 5233 break; 5234 } 5235 5236 md_probe(dev, NULL, NULL); 5237 mddev = mddev_find(dev); 5238 if (!mddev || !mddev->gendisk) { 5239 if (mddev) 5240 mddev_put(mddev); 5241 printk(KERN_ERR 5242 "md: cannot allocate memory for md drive.\n"); 5243 break; 5244 } 5245 if (mddev_lock(mddev)) 5246 printk(KERN_WARNING "md: %s locked, cannot run\n", 5247 mdname(mddev)); 5248 else if (mddev->raid_disks || mddev->major_version 5249 || !list_empty(&mddev->disks)) { 5250 printk(KERN_WARNING 5251 "md: %s already running, cannot run %s\n", 5252 mdname(mddev), bdevname(rdev0->bdev,b)); 5253 mddev_unlock(mddev); 5254 } else { 5255 printk(KERN_INFO "md: created %s\n", mdname(mddev)); 5256 mddev->persistent = 1; 5257 rdev_for_each_list(rdev, tmp, &candidates) { 5258 list_del_init(&rdev->same_set); 5259 if (bind_rdev_to_array(rdev, mddev)) 5260 export_rdev(rdev); 5261 } 5262 autorun_array(mddev); 5263 mddev_unlock(mddev); 5264 } 5265 /* on success, candidates will be empty, on error 5266 * it won't... 5267 */ 5268 rdev_for_each_list(rdev, tmp, &candidates) { 5269 list_del_init(&rdev->same_set); 5270 export_rdev(rdev); 5271 } 5272 mddev_put(mddev); 5273 } 5274 printk(KERN_INFO "md: ...
autorun DONE.\n"); 5275 } 5276 #endif /* !MODULE */ 5277 5278 static int get_version(void __user * arg) 5279 { 5280 mdu_version_t ver; 5281 5282 ver.major = MD_MAJOR_VERSION; 5283 ver.minor = MD_MINOR_VERSION; 5284 ver.patchlevel = MD_PATCHLEVEL_VERSION; 5285 5286 if (copy_to_user(arg, &ver, sizeof(ver))) 5287 return -EFAULT; 5288 5289 return 0; 5290 } 5291 5292 static int get_array_info(mddev_t * mddev, void __user * arg) 5293 { 5294 mdu_array_info_t info; 5295 int nr,working,insync,failed,spare; 5296 mdk_rdev_t *rdev; 5297 5298 nr=working=insync=failed=spare=0; 5299 list_for_each_entry(rdev, &mddev->disks, same_set) { 5300 nr++; 5301 if (test_bit(Faulty, &rdev->flags)) 5302 failed++; 5303 else { 5304 working++; 5305 if (test_bit(In_sync, &rdev->flags)) 5306 insync++; 5307 else 5308 spare++; 5309 } 5310 } 5311 5312 info.major_version = mddev->major_version; 5313 info.minor_version = mddev->minor_version; 5314 info.patch_version = MD_PATCHLEVEL_VERSION; 5315 info.ctime = mddev->ctime; 5316 info.level = mddev->level; 5317 info.size = mddev->dev_sectors / 2; 5318 if (info.size != mddev->dev_sectors / 2) /* overflow */ 5319 info.size = -1; 5320 info.nr_disks = nr; 5321 info.raid_disks = mddev->raid_disks; 5322 info.md_minor = mddev->md_minor; 5323 info.not_persistent= !mddev->persistent; 5324 5325 info.utime = mddev->utime; 5326 info.state = 0; 5327 if (mddev->in_sync) 5328 info.state = (1<<MD_SB_CLEAN); 5329 if (mddev->bitmap && mddev->bitmap_info.offset) 5330 info.state = (1<<MD_SB_BITMAP_PRESENT); 5331 info.active_disks = insync; 5332 info.working_disks = working; 5333 info.failed_disks = failed; 5334 info.spare_disks = spare; 5335 5336 info.layout = mddev->layout; 5337 info.chunk_size = mddev->chunk_sectors << 9; 5338 5339 if (copy_to_user(arg, &info, sizeof(info))) 5340 return -EFAULT; 5341 5342 return 0; 5343 } 5344 5345 static int get_bitmap_file(mddev_t * mddev, void __user * arg) 5346 { 5347 mdu_bitmap_file_t *file = NULL; /* too big for stack allocation */ 5348 char *ptr, *buf = NULL; 5349 int err = -ENOMEM; 5350 5351 if (md_allow_write(mddev)) 5352 file = kmalloc(sizeof(*file), GFP_NOIO); 5353 else 5354 file = kmalloc(sizeof(*file), GFP_KERNEL); 5355 5356 if (!file) 5357 goto out; 5358 5359 /* bitmap disabled, zero the first byte and copy out */ 5360 if (!mddev->bitmap || !mddev->bitmap->file) { 5361 file->pathname[0] = '\0'; 5362 goto copy_out; 5363 } 5364 5365 buf = kmalloc(sizeof(file->pathname), GFP_KERNEL); 5366 if (!buf) 5367 goto out; 5368 5369 ptr = d_path(&mddev->bitmap->file->f_path, buf, sizeof(file->pathname)); 5370 if (IS_ERR(ptr)) 5371 goto out; 5372 5373 strcpy(file->pathname, ptr); 5374 5375 copy_out: 5376 err = 0; 5377 if (copy_to_user(arg, file, sizeof(*file))) 5378 err = -EFAULT; 5379 out: 5380 kfree(buf); 5381 kfree(file); 5382 return err; 5383 } 5384 5385 static int get_disk_info(mddev_t * mddev, void __user * arg) 5386 { 5387 mdu_disk_info_t info; 5388 mdk_rdev_t *rdev; 5389 5390 if (copy_from_user(&info, arg, sizeof(info))) 5391 return -EFAULT; 5392 5393 rdev = find_rdev_nr(mddev, info.number); 5394 if (rdev) { 5395 info.major = MAJOR(rdev->bdev->bd_dev); 5396 info.minor = MINOR(rdev->bdev->bd_dev); 5397 info.raid_disk = rdev->raid_disk; 5398 info.state = 0; 5399 if (test_bit(Faulty, &rdev->flags)) 5400 info.state |= (1<<MD_DISK_FAULTY); 5401 else if (test_bit(In_sync, &rdev->flags)) { 5402 info.state |= (1<<MD_DISK_ACTIVE); 5403 info.state |= (1<<MD_DISK_SYNC); 5404 } 5405 if (test_bit(WriteMostly, &rdev->flags)) 5406 info.state |= (1<<MD_DISK_WRITEMOSTLY); 
5407 } else { 5408 info.major = info.minor = 0; 5409 info.raid_disk = -1; 5410 info.state = (1<<MD_DISK_REMOVED); 5411 } 5412 5413 if (copy_to_user(arg, &info, sizeof(info))) 5414 return -EFAULT; 5415 5416 return 0; 5417 } 5418 5419 static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info) 5420 { 5421 char b[BDEVNAME_SIZE], b2[BDEVNAME_SIZE]; 5422 mdk_rdev_t *rdev; 5423 dev_t dev = MKDEV(info->major,info->minor); 5424 5425 if (info->major != MAJOR(dev) || info->minor != MINOR(dev)) 5426 return -EOVERFLOW; 5427 5428 if (!mddev->raid_disks) { 5429 int err; 5430 /* expecting a device which has a superblock */ 5431 rdev = md_import_device(dev, mddev->major_version, mddev->minor_version); 5432 if (IS_ERR(rdev)) { 5433 printk(KERN_WARNING 5434 "md: md_import_device returned %ld\n", 5435 PTR_ERR(rdev)); 5436 return PTR_ERR(rdev); 5437 } 5438 if (!list_empty(&mddev->disks)) { 5439 mdk_rdev_t *rdev0 = list_entry(mddev->disks.next, 5440 mdk_rdev_t, same_set); 5441 err = super_types[mddev->major_version] 5442 .load_super(rdev, rdev0, mddev->minor_version); 5443 if (err < 0) { 5444 printk(KERN_WARNING 5445 "md: %s has different UUID to %s\n", 5446 bdevname(rdev->bdev,b), 5447 bdevname(rdev0->bdev,b2)); 5448 export_rdev(rdev); 5449 return -EINVAL; 5450 } 5451 } 5452 err = bind_rdev_to_array(rdev, mddev); 5453 if (err) 5454 export_rdev(rdev); 5455 return err; 5456 } 5457 5458 /* 5459 * add_new_disk can be used once the array is assembled 5460 * to add "hot spares". They must already have a superblock 5461 * written 5462 */ 5463 if (mddev->pers) { 5464 int err; 5465 if (!mddev->pers->hot_add_disk) { 5466 printk(KERN_WARNING 5467 "%s: personality does not support diskops!\n", 5468 mdname(mddev)); 5469 return -EINVAL; 5470 } 5471 if (mddev->persistent) 5472 rdev = md_import_device(dev, mddev->major_version, 5473 mddev->minor_version); 5474 else 5475 rdev = md_import_device(dev, -1, -1); 5476 if (IS_ERR(rdev)) { 5477 printk(KERN_WARNING 5478 "md: md_import_device returned %ld\n", 5479 PTR_ERR(rdev)); 5480 return PTR_ERR(rdev); 5481 } 5482 /* set saved_raid_disk if appropriate */ 5483 if (!mddev->persistent) { 5484 if (info->state & (1<<MD_DISK_SYNC) && 5485 info->raid_disk < mddev->raid_disks) { 5486 rdev->raid_disk = info->raid_disk; 5487 set_bit(In_sync, &rdev->flags); 5488 } else 5489 rdev->raid_disk = -1; 5490 } else 5491 super_types[mddev->major_version]. 5492 validate_super(mddev, rdev); 5493 if ((info->state & (1<<MD_DISK_SYNC)) && 5494 (!test_bit(In_sync, &rdev->flags) || 5495 rdev->raid_disk != info->raid_disk)) { 5496 /* This was a hot-add request, but events doesn't 5497 * match, so reject it. 5498 */ 5499 export_rdev(rdev); 5500 return -EINVAL; 5501 } 5502 5503 if (test_bit(In_sync, &rdev->flags)) 5504 rdev->saved_raid_disk = rdev->raid_disk; 5505 else 5506 rdev->saved_raid_disk = -1; 5507 5508 clear_bit(In_sync, &rdev->flags); /* just to be sure */ 5509 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 5510 set_bit(WriteMostly, &rdev->flags); 5511 else 5512 clear_bit(WriteMostly, &rdev->flags); 5513 5514 rdev->raid_disk = -1; 5515 err = bind_rdev_to_array(rdev, mddev); 5516 if (!err && !mddev->pers->hot_remove_disk) { 5517 /* If there is hot_add_disk but no hot_remove_disk 5518 * then added disks for geometry changes, 5519 * and should be added immediately. 5520 */ 5521 super_types[mddev->major_version]. 
5522 validate_super(mddev, rdev); 5523 err = mddev->pers->hot_add_disk(mddev, rdev); 5524 if (err) 5525 unbind_rdev_from_array(rdev); 5526 } 5527 if (err) 5528 export_rdev(rdev); 5529 else 5530 sysfs_notify_dirent_safe(rdev->sysfs_state); 5531 5532 md_update_sb(mddev, 1); 5533 if (mddev->degraded) 5534 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 5535 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5536 if (!err) 5537 md_new_event(mddev); 5538 md_wakeup_thread(mddev->thread); 5539 return err; 5540 } 5541 5542 /* otherwise, add_new_disk is only allowed 5543 * for major_version==0 superblocks 5544 */ 5545 if (mddev->major_version != 0) { 5546 printk(KERN_WARNING "%s: ADD_NEW_DISK not supported\n", 5547 mdname(mddev)); 5548 return -EINVAL; 5549 } 5550 5551 if (!(info->state & (1<<MD_DISK_FAULTY))) { 5552 int err; 5553 rdev = md_import_device(dev, -1, 0); 5554 if (IS_ERR(rdev)) { 5555 printk(KERN_WARNING 5556 "md: error, md_import_device() returned %ld\n", 5557 PTR_ERR(rdev)); 5558 return PTR_ERR(rdev); 5559 } 5560 rdev->desc_nr = info->number; 5561 if (info->raid_disk < mddev->raid_disks) 5562 rdev->raid_disk = info->raid_disk; 5563 else 5564 rdev->raid_disk = -1; 5565 5566 if (rdev->raid_disk < mddev->raid_disks) 5567 if (info->state & (1<<MD_DISK_SYNC)) 5568 set_bit(In_sync, &rdev->flags); 5569 5570 if (info->state & (1<<MD_DISK_WRITEMOSTLY)) 5571 set_bit(WriteMostly, &rdev->flags); 5572 5573 if (!mddev->persistent) { 5574 printk(KERN_INFO "md: nonpersistent superblock ...\n"); 5575 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; 5576 } else 5577 rdev->sb_start = calc_dev_sboffset(rdev); 5578 rdev->sectors = rdev->sb_start; 5579 5580 err = bind_rdev_to_array(rdev, mddev); 5581 if (err) { 5582 export_rdev(rdev); 5583 return err; 5584 } 5585 } 5586 5587 return 0; 5588 } 5589 5590 static int hot_remove_disk(mddev_t * mddev, dev_t dev) 5591 { 5592 char b[BDEVNAME_SIZE]; 5593 mdk_rdev_t *rdev; 5594 5595 rdev = find_rdev(mddev, dev); 5596 if (!rdev) 5597 return -ENXIO; 5598 5599 if (rdev->raid_disk >= 0) 5600 goto busy; 5601 5602 kick_rdev_from_array(rdev); 5603 md_update_sb(mddev, 1); 5604 md_new_event(mddev); 5605 5606 return 0; 5607 busy: 5608 printk(KERN_WARNING "md: cannot remove active disk %s from %s ...\n", 5609 bdevname(rdev->bdev,b), mdname(mddev)); 5610 return -EBUSY; 5611 } 5612 5613 static int hot_add_disk(mddev_t * mddev, dev_t dev) 5614 { 5615 char b[BDEVNAME_SIZE]; 5616 int err; 5617 mdk_rdev_t *rdev; 5618 5619 if (!mddev->pers) 5620 return -ENODEV; 5621 5622 if (mddev->major_version != 0) { 5623 printk(KERN_WARNING "%s: HOT_ADD may only be used with" 5624 " version-0 superblocks.\n", 5625 mdname(mddev)); 5626 return -EINVAL; 5627 } 5628 if (!mddev->pers->hot_add_disk) { 5629 printk(KERN_WARNING 5630 "%s: personality does not support diskops!\n", 5631 mdname(mddev)); 5632 return -EINVAL; 5633 } 5634 5635 rdev = md_import_device(dev, -1, 0); 5636 if (IS_ERR(rdev)) { 5637 printk(KERN_WARNING 5638 "md: error, md_import_device() returned %ld\n", 5639 PTR_ERR(rdev)); 5640 return -EINVAL; 5641 } 5642 5643 if (mddev->persistent) 5644 rdev->sb_start = calc_dev_sboffset(rdev); 5645 else 5646 rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512; 5647 5648 rdev->sectors = rdev->sb_start; 5649 5650 if (test_bit(Faulty, &rdev->flags)) { 5651 printk(KERN_WARNING 5652 "md: can not hot-add faulty %s disk to %s!\n", 5653 bdevname(rdev->bdev,b), mdname(mddev)); 5654 err = -EINVAL; 5655 goto abort_export; 5656 } 5657 clear_bit(In_sync, &rdev->flags); 5658 rdev->desc_nr = -1; 5659 
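/* A freshly hot-added spare has never held a slot in this array,
 * so there is no previous raid_disk role for recovery to restore:
 */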
rdev->saved_raid_disk = -1; 5660 err = bind_rdev_to_array(rdev, mddev); 5661 if (err) 5662 goto abort_export; 5663 5664 /* 5665 * The rest should better be atomic, we can have disk failures 5666 * noticed in interrupt contexts ... 5667 */ 5668 5669 rdev->raid_disk = -1; 5670 5671 md_update_sb(mddev, 1); 5672 5673 /* 5674 * Kick recovery, maybe this spare has to be added to the 5675 * array immediately. 5676 */ 5677 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5678 md_wakeup_thread(mddev->thread); 5679 md_new_event(mddev); 5680 return 0; 5681 5682 abort_export: 5683 export_rdev(rdev); 5684 return err; 5685 } 5686 5687 static int set_bitmap_file(mddev_t *mddev, int fd) 5688 { 5689 int err; 5690 5691 if (mddev->pers) { 5692 if (!mddev->pers->quiesce) 5693 return -EBUSY; 5694 if (mddev->recovery || mddev->sync_thread) 5695 return -EBUSY; 5696 /* we should be able to change the bitmap.. */ 5697 } 5698 5699 5700 if (fd >= 0) { 5701 if (mddev->bitmap) 5702 return -EEXIST; /* cannot add when bitmap is present */ 5703 mddev->bitmap_info.file = fget(fd); 5704 5705 if (mddev->bitmap_info.file == NULL) { 5706 printk(KERN_ERR "%s: error: failed to get bitmap file\n", 5707 mdname(mddev)); 5708 return -EBADF; 5709 } 5710 5711 err = deny_bitmap_write_access(mddev->bitmap_info.file); 5712 if (err) { 5713 printk(KERN_ERR "%s: error: bitmap file is already in use\n", 5714 mdname(mddev)); 5715 fput(mddev->bitmap_info.file); 5716 mddev->bitmap_info.file = NULL; 5717 return err; 5718 } 5719 mddev->bitmap_info.offset = 0; /* file overrides offset */ 5720 } else if (mddev->bitmap == NULL) 5721 return -ENOENT; /* cannot remove what isn't there */ 5722 err = 0; 5723 if (mddev->pers) { 5724 mddev->pers->quiesce(mddev, 1); 5725 if (fd >= 0) { 5726 err = bitmap_create(mddev); 5727 if (!err) 5728 err = bitmap_load(mddev); 5729 } 5730 if (fd < 0 || err) { 5731 bitmap_destroy(mddev); 5732 fd = -1; /* make sure to put the file */ 5733 } 5734 mddev->pers->quiesce(mddev, 0); 5735 } 5736 if (fd < 0) { 5737 if (mddev->bitmap_info.file) { 5738 restore_bitmap_write_access(mddev->bitmap_info.file); 5739 fput(mddev->bitmap_info.file); 5740 } 5741 mddev->bitmap_info.file = NULL; 5742 } 5743 5744 return err; 5745 } 5746 5747 /* 5748 * set_array_info is used in two different ways 5749 * The original usage is when creating a new array. 5750 * In this usage, raid_disks is > 0 and it together with 5751 * level, size, not_persistent, layout, chunksize determine the 5752 * shape of the array. 5753 * This will always create an array with a type-0.90.0 superblock. 5754 * The newer usage is when assembling an array. 5755 * In this case raid_disks will be 0, and the major_version field is 5756 * used to determine which style super-blocks are to be found on the devices. 5757 * The minor and patch _version numbers are also kept in case the 5758 * super_block handler wishes to interpret them. 5759 */ 5760 static int set_array_info(mddev_t * mddev, mdu_array_info_t *info) 5761 { 5762 5763 if (info->raid_disks == 0) { 5764 /* just setting version number for superblock loading */ 5765 if (info->major_version < 0 || 5766 info->major_version >= ARRAY_SIZE(super_types) || 5767 super_types[info->major_version].name == NULL) { 5768 /* maybe try to auto-load a module?
*/ 5769 printk(KERN_INFO 5770 "md: superblock version %d not known\n", 5771 info->major_version); 5772 return -EINVAL; 5773 } 5774 mddev->major_version = info->major_version; 5775 mddev->minor_version = info->minor_version; 5776 mddev->patch_version = info->patch_version; 5777 mddev->persistent = !info->not_persistent; 5778 /* ensure mddev_put doesn't delete this now that there 5779 * is some minimal configuration. 5780 */ 5781 mddev->ctime = get_seconds(); 5782 return 0; 5783 } 5784 mddev->major_version = MD_MAJOR_VERSION; 5785 mddev->minor_version = MD_MINOR_VERSION; 5786 mddev->patch_version = MD_PATCHLEVEL_VERSION; 5787 mddev->ctime = get_seconds(); 5788 5789 mddev->level = info->level; 5790 mddev->clevel[0] = 0; 5791 mddev->dev_sectors = 2 * (sector_t)info->size; 5792 mddev->raid_disks = info->raid_disks; 5793 /* don't set md_minor, it is determined by which /dev/md* was 5794 * opened 5795 */ 5796 if (info->state & (1<<MD_SB_CLEAN)) 5797 mddev->recovery_cp = MaxSector; 5798 else 5799 mddev->recovery_cp = 0; 5800 mddev->persistent = ! info->not_persistent; 5801 mddev->external = 0; 5802 5803 mddev->layout = info->layout; 5804 mddev->chunk_sectors = info->chunk_size >> 9; 5805 5806 mddev->max_disks = MD_SB_DISKS; 5807 5808 if (mddev->persistent) 5809 mddev->flags = 0; 5810 set_bit(MD_CHANGE_DEVS, &mddev->flags); 5811 5812 mddev->bitmap_info.default_offset = MD_SB_BYTES >> 9; 5813 mddev->bitmap_info.offset = 0; 5814 5815 mddev->reshape_position = MaxSector; 5816 5817 /* 5818 * Generate a 128 bit UUID 5819 */ 5820 get_random_bytes(mddev->uuid, 16); 5821 5822 mddev->new_level = mddev->level; 5823 mddev->new_chunk_sectors = mddev->chunk_sectors; 5824 mddev->new_layout = mddev->layout; 5825 mddev->delta_disks = 0; 5826 5827 return 0; 5828 } 5829 5830 void md_set_array_sectors(mddev_t *mddev, sector_t array_sectors) 5831 { 5832 WARN(!mddev_is_locked(mddev), "%s: unlocked mddev!\n", __func__); 5833 5834 if (mddev->external_size) 5835 return; 5836 5837 mddev->array_sectors = array_sectors; 5838 } 5839 EXPORT_SYMBOL(md_set_array_sectors); 5840 5841 static int update_size(mddev_t *mddev, sector_t num_sectors) 5842 { 5843 mdk_rdev_t *rdev; 5844 int rv; 5845 int fit = (num_sectors == 0); 5846 5847 if (mddev->pers->resize == NULL) 5848 return -EINVAL; 5849 /* The "num_sectors" is the number of sectors of each device that 5850 * is used. This can only make sense for arrays with redundancy. 5851 * linear and raid0 always use whatever space is available. We can only 5852 * consider changing this number if no resync or reconstruction is 5853 * happening, and if the new size is acceptable. It must fit before the 5854 * sb_start or, if that is <data_offset, it must fit before the size 5855 * of each device. If num_sectors is zero, we find the largest size 5856 * that fits. 5857 */ 5858 if (mddev->sync_thread) 5859 return -EBUSY; 5860 if (mddev->bitmap) 5861 /* Sorry, cannot grow a bitmap yet, just remove it, 5862 * grow, and re-add.
5863 */ 5864 return -EBUSY; 5865 list_for_each_entry(rdev, &mddev->disks, same_set) { 5866 sector_t avail = rdev->sectors; 5867 5868 if (fit && (num_sectors == 0 || num_sectors > avail)) 5869 num_sectors = avail; 5870 if (avail < num_sectors) 5871 return -ENOSPC; 5872 } 5873 rv = mddev->pers->resize(mddev, num_sectors); 5874 if (!rv) 5875 revalidate_disk(mddev->gendisk); 5876 return rv; 5877 } 5878 5879 static int update_raid_disks(mddev_t *mddev, int raid_disks) 5880 { 5881 int rv; 5882 /* change the number of raid disks */ 5883 if (mddev->pers->check_reshape == NULL) 5884 return -EINVAL; 5885 if (raid_disks <= 0 || 5886 (mddev->max_disks && raid_disks >= mddev->max_disks)) 5887 return -EINVAL; 5888 if (mddev->sync_thread || mddev->reshape_position != MaxSector) 5889 return -EBUSY; 5890 mddev->delta_disks = raid_disks - mddev->raid_disks; 5891 5892 rv = mddev->pers->check_reshape(mddev); 5893 if (rv < 0) 5894 mddev->delta_disks = 0; 5895 return rv; 5896 } 5897 5898 5899 /* 5900 * update_array_info is used to change the configuration of an 5901 * on-line array. 5902 * The version, ctime,level,size,raid_disks,not_persistent, layout,chunk_size 5903 * fields in the info are checked against the array. 5904 * Any differences that cannot be handled will cause an error. 5905 * Normally, only one change can be managed at a time. 5906 */ 5907 static int update_array_info(mddev_t *mddev, mdu_array_info_t *info) 5908 { 5909 int rv = 0; 5910 int cnt = 0; 5911 int state = 0; 5912 5913 /* calculate expected state,ignoring low bits */ 5914 if (mddev->bitmap && mddev->bitmap_info.offset) 5915 state |= (1 << MD_SB_BITMAP_PRESENT); 5916 5917 if (mddev->major_version != info->major_version || 5918 mddev->minor_version != info->minor_version || 5919 /* mddev->patch_version != info->patch_version || */ 5920 mddev->ctime != info->ctime || 5921 mddev->level != info->level || 5922 /* mddev->layout != info->layout || */ 5923 !mddev->persistent != info->not_persistent|| 5924 mddev->chunk_sectors != info->chunk_size >> 9 || 5925 /* ignore bottom 8 bits of state, and allow SB_BITMAP_PRESENT to change */ 5926 ((state^info->state) & 0xfffffe00) 5927 ) 5928 return -EINVAL; 5929 /* Check there is only one change */ 5930 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 5931 cnt++; 5932 if (mddev->raid_disks != info->raid_disks) 5933 cnt++; 5934 if (mddev->layout != info->layout) 5935 cnt++; 5936 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) 5937 cnt++; 5938 if (cnt == 0) 5939 return 0; 5940 if (cnt > 1) 5941 return -EINVAL; 5942 5943 if (mddev->layout != info->layout) { 5944 /* Change layout 5945 * we don't need to do anything at the md level, the 5946 * personality will take care of it all. 
5947 */ 5948 if (mddev->pers->check_reshape == NULL) 5949 return -EINVAL; 5950 else { 5951 mddev->new_layout = info->layout; 5952 rv = mddev->pers->check_reshape(mddev); 5953 if (rv) 5954 mddev->new_layout = mddev->layout; 5955 return rv; 5956 } 5957 } 5958 if (info->size >= 0 && mddev->dev_sectors / 2 != info->size) 5959 rv = update_size(mddev, (sector_t)info->size * 2); 5960 5961 if (mddev->raid_disks != info->raid_disks) 5962 rv = update_raid_disks(mddev, info->raid_disks); 5963 5964 if ((state ^ info->state) & (1<<MD_SB_BITMAP_PRESENT)) { 5965 if (mddev->pers->quiesce == NULL) 5966 return -EINVAL; 5967 if (mddev->recovery || mddev->sync_thread) 5968 return -EBUSY; 5969 if (info->state & (1<<MD_SB_BITMAP_PRESENT)) { 5970 /* add the bitmap */ 5971 if (mddev->bitmap) 5972 return -EEXIST; 5973 if (mddev->bitmap_info.default_offset == 0) 5974 return -EINVAL; 5975 mddev->bitmap_info.offset = 5976 mddev->bitmap_info.default_offset; 5977 mddev->pers->quiesce(mddev, 1); 5978 rv = bitmap_create(mddev); 5979 if (!rv) 5980 rv = bitmap_load(mddev); 5981 if (rv) 5982 bitmap_destroy(mddev); 5983 mddev->pers->quiesce(mddev, 0); 5984 } else { 5985 /* remove the bitmap */ 5986 if (!mddev->bitmap) 5987 return -ENOENT; 5988 if (mddev->bitmap->file) 5989 return -EINVAL; 5990 mddev->pers->quiesce(mddev, 1); 5991 bitmap_destroy(mddev); 5992 mddev->pers->quiesce(mddev, 0); 5993 mddev->bitmap_info.offset = 0; 5994 } 5995 } 5996 md_update_sb(mddev, 1); 5997 return rv; 5998 } 5999 6000 static int set_disk_faulty(mddev_t *mddev, dev_t dev) 6001 { 6002 mdk_rdev_t *rdev; 6003 6004 if (mddev->pers == NULL) 6005 return -ENODEV; 6006 6007 rdev = find_rdev(mddev, dev); 6008 if (!rdev) 6009 return -ENODEV; 6010 6011 md_error(mddev, rdev); 6012 if (!test_bit(Faulty, &rdev->flags)) 6013 return -EBUSY; 6014 return 0; 6015 } 6016 6017 /* 6018 * We have a problem here : there is no easy way to give a CHS 6019 * virtual geometry. We currently pretend that we have a 2 heads 6020 * 4 sectors (with a BIG number of cylinders...). This drives 6021 * dosfs just mad... 
;-) 6022 */ 6023 static int md_getgeo(struct block_device *bdev, struct hd_geometry *geo) 6024 { 6025 mddev_t *mddev = bdev->bd_disk->private_data; 6026 6027 geo->heads = 2; 6028 geo->sectors = 4; 6029 geo->cylinders = mddev->array_sectors / 8; 6030 return 0; 6031 } 6032 6033 static int md_ioctl(struct block_device *bdev, fmode_t mode, 6034 unsigned int cmd, unsigned long arg) 6035 { 6036 int err = 0; 6037 void __user *argp = (void __user *)arg; 6038 mddev_t *mddev = NULL; 6039 int ro; 6040 6041 if (!capable(CAP_SYS_ADMIN)) 6042 return -EACCES; 6043 6044 /* 6045 * Commands dealing with the RAID driver but not any 6046 * particular array: 6047 */ 6048 switch (cmd) 6049 { 6050 case RAID_VERSION: 6051 err = get_version(argp); 6052 goto done; 6053 6054 case PRINT_RAID_DEBUG: 6055 err = 0; 6056 md_print_devices(); 6057 goto done; 6058 6059 #ifndef MODULE 6060 case RAID_AUTORUN: 6061 err = 0; 6062 autostart_arrays(arg); 6063 goto done; 6064 #endif 6065 default:; 6066 } 6067 6068 /* 6069 * Commands creating/starting a new array: 6070 */ 6071 6072 mddev = bdev->bd_disk->private_data; 6073 6074 if (!mddev) { 6075 BUG(); 6076 goto abort; 6077 } 6078 6079 err = mddev_lock(mddev); 6080 if (err) { 6081 printk(KERN_INFO 6082 "md: ioctl lock interrupted, reason %d, cmd %d\n", 6083 err, cmd); 6084 goto abort; 6085 } 6086 6087 switch (cmd) 6088 { 6089 case SET_ARRAY_INFO: 6090 { 6091 mdu_array_info_t info; 6092 if (!arg) 6093 memset(&info, 0, sizeof(info)); 6094 else if (copy_from_user(&info, argp, sizeof(info))) { 6095 err = -EFAULT; 6096 goto abort_unlock; 6097 } 6098 if (mddev->pers) { 6099 err = update_array_info(mddev, &info); 6100 if (err) { 6101 printk(KERN_WARNING "md: couldn't update" 6102 " array info. %d\n", err); 6103 goto abort_unlock; 6104 } 6105 goto done_unlock; 6106 } 6107 if (!list_empty(&mddev->disks)) { 6108 printk(KERN_WARNING 6109 "md: array %s already has disks!\n", 6110 mdname(mddev)); 6111 err = -EBUSY; 6112 goto abort_unlock; 6113 } 6114 if (mddev->raid_disks) { 6115 printk(KERN_WARNING 6116 "md: array %s already initialised!\n", 6117 mdname(mddev)); 6118 err = -EBUSY; 6119 goto abort_unlock; 6120 } 6121 err = set_array_info(mddev, &info); 6122 if (err) { 6123 printk(KERN_WARNING "md: couldn't set" 6124 " array info. 
%d\n", err); 6125 goto abort_unlock; 6126 } 6127 } 6128 goto done_unlock; 6129 6130 default:; 6131 } 6132 6133 /* 6134 * Commands querying/configuring an existing array: 6135 */ 6136 /* if we are not initialised yet, only ADD_NEW_DISK, STOP_ARRAY, 6137 * RUN_ARRAY, and GET_ and SET_BITMAP_FILE are allowed */ 6138 if ((!mddev->raid_disks && !mddev->external) 6139 && cmd != ADD_NEW_DISK && cmd != STOP_ARRAY 6140 && cmd != RUN_ARRAY && cmd != SET_BITMAP_FILE 6141 && cmd != GET_BITMAP_FILE) { 6142 err = -ENODEV; 6143 goto abort_unlock; 6144 } 6145 6146 /* 6147 * Commands even a read-only array can execute: 6148 */ 6149 switch (cmd) 6150 { 6151 case GET_ARRAY_INFO: 6152 err = get_array_info(mddev, argp); 6153 goto done_unlock; 6154 6155 case GET_BITMAP_FILE: 6156 err = get_bitmap_file(mddev, argp); 6157 goto done_unlock; 6158 6159 case GET_DISK_INFO: 6160 err = get_disk_info(mddev, argp); 6161 goto done_unlock; 6162 6163 case RESTART_ARRAY_RW: 6164 err = restart_array(mddev); 6165 goto done_unlock; 6166 6167 case STOP_ARRAY: 6168 err = do_md_stop(mddev, 0, 1); 6169 goto done_unlock; 6170 6171 case STOP_ARRAY_RO: 6172 err = md_set_readonly(mddev, 1); 6173 goto done_unlock; 6174 6175 case BLKROSET: 6176 if (get_user(ro, (int __user *)(arg))) { 6177 err = -EFAULT; 6178 goto done_unlock; 6179 } 6180 err = -EINVAL; 6181 6182 /* if the bdev is going readonly the value of mddev->ro 6183 * does not matter, no writes are coming 6184 */ 6185 if (ro) 6186 goto done_unlock; 6187 6188 /* are we are already prepared for writes? */ 6189 if (mddev->ro != 1) 6190 goto done_unlock; 6191 6192 /* transitioning to readauto need only happen for 6193 * arrays that call md_write_start 6194 */ 6195 if (mddev->pers) { 6196 err = restart_array(mddev); 6197 if (err == 0) { 6198 mddev->ro = 2; 6199 set_disk_ro(mddev->gendisk, 0); 6200 } 6201 } 6202 goto done_unlock; 6203 } 6204 6205 /* 6206 * The remaining ioctls are changing the state of the 6207 * superblock, so we do not allow them on read-only arrays. 6208 * However non-MD ioctls (e.g. get-size) will still come through 6209 * here and hit the 'default' below, so only disallow 6210 * 'md' ioctls, and switch to rw mode if started auto-readonly. 
6211 */ 6212 if (_IOC_TYPE(cmd) == MD_MAJOR && mddev->ro && mddev->pers) { 6213 if (mddev->ro == 2) { 6214 mddev->ro = 0; 6215 sysfs_notify_dirent_safe(mddev->sysfs_state); 6216 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6217 md_wakeup_thread(mddev->thread); 6218 } else { 6219 err = -EROFS; 6220 goto abort_unlock; 6221 } 6222 } 6223 6224 switch (cmd) 6225 { 6226 case ADD_NEW_DISK: 6227 { 6228 mdu_disk_info_t info; 6229 if (copy_from_user(&info, argp, sizeof(info))) 6230 err = -EFAULT; 6231 else 6232 err = add_new_disk(mddev, &info); 6233 goto done_unlock; 6234 } 6235 6236 case HOT_REMOVE_DISK: 6237 err = hot_remove_disk(mddev, new_decode_dev(arg)); 6238 goto done_unlock; 6239 6240 case HOT_ADD_DISK: 6241 err = hot_add_disk(mddev, new_decode_dev(arg)); 6242 goto done_unlock; 6243 6244 case SET_DISK_FAULTY: 6245 err = set_disk_faulty(mddev, new_decode_dev(arg)); 6246 goto done_unlock; 6247 6248 case RUN_ARRAY: 6249 err = do_md_run(mddev); 6250 goto done_unlock; 6251 6252 case SET_BITMAP_FILE: 6253 err = set_bitmap_file(mddev, (int)arg); 6254 goto done_unlock; 6255 6256 default: 6257 err = -EINVAL; 6258 goto abort_unlock; 6259 } 6260 6261 done_unlock: 6262 abort_unlock: 6263 if (mddev->hold_active == UNTIL_IOCTL && 6264 err != -EINVAL) 6265 mddev->hold_active = 0; 6266 mddev_unlock(mddev); 6267 6268 return err; 6269 done: 6270 if (err) 6271 MD_BUG(); 6272 abort: 6273 return err; 6274 } 6275 #ifdef CONFIG_COMPAT 6276 static int md_compat_ioctl(struct block_device *bdev, fmode_t mode, 6277 unsigned int cmd, unsigned long arg) 6278 { 6279 switch (cmd) { 6280 case HOT_REMOVE_DISK: 6281 case HOT_ADD_DISK: 6282 case SET_DISK_FAULTY: 6283 case SET_BITMAP_FILE: 6284 /* These take in integer arg, do not convert */ 6285 break; 6286 default: 6287 arg = (unsigned long)compat_ptr(arg); 6288 break; 6289 } 6290 6291 return md_ioctl(bdev, mode, cmd, arg); 6292 } 6293 #endif /* CONFIG_COMPAT */ 6294 6295 static int md_open(struct block_device *bdev, fmode_t mode) 6296 { 6297 /* 6298 * Succeed if we can lock the mddev, which confirms that 6299 * it isn't being stopped right now. 6300 */ 6301 mddev_t *mddev = mddev_find(bdev->bd_dev); 6302 int err; 6303 6304 if (mddev->gendisk != bdev->bd_disk) { 6305 /* we are racing with mddev_put which is discarding this 6306 * bd_disk. 
6307 		 */
6308 		mddev_put(mddev);
6309 		/* Wait until bdev->bd_disk is definitely gone */
6310 		flush_workqueue(md_misc_wq);
6311 		/* Then retry the open from the top */
6312 		return -ERESTARTSYS;
6313 	}
6314 	BUG_ON(mddev != bdev->bd_disk->private_data);
6315 
6316 	if ((err = mutex_lock_interruptible(&mddev->open_mutex)))
6317 		goto out;
6318 
6319 	err = 0;
6320 	atomic_inc(&mddev->openers);
6321 	mutex_unlock(&mddev->open_mutex);
6322 
6323 	check_disk_change(bdev);
6324  out:
6325 	return err;
6326 }
6327 
6328 static int md_release(struct gendisk *disk, fmode_t mode)
6329 {
6330 	mddev_t *mddev = disk->private_data;
6331 
6332 	BUG_ON(!mddev);
6333 	atomic_dec(&mddev->openers);
6334 	mddev_put(mddev);
6335 
6336 	return 0;
6337 }
6338 
6339 static int md_media_changed(struct gendisk *disk)
6340 {
6341 	mddev_t *mddev = disk->private_data;
6342 
6343 	return mddev->changed;
6344 }
6345 
6346 static int md_revalidate(struct gendisk *disk)
6347 {
6348 	mddev_t *mddev = disk->private_data;
6349 
6350 	mddev->changed = 0;
6351 	return 0;
6352 }
6353 static const struct block_device_operations md_fops =
6354 {
6355 	.owner		= THIS_MODULE,
6356 	.open		= md_open,
6357 	.release	= md_release,
6358 	.ioctl		= md_ioctl,
6359 #ifdef CONFIG_COMPAT
6360 	.compat_ioctl	= md_compat_ioctl,
6361 #endif
6362 	.getgeo		= md_getgeo,
6363 	.media_changed	= md_media_changed,
6364 	.revalidate_disk= md_revalidate,
6365 };
6366 
6367 static int md_thread(void * arg)
6368 {
6369 	mdk_thread_t *thread = arg;
6370 
6371 	/*
6372 	 * md_thread is a 'system-thread', its priority should be very
6373 	 * high. We avoid resource deadlocks individually in each
6374 	 * raid personality. (RAID5 does preallocation) We also use RR and
6375 	 * the very same RT priority as kswapd, thus we will never get
6376 	 * into a priority inversion deadlock.
6377 	 *
6378 	 * we definitely have to have equal or higher priority than
6379 	 * bdflush, otherwise bdflush will deadlock if there are too
6380 	 * many dirty RAID5 blocks.
6381 	 */
6382 
6383 	allow_signal(SIGKILL);
6384 	while (!kthread_should_stop()) {
6385 
6386 		/* We need to wait INTERRUPTIBLE so that
6387 		 * we don't add to the load-average.
6388 * That means we need to be sure no signals are 6389 * pending 6390 */ 6391 if (signal_pending(current)) 6392 flush_signals(current); 6393 6394 wait_event_interruptible_timeout 6395 (thread->wqueue, 6396 test_bit(THREAD_WAKEUP, &thread->flags) 6397 || kthread_should_stop(), 6398 thread->timeout); 6399 6400 clear_bit(THREAD_WAKEUP, &thread->flags); 6401 if (!kthread_should_stop()) 6402 thread->run(thread->mddev); 6403 } 6404 6405 return 0; 6406 } 6407 6408 void md_wakeup_thread(mdk_thread_t *thread) 6409 { 6410 if (thread) { 6411 dprintk("md: waking up MD thread %s.\n", thread->tsk->comm); 6412 set_bit(THREAD_WAKEUP, &thread->flags); 6413 wake_up(&thread->wqueue); 6414 } 6415 } 6416 6417 mdk_thread_t *md_register_thread(void (*run) (mddev_t *), mddev_t *mddev, 6418 const char *name) 6419 { 6420 mdk_thread_t *thread; 6421 6422 thread = kzalloc(sizeof(mdk_thread_t), GFP_KERNEL); 6423 if (!thread) 6424 return NULL; 6425 6426 init_waitqueue_head(&thread->wqueue); 6427 6428 thread->run = run; 6429 thread->mddev = mddev; 6430 thread->timeout = MAX_SCHEDULE_TIMEOUT; 6431 thread->tsk = kthread_run(md_thread, thread, 6432 "%s_%s", 6433 mdname(thread->mddev), 6434 name ?: mddev->pers->name); 6435 if (IS_ERR(thread->tsk)) { 6436 kfree(thread); 6437 return NULL; 6438 } 6439 return thread; 6440 } 6441 6442 void md_unregister_thread(mdk_thread_t **threadp) 6443 { 6444 mdk_thread_t *thread = *threadp; 6445 if (!thread) 6446 return; 6447 dprintk("interrupting MD-thread pid %d\n", task_pid_nr(thread->tsk)); 6448 /* Locking ensures that mddev_unlock does not wake_up a 6449 * non-existent thread 6450 */ 6451 spin_lock(&pers_lock); 6452 *threadp = NULL; 6453 spin_unlock(&pers_lock); 6454 6455 kthread_stop(thread->tsk); 6456 kfree(thread); 6457 } 6458 6459 void md_error(mddev_t *mddev, mdk_rdev_t *rdev) 6460 { 6461 if (!mddev) { 6462 MD_BUG(); 6463 return; 6464 } 6465 6466 if (!rdev || test_bit(Faulty, &rdev->flags)) 6467 return; 6468 6469 if (!mddev->pers || !mddev->pers->error_handler) 6470 return; 6471 mddev->pers->error_handler(mddev,rdev); 6472 if (mddev->degraded) 6473 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 6474 sysfs_notify_dirent_safe(rdev->sysfs_state); 6475 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6476 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6477 md_wakeup_thread(mddev->thread); 6478 if (mddev->event_work.func) 6479 queue_work(md_misc_wq, &mddev->event_work); 6480 md_new_event_inintr(mddev); 6481 } 6482 6483 /* seq_file implementation /proc/mdstat */ 6484 6485 static void status_unused(struct seq_file *seq) 6486 { 6487 int i = 0; 6488 mdk_rdev_t *rdev; 6489 6490 seq_printf(seq, "unused devices: "); 6491 6492 list_for_each_entry(rdev, &pending_raid_disks, same_set) { 6493 char b[BDEVNAME_SIZE]; 6494 i++; 6495 seq_printf(seq, "%s ", 6496 bdevname(rdev->bdev,b)); 6497 } 6498 if (!i) 6499 seq_printf(seq, "<none>"); 6500 6501 seq_printf(seq, "\n"); 6502 } 6503 6504 6505 static void status_resync(struct seq_file *seq, mddev_t * mddev) 6506 { 6507 sector_t max_sectors, resync, res; 6508 unsigned long dt, db; 6509 sector_t rt; 6510 int scale; 6511 unsigned int per_milli; 6512 6513 resync = mddev->curr_resync - atomic_read(&mddev->recovery_active); 6514 6515 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 6516 max_sectors = mddev->resync_max_sectors; 6517 else 6518 max_sectors = mddev->dev_sectors; 6519 6520 /* 6521 * Should not happen. 
6522 */ 6523 if (!max_sectors) { 6524 MD_BUG(); 6525 return; 6526 } 6527 /* Pick 'scale' such that (resync>>scale)*1000 will fit 6528 * in a sector_t, and (max_sectors>>scale) will fit in a 6529 * u32, as those are the requirements for sector_div. 6530 * Thus 'scale' must be at least 10 6531 */ 6532 scale = 10; 6533 if (sizeof(sector_t) > sizeof(unsigned long)) { 6534 while ( max_sectors/2 > (1ULL<<(scale+32))) 6535 scale++; 6536 } 6537 res = (resync>>scale)*1000; 6538 sector_div(res, (u32)((max_sectors>>scale)+1)); 6539 6540 per_milli = res; 6541 { 6542 int i, x = per_milli/50, y = 20-x; 6543 seq_printf(seq, "["); 6544 for (i = 0; i < x; i++) 6545 seq_printf(seq, "="); 6546 seq_printf(seq, ">"); 6547 for (i = 0; i < y; i++) 6548 seq_printf(seq, "."); 6549 seq_printf(seq, "] "); 6550 } 6551 seq_printf(seq, " %s =%3u.%u%% (%llu/%llu)", 6552 (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)? 6553 "reshape" : 6554 (test_bit(MD_RECOVERY_CHECK, &mddev->recovery)? 6555 "check" : 6556 (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) ? 6557 "resync" : "recovery"))), 6558 per_milli/10, per_milli % 10, 6559 (unsigned long long) resync/2, 6560 (unsigned long long) max_sectors/2); 6561 6562 /* 6563 * dt: time from mark until now 6564 * db: blocks written from mark until now 6565 * rt: remaining time 6566 * 6567 * rt is a sector_t, so could be 32bit or 64bit. 6568 * So we divide before multiply in case it is 32bit and close 6569 * to the limit. 6570 * We scale the divisor (db) by 32 to avoid losing precision 6571 * near the end of resync when the number of remaining sectors 6572 * is close to 'db'. 6573 * We then divide rt by 32 after multiplying by db to compensate. 6574 * The '+1' avoids division by zero if db is very small. 6575 */ 6576 dt = ((jiffies - mddev->resync_mark) / HZ); 6577 if (!dt) dt++; 6578 db = (mddev->curr_mark_cnt - atomic_read(&mddev->recovery_active)) 6579 - mddev->resync_mark_cnt; 6580 6581 rt = max_sectors - resync; /* number of remaining sectors */ 6582 sector_div(rt, db/32+1); 6583 rt *= dt; 6584 rt >>= 5; 6585 6586 seq_printf(seq, " finish=%lu.%lumin", (unsigned long)rt / 60, 6587 ((unsigned long)rt % 60)/6); 6588 6589 seq_printf(seq, " speed=%ldK/sec", db/2/dt); 6590 } 6591 6592 static void *md_seq_start(struct seq_file *seq, loff_t *pos) 6593 { 6594 struct list_head *tmp; 6595 loff_t l = *pos; 6596 mddev_t *mddev; 6597 6598 if (l >= 0x10000) 6599 return NULL; 6600 if (!l--) 6601 /* header */ 6602 return (void*)1; 6603 6604 spin_lock(&all_mddevs_lock); 6605 list_for_each(tmp,&all_mddevs) 6606 if (!l--) { 6607 mddev = list_entry(tmp, mddev_t, all_mddevs); 6608 mddev_get(mddev); 6609 spin_unlock(&all_mddevs_lock); 6610 return mddev; 6611 } 6612 spin_unlock(&all_mddevs_lock); 6613 if (!l--) 6614 return (void*)2;/* tail */ 6615 return NULL; 6616 } 6617 6618 static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos) 6619 { 6620 struct list_head *tmp; 6621 mddev_t *next_mddev, *mddev = v; 6622 6623 ++*pos; 6624 if (v == (void*)2) 6625 return NULL; 6626 6627 spin_lock(&all_mddevs_lock); 6628 if (v == (void*)1) 6629 tmp = all_mddevs.next; 6630 else 6631 tmp = mddev->all_mddevs.next; 6632 if (tmp != &all_mddevs) 6633 next_mddev = mddev_get(list_entry(tmp,mddev_t,all_mddevs)); 6634 else { 6635 next_mddev = (void*)2; 6636 *pos = 0x10000; 6637 } 6638 spin_unlock(&all_mddevs_lock); 6639 6640 if (v != (void*)1) 6641 mddev_put(mddev); 6642 return next_mddev; 6643 6644 } 6645 6646 static void md_seq_stop(struct seq_file *seq, void *v) 6647 { 6648 mddev_t *mddev = v; 6649 6650 if 
(mddev && v != (void*)1 && v != (void*)2) 6651 mddev_put(mddev); 6652 } 6653 6654 static int md_seq_show(struct seq_file *seq, void *v) 6655 { 6656 mddev_t *mddev = v; 6657 sector_t sectors; 6658 mdk_rdev_t *rdev; 6659 struct bitmap *bitmap; 6660 6661 if (v == (void*)1) { 6662 struct mdk_personality *pers; 6663 seq_printf(seq, "Personalities : "); 6664 spin_lock(&pers_lock); 6665 list_for_each_entry(pers, &pers_list, list) 6666 seq_printf(seq, "[%s] ", pers->name); 6667 6668 spin_unlock(&pers_lock); 6669 seq_printf(seq, "\n"); 6670 seq->poll_event = atomic_read(&md_event_count); 6671 return 0; 6672 } 6673 if (v == (void*)2) { 6674 status_unused(seq); 6675 return 0; 6676 } 6677 6678 if (mddev_lock(mddev) < 0) 6679 return -EINTR; 6680 6681 if (mddev->pers || mddev->raid_disks || !list_empty(&mddev->disks)) { 6682 seq_printf(seq, "%s : %sactive", mdname(mddev), 6683 mddev->pers ? "" : "in"); 6684 if (mddev->pers) { 6685 if (mddev->ro==1) 6686 seq_printf(seq, " (read-only)"); 6687 if (mddev->ro==2) 6688 seq_printf(seq, " (auto-read-only)"); 6689 seq_printf(seq, " %s", mddev->pers->name); 6690 } 6691 6692 sectors = 0; 6693 list_for_each_entry(rdev, &mddev->disks, same_set) { 6694 char b[BDEVNAME_SIZE]; 6695 seq_printf(seq, " %s[%d]", 6696 bdevname(rdev->bdev,b), rdev->desc_nr); 6697 if (test_bit(WriteMostly, &rdev->flags)) 6698 seq_printf(seq, "(W)"); 6699 if (test_bit(Faulty, &rdev->flags)) { 6700 seq_printf(seq, "(F)"); 6701 continue; 6702 } else if (rdev->raid_disk < 0) 6703 seq_printf(seq, "(S)"); /* spare */ 6704 sectors += rdev->sectors; 6705 } 6706 6707 if (!list_empty(&mddev->disks)) { 6708 if (mddev->pers) 6709 seq_printf(seq, "\n %llu blocks", 6710 (unsigned long long) 6711 mddev->array_sectors / 2); 6712 else 6713 seq_printf(seq, "\n %llu blocks", 6714 (unsigned long long)sectors / 2); 6715 } 6716 if (mddev->persistent) { 6717 if (mddev->major_version != 0 || 6718 mddev->minor_version != 90) { 6719 seq_printf(seq," super %d.%d", 6720 mddev->major_version, 6721 mddev->minor_version); 6722 } 6723 } else if (mddev->external) 6724 seq_printf(seq, " super external:%s", 6725 mddev->metadata_type); 6726 else 6727 seq_printf(seq, " super non-persistent"); 6728 6729 if (mddev->pers) { 6730 mddev->pers->status(seq, mddev); 6731 seq_printf(seq, "\n "); 6732 if (mddev->pers->sync_request) { 6733 if (mddev->curr_resync > 2) { 6734 status_resync(seq, mddev); 6735 seq_printf(seq, "\n "); 6736 } else if (mddev->curr_resync == 1 || mddev->curr_resync == 2) 6737 seq_printf(seq, "\tresync=DELAYED\n "); 6738 else if (mddev->recovery_cp < MaxSector) 6739 seq_printf(seq, "\tresync=PENDING\n "); 6740 } 6741 } else 6742 seq_printf(seq, "\n "); 6743 6744 if ((bitmap = mddev->bitmap)) { 6745 unsigned long chunk_kb; 6746 unsigned long flags; 6747 spin_lock_irqsave(&bitmap->lock, flags); 6748 chunk_kb = mddev->bitmap_info.chunksize >> 10; 6749 seq_printf(seq, "bitmap: %lu/%lu pages [%luKB], " 6750 "%lu%s chunk", 6751 bitmap->pages - bitmap->missing_pages, 6752 bitmap->pages, 6753 (bitmap->pages - bitmap->missing_pages) 6754 << (PAGE_SHIFT - 10), 6755 chunk_kb ? chunk_kb : mddev->bitmap_info.chunksize, 6756 chunk_kb ? 
"KB" : "B"); 6757 if (bitmap->file) { 6758 seq_printf(seq, ", file: "); 6759 seq_path(seq, &bitmap->file->f_path, " \t\n"); 6760 } 6761 6762 seq_printf(seq, "\n"); 6763 spin_unlock_irqrestore(&bitmap->lock, flags); 6764 } 6765 6766 seq_printf(seq, "\n"); 6767 } 6768 mddev_unlock(mddev); 6769 6770 return 0; 6771 } 6772 6773 static const struct seq_operations md_seq_ops = { 6774 .start = md_seq_start, 6775 .next = md_seq_next, 6776 .stop = md_seq_stop, 6777 .show = md_seq_show, 6778 }; 6779 6780 static int md_seq_open(struct inode *inode, struct file *file) 6781 { 6782 struct seq_file *seq; 6783 int error; 6784 6785 error = seq_open(file, &md_seq_ops); 6786 if (error) 6787 return error; 6788 6789 seq = file->private_data; 6790 seq->poll_event = atomic_read(&md_event_count); 6791 return error; 6792 } 6793 6794 static unsigned int mdstat_poll(struct file *filp, poll_table *wait) 6795 { 6796 struct seq_file *seq = filp->private_data; 6797 int mask; 6798 6799 poll_wait(filp, &md_event_waiters, wait); 6800 6801 /* always allow read */ 6802 mask = POLLIN | POLLRDNORM; 6803 6804 if (seq->poll_event != atomic_read(&md_event_count)) 6805 mask |= POLLERR | POLLPRI; 6806 return mask; 6807 } 6808 6809 static const struct file_operations md_seq_fops = { 6810 .owner = THIS_MODULE, 6811 .open = md_seq_open, 6812 .read = seq_read, 6813 .llseek = seq_lseek, 6814 .release = seq_release_private, 6815 .poll = mdstat_poll, 6816 }; 6817 6818 int register_md_personality(struct mdk_personality *p) 6819 { 6820 spin_lock(&pers_lock); 6821 list_add_tail(&p->list, &pers_list); 6822 printk(KERN_INFO "md: %s personality registered for level %d\n", p->name, p->level); 6823 spin_unlock(&pers_lock); 6824 return 0; 6825 } 6826 6827 int unregister_md_personality(struct mdk_personality *p) 6828 { 6829 printk(KERN_INFO "md: %s personality unregistered\n", p->name); 6830 spin_lock(&pers_lock); 6831 list_del_init(&p->list); 6832 spin_unlock(&pers_lock); 6833 return 0; 6834 } 6835 6836 static int is_mddev_idle(mddev_t *mddev, int init) 6837 { 6838 mdk_rdev_t * rdev; 6839 int idle; 6840 int curr_events; 6841 6842 idle = 1; 6843 rcu_read_lock(); 6844 rdev_for_each_rcu(rdev, mddev) { 6845 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; 6846 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + 6847 (int)part_stat_read(&disk->part0, sectors[1]) - 6848 atomic_read(&disk->sync_io); 6849 /* sync IO will cause sync_io to increase before the disk_stats 6850 * as sync_io is counted when a request starts, and 6851 * disk_stats is counted when it completes. 6852 * So resync activity will cause curr_events to be smaller than 6853 * when there was no such activity. 6854 * non-sync IO will cause disk_stat to increase without 6855 * increasing sync_io so curr_events will (eventually) 6856 * be larger than it was before. Once it becomes 6857 * substantially larger, the test below will cause 6858 * the array to appear non-idle, and resync will slow 6859 * down. 6860 * If there is a lot of outstanding resync activity when 6861 * we set last_event to curr_events, then all that activity 6862 * completing might cause the array to appear non-idle 6863 * and resync will be slowed down even though there might 6864 * not have been non-resync activity. This will only 6865 * happen once though. 'last_events' will soon reflect 6866 * the state where there is little or no outstanding 6867 * resync requests, and further resync activity will 6868 * always make curr_events less than last_events. 
6869 * 6870 */ 6871 if (init || curr_events - rdev->last_events > 64) { 6872 rdev->last_events = curr_events; 6873 idle = 0; 6874 } 6875 } 6876 rcu_read_unlock(); 6877 return idle; 6878 } 6879 6880 void md_done_sync(mddev_t *mddev, int blocks, int ok) 6881 { 6882 /* another "blocks" (512byte) blocks have been synced */ 6883 atomic_sub(blocks, &mddev->recovery_active); 6884 wake_up(&mddev->recovery_wait); 6885 if (!ok) { 6886 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 6887 md_wakeup_thread(mddev->thread); 6888 // stop recovery, signal do_sync .... 6889 } 6890 } 6891 6892 6893 /* md_write_start(mddev, bi) 6894 * If we need to update some array metadata (e.g. 'active' flag 6895 * in superblock) before writing, schedule a superblock update 6896 * and wait for it to complete. 6897 */ 6898 void md_write_start(mddev_t *mddev, struct bio *bi) 6899 { 6900 int did_change = 0; 6901 if (bio_data_dir(bi) != WRITE) 6902 return; 6903 6904 BUG_ON(mddev->ro == 1); 6905 if (mddev->ro == 2) { 6906 /* need to switch to read/write */ 6907 mddev->ro = 0; 6908 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 6909 md_wakeup_thread(mddev->thread); 6910 md_wakeup_thread(mddev->sync_thread); 6911 did_change = 1; 6912 } 6913 atomic_inc(&mddev->writes_pending); 6914 if (mddev->safemode == 1) 6915 mddev->safemode = 0; 6916 if (mddev->in_sync) { 6917 spin_lock_irq(&mddev->write_lock); 6918 if (mddev->in_sync) { 6919 mddev->in_sync = 0; 6920 set_bit(MD_CHANGE_CLEAN, &mddev->flags); 6921 set_bit(MD_CHANGE_PENDING, &mddev->flags); 6922 md_wakeup_thread(mddev->thread); 6923 did_change = 1; 6924 } 6925 spin_unlock_irq(&mddev->write_lock); 6926 } 6927 if (did_change) 6928 sysfs_notify_dirent_safe(mddev->sysfs_state); 6929 wait_event(mddev->sb_wait, 6930 !test_bit(MD_CHANGE_PENDING, &mddev->flags)); 6931 } 6932 6933 void md_write_end(mddev_t *mddev) 6934 { 6935 if (atomic_dec_and_test(&mddev->writes_pending)) { 6936 if (mddev->safemode == 2) 6937 md_wakeup_thread(mddev->thread); 6938 else if (mddev->safemode_delay) 6939 mod_timer(&mddev->safemode_timer, jiffies + mddev->safemode_delay); 6940 } 6941 } 6942 6943 /* md_allow_write(mddev) 6944 * Calling this ensures that the array is marked 'active' so that writes 6945 * may proceed without blocking. It is important to call this before 6946 * attempting a GFP_KERNEL allocation while holding the mddev lock. 6947 * Must be called with mddev_lock held. 6948 * 6949 * In the ->external case MD_CHANGE_CLEAN can not be cleared until mddev->lock 6950 * is dropped, so return -EAGAIN after notifying userspace. 
6951  */
6952 int md_allow_write(mddev_t *mddev)
6953 {
6954 	if (!mddev->pers)
6955 		return 0;
6956 	if (mddev->ro)
6957 		return 0;
6958 	if (!mddev->pers->sync_request)
6959 		return 0;
6960 
6961 	spin_lock_irq(&mddev->write_lock);
6962 	if (mddev->in_sync) {
6963 		mddev->in_sync = 0;
6964 		set_bit(MD_CHANGE_CLEAN, &mddev->flags);
6965 		set_bit(MD_CHANGE_PENDING, &mddev->flags);
6966 		if (mddev->safemode_delay &&
6967 		    mddev->safemode == 0)
6968 			mddev->safemode = 1;
6969 		spin_unlock_irq(&mddev->write_lock);
6970 		md_update_sb(mddev, 0);
6971 		sysfs_notify_dirent_safe(mddev->sysfs_state);
6972 	} else
6973 		spin_unlock_irq(&mddev->write_lock);
6974 
6975 	if (test_bit(MD_CHANGE_PENDING, &mddev->flags))
6976 		return -EAGAIN;
6977 	else
6978 		return 0;
6979 }
6980 EXPORT_SYMBOL_GPL(md_allow_write);
6981 
6982 #define SYNC_MARKS	10
6983 #define SYNC_MARK_STEP	(3*HZ)
6984 void md_do_sync(mddev_t *mddev)
6985 {
6986 	mddev_t *mddev2;
6987 	unsigned int currspeed = 0,
6988 		 window;
6989 	sector_t max_sectors,j, io_sectors;
6990 	unsigned long mark[SYNC_MARKS];
6991 	sector_t mark_cnt[SYNC_MARKS];
6992 	int last_mark,m;
6993 	struct list_head *tmp;
6994 	sector_t last_check;
6995 	int skipped = 0;
6996 	mdk_rdev_t *rdev;
6997 	char *desc;
6998 
6999 	/* just in case thread restarts... */
7000 	if (test_bit(MD_RECOVERY_DONE, &mddev->recovery))
7001 		return;
7002 	if (mddev->ro) /* never try to sync a read-only array */
7003 		return;
7004 
7005 	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
7006 		if (test_bit(MD_RECOVERY_CHECK, &mddev->recovery))
7007 			desc = "data-check";
7008 		else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
7009 			desc = "requested-resync";
7010 		else
7011 			desc = "resync";
7012 	} else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery))
7013 		desc = "reshape";
7014 	else
7015 		desc = "recovery";
7016 
7017 	/* we overload curr_resync somewhat here.
7018 	 * 0 == not engaged in resync at all
7019 	 * 2 == checking that there is no conflict with another sync
7020 	 * 1 == like 2, but have yielded to allow conflicting resync to
7021 	 *		commence
7022 	 * other == active in resync - this many blocks
7023 	 *
7024 	 * Before starting a resync we must have set curr_resync to
7025 	 * 2, and then checked that every "conflicting" array has curr_resync
7026 	 * less than ours. When we find one that is the same or higher
7027 	 * we wait on resync_wait. To avoid deadlock, we reduce curr_resync
7028 	 * to 1 if we choose to yield (based arbitrarily on address of mddev structure).
7029 	 * This will mean we have to start checking from the beginning again.
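	 * (Illustrative example: if md0 and md1 share a physical disk and
	 * both start a resync, both first set curr_resync to 2; the one
	 * with the lower mddev address yields by dropping to 1 and waits,
	 * the other proceeds, and the yielder re-checks from the top once
	 * it is woken.)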
7030 * 7031 */ 7032 7033 do { 7034 mddev->curr_resync = 2; 7035 7036 try_again: 7037 if (kthread_should_stop()) 7038 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 7039 7040 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 7041 goto skip; 7042 for_each_mddev(mddev2, tmp) { 7043 if (mddev2 == mddev) 7044 continue; 7045 if (!mddev->parallel_resync 7046 && mddev2->curr_resync 7047 && match_mddev_units(mddev, mddev2)) { 7048 DEFINE_WAIT(wq); 7049 if (mddev < mddev2 && mddev->curr_resync == 2) { 7050 /* arbitrarily yield */ 7051 mddev->curr_resync = 1; 7052 wake_up(&resync_wait); 7053 } 7054 if (mddev > mddev2 && mddev->curr_resync == 1) 7055 /* no need to wait here, we can wait the next 7056 * time 'round when curr_resync == 2 7057 */ 7058 continue; 7059 /* We need to wait 'interruptible' so as not to 7060 * contribute to the load average, and not to 7061 * be caught by 'softlockup' 7062 */ 7063 prepare_to_wait(&resync_wait, &wq, TASK_INTERRUPTIBLE); 7064 if (!kthread_should_stop() && 7065 mddev2->curr_resync >= mddev->curr_resync) { 7066 printk(KERN_INFO "md: delaying %s of %s" 7067 " until %s has finished (they" 7068 " share one or more physical units)\n", 7069 desc, mdname(mddev), mdname(mddev2)); 7070 mddev_put(mddev2); 7071 if (signal_pending(current)) 7072 flush_signals(current); 7073 schedule(); 7074 finish_wait(&resync_wait, &wq); 7075 goto try_again; 7076 } 7077 finish_wait(&resync_wait, &wq); 7078 } 7079 } 7080 } while (mddev->curr_resync < 2); 7081 7082 j = 0; 7083 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 7084 /* resync follows the size requested by the personality, 7085 * which defaults to physical size, but can be virtual size 7086 */ 7087 max_sectors = mddev->resync_max_sectors; 7088 mddev->resync_mismatches = 0; 7089 /* we don't use the checkpoint if there's a bitmap */ 7090 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 7091 j = mddev->resync_min; 7092 else if (!mddev->bitmap) 7093 j = mddev->recovery_cp; 7094 7095 } else if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 7096 max_sectors = mddev->dev_sectors; 7097 else { 7098 /* recovery follows the physical size of devices */ 7099 max_sectors = mddev->dev_sectors; 7100 j = MaxSector; 7101 rcu_read_lock(); 7102 list_for_each_entry_rcu(rdev, &mddev->disks, same_set) 7103 if (rdev->raid_disk >= 0 && 7104 !test_bit(Faulty, &rdev->flags) && 7105 !test_bit(In_sync, &rdev->flags) && 7106 rdev->recovery_offset < j) 7107 j = rdev->recovery_offset; 7108 rcu_read_unlock(); 7109 } 7110 7111 printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev)); 7112 printk(KERN_INFO "md: minimum _guaranteed_ speed:" 7113 " %d KB/sec/disk.\n", speed_min(mddev)); 7114 printk(KERN_INFO "md: using maximum available idle IO bandwidth " 7115 "(but not more than %d KB/sec) for %s.\n", 7116 speed_max(mddev), desc); 7117 7118 is_mddev_idle(mddev, 1); /* this initializes IO event counters */ 7119 7120 io_sectors = 0; 7121 for (m = 0; m < SYNC_MARKS; m++) { 7122 mark[m] = jiffies; 7123 mark_cnt[m] = io_sectors; 7124 } 7125 last_mark = 0; 7126 mddev->resync_mark = mark[last_mark]; 7127 mddev->resync_mark_cnt = mark_cnt[last_mark]; 7128 7129 /* 7130 * Tune reconstruction: 7131 */ 7132 window = 32*(PAGE_SIZE/512); 7133 printk(KERN_INFO "md: using %dk window, over a total of %lluk.\n", 7134 window/2, (unsigned long long)max_sectors/2); 7135 7136 atomic_set(&mddev->recovery_active, 0); 7137 last_check = 0; 7138 7139 if (j>2) { 7140 printk(KERN_INFO 7141 "md: resuming %s of %s from checkpoint.\n", 7142 desc, mdname(mddev)); 7143 
mddev->curr_resync = j;
7144 	}
7145 	mddev->curr_resync_completed = j;
7146 
7147 	while (j < max_sectors) {
7148 		sector_t sectors;
7149 
7150 		skipped = 0;
7151 
7152 		if (!test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
7153 		    ((mddev->curr_resync > mddev->curr_resync_completed &&
7154 		      (mddev->curr_resync - mddev->curr_resync_completed)
7155 		      > (max_sectors >> 4)) ||
7156 		     (j - mddev->curr_resync_completed)*2
7157 		     >= mddev->resync_max - mddev->curr_resync_completed
7158 			    )) {
7159 			/* time to update curr_resync_completed */
7160 			wait_event(mddev->recovery_wait,
7161 				   atomic_read(&mddev->recovery_active) == 0);
7162 			mddev->curr_resync_completed = j;
7163 			set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7164 			sysfs_notify(&mddev->kobj, NULL, "sync_completed");
7165 		}
7166 
7167 		while (j >= mddev->resync_max && !kthread_should_stop()) {
7168 			/* As this condition is controlled by user-space,
7169 			 * we can block indefinitely, so use '_interruptible'
7170 			 * to avoid triggering warnings.
7171 			 */
7172 			flush_signals(current); /* just in case */
7173 			wait_event_interruptible(mddev->recovery_wait,
7174 						 mddev->resync_max > j
7175 						 || kthread_should_stop());
7176 		}
7177 
7178 		if (kthread_should_stop())
7179 			goto interrupted;
7180 
7181 		sectors = mddev->pers->sync_request(mddev, j, &skipped,
7182 						  currspeed < speed_min(mddev));
7183 		if (sectors == 0) {
7184 			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
7185 			goto out;
7186 		}
7187 
7188 		if (!skipped) { /* actual IO requested */
7189 			io_sectors += sectors;
7190 			atomic_add(sectors, &mddev->recovery_active);
7191 		}
7192 
7193 		if (test_bit(MD_RECOVERY_INTR, &mddev->recovery))
7194 			break;
7195 
7196 		j += sectors;
7197 		if (j>1) mddev->curr_resync = j;
7198 		mddev->curr_mark_cnt = io_sectors;
7199 		if (last_check == 0)
7200 			/* this is the earliest that rebuild will be
7201 			 * visible in /proc/mdstat
7202 			 */
7203 			md_new_event(mddev);
7204 
7205 		if (last_check + window > io_sectors || j == max_sectors)
7206 			continue;
7207 
7208 		last_check = io_sectors;
7209 	repeat:
7210 		if (time_after_eq(jiffies, mark[last_mark] + SYNC_MARK_STEP )) {
7211 			/* step marks */
7212 			int next = (last_mark+1) % SYNC_MARKS;
7213 
7214 			mddev->resync_mark = mark[next];
7215 			mddev->resync_mark_cnt = mark_cnt[next];
7216 			mark[next] = jiffies;
7217 			mark_cnt[next] = io_sectors - atomic_read(&mddev->recovery_active);
7218 			last_mark = next;
7219 		}
7220 
7221 
7222 		if (kthread_should_stop())
7223 			goto interrupted;
7224 
7225 
7226 		/*
7227 		 * this loop exits only when either we are slower than
7228 		 * the 'hard' speed limit, or the system was IO-idle for
7229 		 * a jiffy.
7230 		 * the system might be non-idle CPU-wise, but we only care
7231 		 * about not overloading the IO subsystem.
(things like an 7232 * e2fsck being done on the RAID array should execute fast) 7233 */ 7234 cond_resched(); 7235 7236 currspeed = ((unsigned long)(io_sectors-mddev->resync_mark_cnt))/2 7237 /((jiffies-mddev->resync_mark)/HZ +1) +1; 7238 7239 if (currspeed > speed_min(mddev)) { 7240 if ((currspeed > speed_max(mddev)) || 7241 !is_mddev_idle(mddev, 0)) { 7242 msleep(500); 7243 goto repeat; 7244 } 7245 } 7246 } 7247 printk(KERN_INFO "md: %s: %s done.\n",mdname(mddev), desc); 7248 /* 7249 * this also signals 'finished resyncing' to md_stop 7250 */ 7251 out: 7252 wait_event(mddev->recovery_wait, !atomic_read(&mddev->recovery_active)); 7253 7254 /* tell personality that we are finished */ 7255 mddev->pers->sync_request(mddev, max_sectors, &skipped, 1); 7256 7257 if (!test_bit(MD_RECOVERY_CHECK, &mddev->recovery) && 7258 mddev->curr_resync > 2) { 7259 if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 7260 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 7261 if (mddev->curr_resync >= mddev->recovery_cp) { 7262 printk(KERN_INFO 7263 "md: checkpointing %s of %s.\n", 7264 desc, mdname(mddev)); 7265 mddev->recovery_cp = mddev->curr_resync; 7266 } 7267 } else 7268 mddev->recovery_cp = MaxSector; 7269 } else { 7270 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 7271 mddev->curr_resync = MaxSector; 7272 rcu_read_lock(); 7273 list_for_each_entry_rcu(rdev, &mddev->disks, same_set) 7274 if (rdev->raid_disk >= 0 && 7275 mddev->delta_disks >= 0 && 7276 !test_bit(Faulty, &rdev->flags) && 7277 !test_bit(In_sync, &rdev->flags) && 7278 rdev->recovery_offset < mddev->curr_resync) 7279 rdev->recovery_offset = mddev->curr_resync; 7280 rcu_read_unlock(); 7281 } 7282 } 7283 set_bit(MD_CHANGE_DEVS, &mddev->flags); 7284 7285 skip: 7286 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 7287 /* We completed so min/max setting can be forgotten if used. */ 7288 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 7289 mddev->resync_min = 0; 7290 mddev->resync_max = MaxSector; 7291 } else if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 7292 mddev->resync_min = mddev->curr_resync_completed; 7293 mddev->curr_resync = 0; 7294 wake_up(&resync_wait); 7295 set_bit(MD_RECOVERY_DONE, &mddev->recovery); 7296 md_wakeup_thread(mddev->thread); 7297 return; 7298 7299 interrupted: 7300 /* 7301 * got a signal, exit. 7302 */ 7303 printk(KERN_INFO 7304 "md: md_do_sync() got signal ... exiting\n"); 7305 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 7306 goto out; 7307 7308 } 7309 EXPORT_SYMBOL_GPL(md_do_sync); 7310 7311 static int remove_and_add_spares(mddev_t *mddev) 7312 { 7313 mdk_rdev_t *rdev; 7314 int spares = 0; 7315 7316 mddev->curr_resync_completed = 0; 7317 7318 list_for_each_entry(rdev, &mddev->disks, same_set) 7319 if (rdev->raid_disk >= 0 && 7320 !test_bit(Blocked, &rdev->flags) && 7321 (test_bit(Faulty, &rdev->flags) || 7322 ! 
test_bit(In_sync, &rdev->flags)) &&
7323 		    atomic_read(&rdev->nr_pending)==0) {
7324 			if (mddev->pers->hot_remove_disk(
7325 				    mddev, rdev->raid_disk)==0) {
7326 				sysfs_unlink_rdev(mddev, rdev);
7327 				rdev->raid_disk = -1;
7328 			}
7329 		}
7330 
7331 	if (mddev->degraded) {
7332 		list_for_each_entry(rdev, &mddev->disks, same_set) {
7333 			if (rdev->raid_disk >= 0 &&
7334 			    !test_bit(In_sync, &rdev->flags) &&
7335 			    !test_bit(Faulty, &rdev->flags))
7336 				spares++;
7337 			if (rdev->raid_disk < 0
7338 			    && !test_bit(Faulty, &rdev->flags)) {
7339 				rdev->recovery_offset = 0;
7340 				if (mddev->pers->
7341 				    hot_add_disk(mddev, rdev) == 0) {
7342 					if (sysfs_link_rdev(mddev, rdev))
7343 						/* failure here is OK */;
7344 					spares++;
7345 					md_new_event(mddev);
7346 					set_bit(MD_CHANGE_DEVS, &mddev->flags);
7347 				} else
7348 					break;
7349 			}
7350 		}
7351 	}
7352 	return spares;
7353 }
7354 
7355 static void reap_sync_thread(mddev_t *mddev)
7356 {
7357 	mdk_rdev_t *rdev;
7358 
7359 	/* resync has finished, collect result */
7360 	md_unregister_thread(&mddev->sync_thread);
7361 	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery) &&
7362 	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
7363 		/* success...*/
7364 		/* activate any spares */
7365 		if (mddev->pers->spare_active(mddev))
7366 			sysfs_notify(&mddev->kobj, NULL,
7367 				     "degraded");
7368 	}
7369 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
7370 	    mddev->pers->finish_reshape)
7371 		mddev->pers->finish_reshape(mddev);
7372 	md_update_sb(mddev, 1);
7373 
7374 	/* if array is no-longer degraded, then any saved_raid_disk
7375 	 * information must be scrapped
7376 	 */
7377 	if (!mddev->degraded)
7378 		list_for_each_entry(rdev, &mddev->disks, same_set)
7379 			rdev->saved_raid_disk = -1;
7380 
7381 	clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7382 	clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
7383 	clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
7384 	clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
7385 	clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
7386 	/* flag recovery needed just to double check */
7387 	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7388 	sysfs_notify_dirent_safe(mddev->sysfs_action);
7389 	md_new_event(mddev);
7390 	if (mddev->event_work.func)
7391 		queue_work(md_misc_wq, &mddev->event_work);
7392 }
7393 
7394 /*
7395  * This routine is regularly called by all per-raid-array threads to
7396  * deal with generic issues like resync and super-block update.
7397  * Raid personalities that don't have a thread (linear/raid0) do not
7398  * need this as they never do any recovery or update the superblock.
7399  *
7400  * It does not do any resync itself, but rather "forks" off other threads
7401  * to do that as needed.
7402  * When it is determined that resync is needed, we set MD_RECOVERY_RUNNING in
7403  * "->recovery" and create a thread at ->sync_thread.
7404  * When the thread finishes it sets MD_RECOVERY_DONE
7405  * and wakes up this thread which will reap the thread and finish up.
7406  * This thread also removes any faulty devices (with nr_pending == 0).
7407  *
7408  * The overall approach is:
7409  *  1/ if the superblock needs updating, update it.
7410  *  2/ If a recovery thread is running, don't do anything else.
7411  *  3/ If recovery has finished, clean up, possibly marking spares active.
7412  *  4/ If there are any faulty devices, remove them.
7413  *  5/ If array is degraded, try to add spare devices
7414  *  6/ If array has spares or is not in-sync, start a resync thread.
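 *
 * (Each personality's array thread, e.g. raid1d or raid5d, is expected
 * to call md_check_recovery() on every wakeup.)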
7415  */
7416 void md_check_recovery(mddev_t *mddev)
7417 {
7418 	if (mddev->suspended)
7419 		return;
7420 
7421 	if (mddev->bitmap)
7422 		bitmap_daemon_work(mddev);
7423 
7424 	if (signal_pending(current)) {
7425 		if (mddev->pers->sync_request && !mddev->external) {
7426 			printk(KERN_INFO "md: %s in immediate safe mode\n",
7427 			       mdname(mddev));
7428 			mddev->safemode = 2;
7429 		}
7430 		flush_signals(current);
7431 	}
7432 
7433 	if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
7434 		return;
7435 	if ( ! (
7436 		(mddev->flags & ~ (1<<MD_CHANGE_PENDING)) ||
7437 		test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
7438 		test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
7439 		(mddev->external == 0 && mddev->safemode == 1) ||
7440 		(mddev->safemode == 2 && ! atomic_read(&mddev->writes_pending)
7441 		 && !mddev->in_sync && mddev->recovery_cp == MaxSector)
7442 		))
7443 		return;
7444 
7445 	if (mddev_trylock(mddev)) {
7446 		int spares = 0;
7447 
7448 		if (mddev->ro) {
7449 			/* Only thing we do on a ro array is remove
7450 			 * failed devices.
7451 			 */
7452 			mdk_rdev_t *rdev;
7453 			list_for_each_entry(rdev, &mddev->disks, same_set)
7454 				if (rdev->raid_disk >= 0 &&
7455 				    !test_bit(Blocked, &rdev->flags) &&
7456 				    test_bit(Faulty, &rdev->flags) &&
7457 				    atomic_read(&rdev->nr_pending)==0) {
7458 					if (mddev->pers->hot_remove_disk(
7459 						    mddev, rdev->raid_disk)==0) {
7460 						sysfs_unlink_rdev(mddev, rdev);
7461 						rdev->raid_disk = -1;
7462 					}
7463 				}
7464 			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7465 			goto unlock;
7466 		}
7467 
7468 		if (!mddev->external) {
7469 			int did_change = 0;
7470 			spin_lock_irq(&mddev->write_lock);
7471 			if (mddev->safemode &&
7472 			    !atomic_read(&mddev->writes_pending) &&
7473 			    !mddev->in_sync &&
7474 			    mddev->recovery_cp == MaxSector) {
7475 				mddev->in_sync = 1;
7476 				did_change = 1;
7477 				set_bit(MD_CHANGE_CLEAN, &mddev->flags);
7478 			}
7479 			if (mddev->safemode == 1)
7480 				mddev->safemode = 0;
7481 			spin_unlock_irq(&mddev->write_lock);
7482 			if (did_change)
7483 				sysfs_notify_dirent_safe(mddev->sysfs_state);
7484 		}
7485 
7486 		if (mddev->flags)
7487 			md_update_sb(mddev, 0);
7488 
7489 		if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
7490 		    !test_bit(MD_RECOVERY_DONE, &mddev->recovery)) {
7491 			/* resync/recovery still happening */
7492 			clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7493 			goto unlock;
7494 		}
7495 		if (mddev->sync_thread) {
7496 			reap_sync_thread(mddev);
7497 			goto unlock;
7498 		}
7499 		/* Set RUNNING before clearing NEEDED to avoid
7500 		 * any transients in the value of "sync_action".
7501 		 */
7502 		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
7503 		clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
7504 		/* Clear some bits that don't mean anything, but
7505 		 * might be left set
7506 		 */
7507 		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
7508 		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
7509 
7510 		if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
7511 			goto unlock;
7512 		/* no recovery is running.
7513 		 * remove any failed drives, then
7514 		 * add spares if possible.
7515 		 * Spares are also removed and re-added, to allow
7516 		 * the personality to fail the re-add.
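		 * (remove_and_add_spares() above implements this: it first
		 * hot-removes Faulty or not-in-sync devices with no pending
		 * IO, then hot-adds unused non-faulty devices as spares,
		 * resetting their recovery_offset to 0.)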
7517 */ 7518 7519 if (mddev->reshape_position != MaxSector) { 7520 if (mddev->pers->check_reshape == NULL || 7521 mddev->pers->check_reshape(mddev) != 0) 7522 /* Cannot proceed */ 7523 goto unlock; 7524 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 7525 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 7526 } else if ((spares = remove_and_add_spares(mddev))) { 7527 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 7528 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 7529 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 7530 set_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 7531 } else if (mddev->recovery_cp < MaxSector) { 7532 set_bit(MD_RECOVERY_SYNC, &mddev->recovery); 7533 clear_bit(MD_RECOVERY_RECOVER, &mddev->recovery); 7534 } else if (!test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) 7535 /* nothing to be done ... */ 7536 goto unlock; 7537 7538 if (mddev->pers->sync_request) { 7539 if (spares && mddev->bitmap && ! mddev->bitmap->file) { 7540 /* We are adding a device or devices to an array 7541 * which has the bitmap stored on all devices. 7542 * So make sure all bitmap pages get written 7543 */ 7544 bitmap_write_all(mddev->bitmap); 7545 } 7546 mddev->sync_thread = md_register_thread(md_do_sync, 7547 mddev, 7548 "resync"); 7549 if (!mddev->sync_thread) { 7550 printk(KERN_ERR "%s: could not start resync" 7551 " thread...\n", 7552 mdname(mddev)); 7553 /* leave the spares where they are, it shouldn't hurt */ 7554 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7555 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 7556 clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 7557 clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery); 7558 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 7559 } else 7560 md_wakeup_thread(mddev->sync_thread); 7561 sysfs_notify_dirent_safe(mddev->sysfs_action); 7562 md_new_event(mddev); 7563 } 7564 unlock: 7565 if (!mddev->sync_thread) { 7566 clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7567 if (test_and_clear_bit(MD_RECOVERY_RECOVER, 7568 &mddev->recovery)) 7569 if (mddev->sysfs_action) 7570 sysfs_notify_dirent_safe(mddev->sysfs_action); 7571 } 7572 mddev_unlock(mddev); 7573 } 7574 } 7575 7576 void md_wait_for_blocked_rdev(mdk_rdev_t *rdev, mddev_t *mddev) 7577 { 7578 sysfs_notify_dirent_safe(rdev->sysfs_state); 7579 wait_event_timeout(rdev->blocked_wait, 7580 !test_bit(Blocked, &rdev->flags) && 7581 !test_bit(BlockedBadBlocks, &rdev->flags), 7582 msecs_to_jiffies(5000)); 7583 rdev_dec_pending(rdev, mddev); 7584 } 7585 EXPORT_SYMBOL(md_wait_for_blocked_rdev); 7586 7587 7588 /* Bad block management. 7589 * We can record which blocks on each device are 'bad' and so just 7590 * fail those blocks, or that stripe, rather than the whole device. 7591 * Entries in the bad-block table are 64bits wide. This comprises: 7592 * Length of bad-range, in sectors: 0-511 for lengths 1-512 7593 * Start of bad-range, sector offset, 54 bits (allows 8 exbibytes) 7594 * A 'shift' can be set so that larger blocks are tracked and 7595 * consequently larger devices can be covered. 7596 * 'Acknowledged' flag - 1 bit. - the most significant bit. 7597 * 7598 * Locking of the bad-block table uses a seqlock so md_is_badblock 7599 * might need to retry if it is very unlucky. 7600 * We will sometimes want to check for bad blocks in a bi_end_io function, 7601 * so we use the write_seqlock_irq variant. 7602 * 7603 * When looking for a bad block we specify a range and want to 7604 * know if any block in the range is bad. 
So we binary-search
7605  * to the last range that starts at-or-before the given endpoint,
7606  * (or "before the sector after the target range")
7607  * then see if it ends after the given start.
7608  * We return
7609  *  0 if there are no known bad blocks in the range
7610  *  1 if there are known bad blocks which are all acknowledged
7611  * -1 if there are bad blocks which have not yet been acknowledged in metadata.
7612  * plus the start/length of the first bad section we overlap.
7613  */
7614 int md_is_badblock(struct badblocks *bb, sector_t s, int sectors,
7615 		   sector_t *first_bad, int *bad_sectors)
7616 {
7617 	int hi;
7618 	int lo = 0;
7619 	u64 *p = bb->page;
7620 	int rv = 0;
7621 	sector_t target = s + sectors;
7622 	unsigned seq;
7623 
7624 	if (bb->shift > 0) {
7625 		/* round the start down, and the end up */
7626 		s >>= bb->shift;
7627 		target += (1<<bb->shift) - 1;
7628 		target >>= bb->shift;
7629 		sectors = target - s;
7630 	}
7631 	/* 'target' is now the first block after the bad range */
7632 
7633 retry:
7634 	seq = read_seqbegin(&bb->lock);
7635 
7636 	hi = bb->count;
7637 
7638 	/* Binary search between lo and hi for 'target'
7639 	 * i.e. for the last range that starts before 'target'
7640 	 */
7641 	/* INVARIANT: ranges before 'lo' and at-or-after 'hi'
7642 	 * are known not to be the last range before target.
7643 	 * VARIANT: hi-lo is the number of possible
7644 	 * ranges, and decreases until it reaches 1
7645 	 */
7646 	while (hi - lo > 1) {
7647 		int mid = (lo + hi) / 2;
7648 		sector_t a = BB_OFFSET(p[mid]);
7649 		if (a < target)
7650 			/* This could still be the one, earlier ranges
7651 			 * could not. */
7652 			lo = mid;
7653 		else
7654 			/* This and later ranges are definitely out. */
7655 			hi = mid;
7656 	}
7657 	/* 'lo' might be the last that started before target, but 'hi' isn't */
7658 	if (hi > lo) {
7659 		/* need to check all ranges that end after 's' to see if
7660 		 * any are unacknowledged.
7661 		 */
7662 		while (lo >= 0 &&
7663 		       BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
7664 			if (BB_OFFSET(p[lo]) < target) {
7665 				/* starts before the end, and finishes after
7666 				 * the start, so they must overlap
7667 				 */
7668 				if (rv != -1 && BB_ACK(p[lo]))
7669 					rv = 1;
7670 				else
7671 					rv = -1;
7672 				*first_bad = BB_OFFSET(p[lo]);
7673 				*bad_sectors = BB_LEN(p[lo]);
7674 			}
7675 			lo--;
7676 		}
7677 	}
7678 
7679 	if (read_seqretry(&bb->lock, seq))
7680 		goto retry;
7681 
7682 	return rv;
7683 }
7684 EXPORT_SYMBOL_GPL(md_is_badblock);
7685 
7686 /*
7687  * Add a range of bad blocks to the table.
7688  * This might extend the table, or might contract it
7689  * if two adjacent ranges can be merged.
7690  * We binary-search to find the 'insertion' point, then
7691  * decide how best to handle it.
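 * (Worked example, per the entry layout described above: an acknowledged
 * 16-sector bad range starting at sector 1000 would be stored as
 * BB_MAKE(1000, 16, 1), i.e. (1000 << 9) | (16 - 1) with the top bit
 * set; BB_OFFSET(), BB_LEN() and BB_ACK() unpack those fields again.)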
7692 */ 7693 static int md_set_badblocks(struct badblocks *bb, sector_t s, int sectors, 7694 int acknowledged) 7695 { 7696 u64 *p; 7697 int lo, hi; 7698 int rv = 1; 7699 7700 if (bb->shift < 0) 7701 /* badblocks are disabled */ 7702 return 0; 7703 7704 if (bb->shift) { 7705 /* round the start down, and the end up */ 7706 sector_t next = s + sectors; 7707 s >>= bb->shift; 7708 next += (1<<bb->shift) - 1; 7709 next >>= bb->shift; 7710 sectors = next - s; 7711 } 7712 7713 write_seqlock_irq(&bb->lock); 7714 7715 p = bb->page; 7716 lo = 0; 7717 hi = bb->count; 7718 /* Find the last range that starts at-or-before 's' */ 7719 while (hi - lo > 1) { 7720 int mid = (lo + hi) / 2; 7721 sector_t a = BB_OFFSET(p[mid]); 7722 if (a <= s) 7723 lo = mid; 7724 else 7725 hi = mid; 7726 } 7727 if (hi > lo && BB_OFFSET(p[lo]) > s) 7728 hi = lo; 7729 7730 if (hi > lo) { 7731 /* we found a range that might merge with the start 7732 * of our new range 7733 */ 7734 sector_t a = BB_OFFSET(p[lo]); 7735 sector_t e = a + BB_LEN(p[lo]); 7736 int ack = BB_ACK(p[lo]); 7737 if (e >= s) { 7738 /* Yes, we can merge with a previous range */ 7739 if (s == a && s + sectors >= e) 7740 /* new range covers old */ 7741 ack = acknowledged; 7742 else 7743 ack = ack && acknowledged; 7744 7745 if (e < s + sectors) 7746 e = s + sectors; 7747 if (e - a <= BB_MAX_LEN) { 7748 p[lo] = BB_MAKE(a, e-a, ack); 7749 s = e; 7750 } else { 7751 /* does not all fit in one range, 7752 * make p[lo] maximal 7753 */ 7754 if (BB_LEN(p[lo]) != BB_MAX_LEN) 7755 p[lo] = BB_MAKE(a, BB_MAX_LEN, ack); 7756 s = a + BB_MAX_LEN; 7757 } 7758 sectors = e - s; 7759 } 7760 } 7761 if (sectors && hi < bb->count) { 7762 /* 'hi' points to the first range that starts after 's'. 7763 * Maybe we can merge with the start of that range */ 7764 sector_t a = BB_OFFSET(p[hi]); 7765 sector_t e = a + BB_LEN(p[hi]); 7766 int ack = BB_ACK(p[hi]); 7767 if (a <= s + sectors) { 7768 /* merging is possible */ 7769 if (e <= s + sectors) { 7770 /* full overlap */ 7771 e = s + sectors; 7772 ack = acknowledged; 7773 } else 7774 ack = ack && acknowledged; 7775 7776 a = s; 7777 if (e - a <= BB_MAX_LEN) { 7778 p[hi] = BB_MAKE(a, e-a, ack); 7779 s = e; 7780 } else { 7781 p[hi] = BB_MAKE(a, BB_MAX_LEN, ack); 7782 s = a + BB_MAX_LEN; 7783 } 7784 sectors = e - s; 7785 lo = hi; 7786 hi++; 7787 } 7788 } 7789 if (sectors == 0 && hi < bb->count) { 7790 /* we might be able to combine lo and hi */ 7791 /* Note: 's' is at the end of 'lo' */ 7792 sector_t a = BB_OFFSET(p[hi]); 7793 int lolen = BB_LEN(p[lo]); 7794 int hilen = BB_LEN(p[hi]); 7795 int newlen = lolen + hilen - (s - a); 7796 if (s >= a && newlen < BB_MAX_LEN) { 7797 /* yes, we can combine them */ 7798 int ack = BB_ACK(p[lo]) && BB_ACK(p[hi]); 7799 p[lo] = BB_MAKE(BB_OFFSET(p[lo]), newlen, ack); 7800 memmove(p + hi, p + hi + 1, 7801 (bb->count - hi - 1) * 8); 7802 bb->count--; 7803 } 7804 } 7805 while (sectors) { 7806 /* didn't merge (it all). 
7807 		 * Need to add a range just before 'hi' */
7808 		if (bb->count >= MD_MAX_BADBLOCKS) {
7809 			/* No room for more */
7810 			rv = 0;
7811 			break;
7812 		} else {
7813 			int this_sectors = sectors;
7814 			memmove(p + hi + 1, p + hi,
7815 				(bb->count - hi) * 8);
7816 			bb->count++;
7817 
7818 			if (this_sectors > BB_MAX_LEN)
7819 				this_sectors = BB_MAX_LEN;
7820 			p[hi] = BB_MAKE(s, this_sectors, acknowledged);
7821 			sectors -= this_sectors;
7822 			s += this_sectors;
7823 		}
7824 	}
7825 
7826 	bb->changed = 1;
7827 	if (!acknowledged)
7828 		bb->unacked_exist = 1;
7829 	write_sequnlock_irq(&bb->lock);
7830 
7831 	return rv;
7832 }
7833 
7834 int rdev_set_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors,
7835 		       int acknowledged)
7836 {
7837 	int rv = md_set_badblocks(&rdev->badblocks,
7838 				  s + rdev->data_offset, sectors, acknowledged);
7839 	if (rv) {
7840 		/* Make sure they get written out promptly */
7841 		set_bit(MD_CHANGE_CLEAN, &rdev->mddev->flags);
7842 		md_wakeup_thread(rdev->mddev->thread);
7843 	}
7844 	return rv;
7845 }
7846 EXPORT_SYMBOL_GPL(rdev_set_badblocks);
7847 
7848 /*
7849  * Remove a range of bad blocks from the table.
7850  * This may involve extending the table if we split a region,
7851  * but it must not fail. So if the table becomes full, we just
7852  * drop the remove request.
7853  */
7854 static int md_clear_badblocks(struct badblocks *bb, sector_t s, int sectors)
7855 {
7856 	u64 *p;
7857 	int lo, hi;
7858 	sector_t target = s + sectors;
7859 	int rv = 0;
7860 
7861 	if (bb->shift > 0) {
7862 		/* When clearing we round the start up and the end down.
7863 		 * This should not matter as the shift should align with
7864 		 * the block size and no rounding should ever be needed.
7865 		 * However it is better to think a block is bad when it
7866 		 * isn't than to think a block is not bad when it is.
7867 		 */
7868 		s += (1<<bb->shift) - 1;
7869 		s >>= bb->shift;
7870 		target >>= bb->shift;
7871 		sectors = target - s;
7872 	}
7873 
7874 	write_seqlock_irq(&bb->lock);
7875 
7876 	p = bb->page;
7877 	lo = 0;
7878 	hi = bb->count;
7879 	/* Find the last range that starts before 'target' */
7880 	while (hi - lo > 1) {
7881 		int mid = (lo + hi) / 2;
7882 		sector_t a = BB_OFFSET(p[mid]);
7883 		if (a < target)
7884 			lo = mid;
7885 		else
7886 			hi = mid;
7887 	}
7888 	if (hi > lo) {
7889 		/* p[lo] is the last range that could overlap the
7890 		 * current range. Earlier ranges could also overlap,
7891 		 * but only this one can overlap the end of the range.
7892 		 */
7893 		if (BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > target) {
7894 			/* Partial overlap, leave the tail of this range */
7895 			int ack = BB_ACK(p[lo]);
7896 			sector_t a = BB_OFFSET(p[lo]);
7897 			sector_t end = a + BB_LEN(p[lo]);
7898 
7899 			if (a < s) {
7900 				/* we need to split this range */
7901 				if (bb->count >= MD_MAX_BADBLOCKS) {
7902 					rv = 0;
7903 					goto out;
7904 				}
7905 				memmove(p+lo+1, p+lo, (bb->count - lo) * 8);
7906 				bb->count++;
7907 				p[lo] = BB_MAKE(a, s-a, ack);
7908 				lo++;
7909 			}
7910 			p[lo] = BB_MAKE(target, end - target, ack);
7911 			/* there is no longer an overlap */
7912 			hi = lo;
7913 			lo--;
7914 		}
7915 		while (lo >= 0 &&
7916 		       BB_OFFSET(p[lo]) + BB_LEN(p[lo]) > s) {
7917 			/* This range does overlap */
7918 			if (BB_OFFSET(p[lo]) < s) {
7919 				/* Keep the early parts of this range. */
7920 				int ack = BB_ACK(p[lo]);
7921 				sector_t start = BB_OFFSET(p[lo]);
7922 				p[lo] = BB_MAKE(start, s - start, ack);
7923 				/* now 'lo' doesn't overlap, so..

int rdev_clear_badblocks(mdk_rdev_t *rdev, sector_t s, int sectors)
{
	return md_clear_badblocks(&rdev->badblocks,
				  s + rdev->data_offset,
				  sectors);
}
EXPORT_SYMBOL_GPL(rdev_clear_badblocks);

/*
 * Acknowledge all bad blocks in a list.
 * This only succeeds if ->changed is clear.  It is used by
 * in-kernel metadata updates.
 */
void md_ack_all_badblocks(struct badblocks *bb)
{
	if (bb->page == NULL || bb->changed)
		/* no point even trying */
		return;
	write_seqlock_irq(&bb->lock);

	if (bb->changed == 0) {
		u64 *p = bb->page;
		int i;
		for (i = 0; i < bb->count ; i++) {
			if (!BB_ACK(p[i])) {
				sector_t start = BB_OFFSET(p[i]);
				int len = BB_LEN(p[i]);
				p[i] = BB_MAKE(start, len, 1);
			}
		}
		bb->unacked_exist = 0;
	}
	write_sequnlock_irq(&bb->lock);
}
EXPORT_SYMBOL_GPL(md_ack_all_badblocks);

/* sysfs access to bad-blocks list.
 * We present two files.
 * 'bad-blocks' lists sector numbers and lengths of ranges that
 *    are recorded as bad.  The list is truncated to fit within
 *    the one-page limit of sysfs.
 *    Writing "sector length" to this file adds an acknowledged
 *    bad block to the list.
 * 'unacknowledged-bad-blocks' lists bad blocks that have not yet
 *    been acknowledged.  Writing to this file adds bad blocks
 *    without acknowledging them.  This is largely for testing.
 */

static ssize_t
badblocks_show(struct badblocks *bb, char *page, int unack)
{
	size_t len;
	int i;
	u64 *p = bb->page;
	unsigned seq;

	if (bb->shift < 0)
		return 0;

retry:
	seq = read_seqbegin(&bb->lock);

	len = 0;
	i = 0;

	while (len < PAGE_SIZE && i < bb->count) {
		sector_t s = BB_OFFSET(p[i]);
		unsigned int length = BB_LEN(p[i]);
		int ack = BB_ACK(p[i]);
		i++;

		if (unack && ack)
			continue;

		len += snprintf(page+len, PAGE_SIZE-len, "%llu %u\n",
				(unsigned long long)s << bb->shift,
				length << bb->shift);
	}
	if (unack && len == 0)
		bb->unacked_exist = 0;

	if (read_seqretry(&bb->lock, seq))
		goto retry;

	return len;
}

#define DO_DEBUG 1

static ssize_t
badblocks_store(struct badblocks *bb, const char *page, size_t len, int unack)
{
	unsigned long long sector;
	int length;
	char newline;
#ifdef DO_DEBUG
	/* Allow clearing via sysfs *only* for testing/debugging.
	 * Normally only a successful write may clear a badblock.
	 */
	int clear = 0;
	if (page[0] == '-') {
		clear = 1;
		page++;
	}
#endif /* DO_DEBUG */

	switch (sscanf(page, "%llu %d%c", &sector, &length, &newline)) {
	case 3:
		if (newline != '\n')
			return -EINVAL;
		/* fall through */
	case 2:
		if (length <= 0)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

#ifdef DO_DEBUG
	if (clear) {
		md_clear_badblocks(bb, sector, length);
		return len;
	}
#endif /* DO_DEBUG */
	if (md_set_badblocks(bb, sector, length, !unack))
		return len;
	else
		return -ENOSPC;
}
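
/*
 * Usage sketch (attribute names and paths assumed, for illustration
 * only): these handlers back the per-rdev sysfs attributes, so from
 * userspace the interaction looks roughly like:
 *
 *	# add an acknowledged bad range of 8 sectors at sector 2048
 *	echo "2048 8" > /sys/block/md0/md/dev-sdb1/bad_blocks
 *	cat /sys/block/md0/md/dev-sdb1/bad_blocks
 *	# with DO_DEBUG, a leading '-' clears the range again
 *	echo "-2048 8" > /sys/block/md0/md/dev-sdb1/bad_blocks
 */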

static int md_notify_reboot(struct notifier_block *this,
			    unsigned long code, void *x)
{
	struct list_head *tmp;
	mddev_t *mddev;

	if ((code == SYS_DOWN) || (code == SYS_HALT) || (code == SYS_POWER_OFF)) {

		printk(KERN_INFO "md: stopping all md devices.\n");

		for_each_mddev(mddev, tmp)
			if (mddev_trylock(mddev)) {
				/* Force a switch to readonly even if the
				 * array appears to still be in use.  Hence
				 * the '100'.
				 */
				md_set_readonly(mddev, 100);
				mddev_unlock(mddev);
			}
		/*
		 * certain more exotic SCSI devices are known to be
		 * volatile wrt too early system reboots. While the
		 * right place to handle this issue is the given
		 * driver, we do want to have a safe RAID driver ...
		 */
		mdelay(1000*1);
	}
	return NOTIFY_DONE;
}

static struct notifier_block md_notifier = {
	.notifier_call	= md_notify_reboot,
	.next		= NULL,
	.priority	= INT_MAX, /* before any real devices */
};

static void md_geninit(void)
{
	dprintk("md: sizeof(mdp_super_t) = %d\n", (int)sizeof(mdp_super_t));

	proc_create("mdstat", S_IRUGO, NULL, &md_seq_fops);
}

static int __init md_init(void)
{
	int ret = -ENOMEM;

	md_wq = alloc_workqueue("md", WQ_MEM_RECLAIM, 0);
	if (!md_wq)
		goto err_wq;

	md_misc_wq = alloc_workqueue("md_misc", 0, 0);
	if (!md_misc_wq)
		goto err_misc_wq;

	if ((ret = register_blkdev(MD_MAJOR, "md")) < 0)
		goto err_md;

	/* a major of 0 asks register_blkdev() to allocate one dynamically */
	if ((ret = register_blkdev(0, "mdp")) < 0)
		goto err_mdp;
	mdp_major = ret;

	blk_register_region(MKDEV(MD_MAJOR, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);
	blk_register_region(MKDEV(mdp_major, 0), 1UL<<MINORBITS, THIS_MODULE,
			    md_probe, NULL, NULL);

	register_reboot_notifier(&md_notifier);
	raid_table_header = register_sysctl_table(raid_root_table);

	md_geninit();
	return 0;

err_mdp:
	unregister_blkdev(MD_MAJOR, "md");
err_md:
	destroy_workqueue(md_misc_wq);
err_misc_wq:
	destroy_workqueue(md_wq);
err_wq:
	return ret;
}

#ifndef MODULE

/*
 * Searches all registered partitions for autorun RAID arrays
 * at boot time.
 */

static LIST_HEAD(all_detected_devices);
struct detected_devices_node {
	struct list_head list;
	dev_t dev;
};

void md_autodetect_dev(dev_t dev)
{
	struct detected_devices_node *node_detected_dev;

	node_detected_dev = kzalloc(sizeof(*node_detected_dev), GFP_KERNEL);
	if (node_detected_dev) {
		node_detected_dev->dev = dev;
		list_add_tail(&node_detected_dev->list, &all_detected_devices);
	} else {
		printk(KERN_CRIT "md: md_autodetect_dev: kzalloc failed"
			", skipping dev(%d,%d)\n", MAJOR(dev), MINOR(dev));
	}
}

static void autostart_arrays(int part)
{
	mdk_rdev_t *rdev;
	struct detected_devices_node *node_detected_dev;
	dev_t dev;
	int i_scanned, i_passed;

	i_scanned = 0;
	i_passed = 0;

	printk(KERN_INFO "md: Autodetecting RAID arrays.\n");

	while (!list_empty(&all_detected_devices) && i_scanned < INT_MAX) {
		i_scanned++;
		node_detected_dev = list_entry(all_detected_devices.next,
					struct detected_devices_node, list);
		list_del(&node_detected_dev->list);
		dev = node_detected_dev->dev;
		kfree(node_detected_dev);
		rdev = md_import_device(dev, 0, 90);
		if (IS_ERR(rdev))
			continue;

		if (test_bit(Faulty, &rdev->flags)) {
			MD_BUG();
			continue;
		}
		set_bit(AutoDetected, &rdev->flags);
		list_add(&rdev->same_set, &pending_raid_disks);
		i_passed++;
	}

	printk(KERN_INFO "md: Scanned %d and added %d devices.\n",
		i_scanned, i_passed);

	autorun_devices(part);
}

#endif /* !MODULE */

static __exit void md_exit(void)
{
	mddev_t *mddev;
	struct list_head *tmp;

	blk_unregister_region(MKDEV(MD_MAJOR,0), 1U << MINORBITS);
	blk_unregister_region(MKDEV(mdp_major,0), 1U << MINORBITS);

	unregister_blkdev(MD_MAJOR,"md");
	unregister_blkdev(mdp_major, "mdp");
	unregister_reboot_notifier(&md_notifier);
	unregister_sysctl_table(raid_table_header);
	remove_proc_entry("mdstat", NULL);
	for_each_mddev(mddev, tmp) {
		export_array(mddev);
		mddev->hold_active = 0;
	}
	destroy_workqueue(md_misc_wq);
	destroy_workqueue(md_wq);
}

subsys_initcall(md_init);
module_exit(md_exit)

static int get_ro(char *buffer, struct kernel_param *kp)
{
	return sprintf(buffer, "%d", start_readonly);
}
static int set_ro(const char *val, struct kernel_param *kp)
{
	char *e;
	int num = simple_strtoul(val, &e, 10);
	if (*val && (*e == '\0' || *e == '\n')) {
		start_readonly = num;
		return 0;
	}
	return -EINVAL;
}

module_param_call(start_ro, set_ro, get_ro, NULL, S_IRUSR|S_IWUSR);
module_param(start_dirty_degraded, int, S_IRUGO|S_IWUSR);

module_param_call(new_array, add_named_array, NULL, NULL, S_IWUSR);

EXPORT_SYMBOL(register_md_personality);
EXPORT_SYMBOL(unregister_md_personality);
EXPORT_SYMBOL(md_error);
EXPORT_SYMBOL(md_done_sync);
EXPORT_SYMBOL(md_write_start);
EXPORT_SYMBOL(md_write_end);
EXPORT_SYMBOL(md_register_thread);
EXPORT_SYMBOL(md_unregister_thread);
EXPORT_SYMBOL(md_wakeup_thread);
EXPORT_SYMBOL(md_check_recovery);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("MD RAID framework");
MODULE_ALIAS("md");
MODULE_ALIAS_BLOCKDEV_MAJOR(MD_MAJOR);
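
/*
 * Usage sketch (paths assumed, for illustration only): the parameters
 * registered above appear under /sys/module/md_mod/parameters/ at
 * runtime, so e.g.
 *
 *	echo 1 > /sys/module/md_mod/parameters/start_ro
 *
 * is meant to make newly assembled arrays start read-only; set_ro()
 * accepts a decimal value with an optional trailing newline and rejects
 * anything else with -EINVAL.
 */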