1 /* 2 * raid1.c : Multiple Devices driver for Linux 3 * 4 * Copyright (C) 1999, 2000, 2001 Ingo Molnar, Red Hat 5 * 6 * Copyright (C) 1996, 1997, 1998 Ingo Molnar, Miguel de Icaza, Gadi Oxman 7 * 8 * RAID-1 management functions. 9 * 10 * Better read-balancing code written by Mika Kuoppala <miku@iki.fi>, 2000 11 * 12 * Fixes to reconstruction by Jakob Østergaard" <jakob@ostenfeld.dk> 13 * Various fixes by Neil Brown <neilb@cse.unsw.edu.au> 14 * 15 * Changes by Peter T. Breuer <ptb@it.uc3m.es> 31/1/2003 to support 16 * bitmapped intelligence in resync: 17 * 18 * - bitmap marked during normal i/o 19 * - bitmap used to skip nondirty blocks during sync 20 * 21 * Additions to bitmap code, (C) 2003-2004 Paul Clements, SteelEye Technology: 22 * - persistent bitmap code 23 * 24 * This program is free software; you can redistribute it and/or modify 25 * it under the terms of the GNU General Public License as published by 26 * the Free Software Foundation; either version 2, or (at your option) 27 * any later version. 28 * 29 * You should have received a copy of the GNU General Public License 30 * (for example /usr/src/linux/COPYING); if not, write to the Free 31 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 32 */ 33 34 #include <linux/slab.h> 35 #include <linux/delay.h> 36 #include <linux/blkdev.h> 37 #include <linux/seq_file.h> 38 #include <linux/ratelimit.h> 39 #include "md.h" 40 #include "raid1.h" 41 #include "bitmap.h" 42 43 #define DEBUG 0 44 #define PRINTK(x...) do { if (DEBUG) printk(x); } while (0) 45 46 /* 47 * Number of guaranteed r1bios in case of extreme VM load: 48 */ 49 #define NR_RAID1_BIOS 256 50 51 52 static void allow_barrier(conf_t *conf); 53 static void lower_barrier(conf_t *conf); 54 55 static void * r1bio_pool_alloc(gfp_t gfp_flags, void *data) 56 { 57 struct pool_info *pi = data; 58 int size = offsetof(r1bio_t, bios[pi->raid_disks]); 59 60 /* allocate a r1bio with room for raid_disks entries in the bios array */ 61 return kzalloc(size, gfp_flags); 62 } 63 64 static void r1bio_pool_free(void *r1_bio, void *data) 65 { 66 kfree(r1_bio); 67 } 68 69 #define RESYNC_BLOCK_SIZE (64*1024) 70 //#define RESYNC_BLOCK_SIZE PAGE_SIZE 71 #define RESYNC_SECTORS (RESYNC_BLOCK_SIZE >> 9) 72 #define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE) 73 #define RESYNC_WINDOW (2048*1024) 74 75 static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) 76 { 77 struct pool_info *pi = data; 78 struct page *page; 79 r1bio_t *r1_bio; 80 struct bio *bio; 81 int i, j; 82 83 r1_bio = r1bio_pool_alloc(gfp_flags, pi); 84 if (!r1_bio) 85 return NULL; 86 87 /* 88 * Allocate bios : 1 for reading, n-1 for writing 89 */ 90 for (j = pi->raid_disks ; j-- ; ) { 91 bio = bio_kmalloc(gfp_flags, RESYNC_PAGES); 92 if (!bio) 93 goto out_free_bio; 94 r1_bio->bios[j] = bio; 95 } 96 /* 97 * Allocate RESYNC_PAGES data pages and attach them to 98 * the first bio. 99 * If this is a user-requested check/repair, allocate 100 * RESYNC_PAGES for each bio. 
101 */ 102 if (test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) 103 j = pi->raid_disks; 104 else 105 j = 1; 106 while(j--) { 107 bio = r1_bio->bios[j]; 108 for (i = 0; i < RESYNC_PAGES; i++) { 109 page = alloc_page(gfp_flags); 110 if (unlikely(!page)) 111 goto out_free_pages; 112 113 bio->bi_io_vec[i].bv_page = page; 114 bio->bi_vcnt = i+1; 115 } 116 } 117 /* If not user-requests, copy the page pointers to all bios */ 118 if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) { 119 for (i=0; i<RESYNC_PAGES ; i++) 120 for (j=1; j<pi->raid_disks; j++) 121 r1_bio->bios[j]->bi_io_vec[i].bv_page = 122 r1_bio->bios[0]->bi_io_vec[i].bv_page; 123 } 124 125 r1_bio->master_bio = NULL; 126 127 return r1_bio; 128 129 out_free_pages: 130 for (j=0 ; j < pi->raid_disks; j++) 131 for (i=0; i < r1_bio->bios[j]->bi_vcnt ; i++) 132 put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page); 133 j = -1; 134 out_free_bio: 135 while ( ++j < pi->raid_disks ) 136 bio_put(r1_bio->bios[j]); 137 r1bio_pool_free(r1_bio, data); 138 return NULL; 139 } 140 141 static void r1buf_pool_free(void *__r1_bio, void *data) 142 { 143 struct pool_info *pi = data; 144 int i,j; 145 r1bio_t *r1bio = __r1_bio; 146 147 for (i = 0; i < RESYNC_PAGES; i++) 148 for (j = pi->raid_disks; j-- ;) { 149 if (j == 0 || 150 r1bio->bios[j]->bi_io_vec[i].bv_page != 151 r1bio->bios[0]->bi_io_vec[i].bv_page) 152 safe_put_page(r1bio->bios[j]->bi_io_vec[i].bv_page); 153 } 154 for (i=0 ; i < pi->raid_disks; i++) 155 bio_put(r1bio->bios[i]); 156 157 r1bio_pool_free(r1bio, data); 158 } 159 160 static void put_all_bios(conf_t *conf, r1bio_t *r1_bio) 161 { 162 int i; 163 164 for (i = 0; i < conf->raid_disks; i++) { 165 struct bio **bio = r1_bio->bios + i; 166 if (!BIO_SPECIAL(*bio)) 167 bio_put(*bio); 168 *bio = NULL; 169 } 170 } 171 172 static void free_r1bio(r1bio_t *r1_bio) 173 { 174 conf_t *conf = r1_bio->mddev->private; 175 176 put_all_bios(conf, r1_bio); 177 mempool_free(r1_bio, conf->r1bio_pool); 178 } 179 180 static void put_buf(r1bio_t *r1_bio) 181 { 182 conf_t *conf = r1_bio->mddev->private; 183 int i; 184 185 for (i=0; i<conf->raid_disks; i++) { 186 struct bio *bio = r1_bio->bios[i]; 187 if (bio->bi_end_io) 188 rdev_dec_pending(conf->mirrors[i].rdev, r1_bio->mddev); 189 } 190 191 mempool_free(r1_bio, conf->r1buf_pool); 192 193 lower_barrier(conf); 194 } 195 196 static void reschedule_retry(r1bio_t *r1_bio) 197 { 198 unsigned long flags; 199 mddev_t *mddev = r1_bio->mddev; 200 conf_t *conf = mddev->private; 201 202 spin_lock_irqsave(&conf->device_lock, flags); 203 list_add(&r1_bio->retry_list, &conf->retry_list); 204 conf->nr_queued ++; 205 spin_unlock_irqrestore(&conf->device_lock, flags); 206 207 wake_up(&conf->wait_barrier); 208 md_wakeup_thread(mddev->thread); 209 } 210 211 /* 212 * raid_end_bio_io() is called when we have finished servicing a mirrored 213 * operation and are ready to return a success/failure code to the buffer 214 * cache layer. 
215 */ 216 static void call_bio_endio(r1bio_t *r1_bio) 217 { 218 struct bio *bio = r1_bio->master_bio; 219 int done; 220 conf_t *conf = r1_bio->mddev->private; 221 222 if (bio->bi_phys_segments) { 223 unsigned long flags; 224 spin_lock_irqsave(&conf->device_lock, flags); 225 bio->bi_phys_segments--; 226 done = (bio->bi_phys_segments == 0); 227 spin_unlock_irqrestore(&conf->device_lock, flags); 228 } else 229 done = 1; 230 231 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) 232 clear_bit(BIO_UPTODATE, &bio->bi_flags); 233 if (done) { 234 bio_endio(bio, 0); 235 /* 236 * Wake up any possible resync thread that waits for the device 237 * to go idle. 238 */ 239 allow_barrier(conf); 240 } 241 } 242 243 static void raid_end_bio_io(r1bio_t *r1_bio) 244 { 245 struct bio *bio = r1_bio->master_bio; 246 247 /* if nobody has done the final endio yet, do it now */ 248 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { 249 PRINTK(KERN_DEBUG "raid1: sync end %s on sectors %llu-%llu\n", 250 (bio_data_dir(bio) == WRITE) ? "write" : "read", 251 (unsigned long long) bio->bi_sector, 252 (unsigned long long) bio->bi_sector + 253 (bio->bi_size >> 9) - 1); 254 255 call_bio_endio(r1_bio); 256 } 257 free_r1bio(r1_bio); 258 } 259 260 /* 261 * Update disk head position estimator based on IRQ completion info. 262 */ 263 static inline void update_head_pos(int disk, r1bio_t *r1_bio) 264 { 265 conf_t *conf = r1_bio->mddev->private; 266 267 conf->mirrors[disk].head_position = 268 r1_bio->sector + (r1_bio->sectors); 269 } 270 271 static void raid1_end_read_request(struct bio *bio, int error) 272 { 273 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 274 r1bio_t *r1_bio = bio->bi_private; 275 int mirror; 276 conf_t *conf = r1_bio->mddev->private; 277 278 mirror = r1_bio->read_disk; 279 /* 280 * this branch is our 'one mirror IO has finished' event handler: 281 */ 282 update_head_pos(mirror, r1_bio); 283 284 if (uptodate) 285 set_bit(R1BIO_Uptodate, &r1_bio->state); 286 else { 287 /* If all other devices have failed, we want to return 288 * the error upwards rather than fail the last device. 
289 * Here we redefine "uptodate" to mean "Don't want to retry" 290 */ 291 unsigned long flags; 292 spin_lock_irqsave(&conf->device_lock, flags); 293 if (r1_bio->mddev->degraded == conf->raid_disks || 294 (r1_bio->mddev->degraded == conf->raid_disks-1 && 295 !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))) 296 uptodate = 1; 297 spin_unlock_irqrestore(&conf->device_lock, flags); 298 } 299 300 if (uptodate) 301 raid_end_bio_io(r1_bio); 302 else { 303 /* 304 * oops, read error: 305 */ 306 char b[BDEVNAME_SIZE]; 307 printk_ratelimited( 308 KERN_ERR "md/raid1:%s: %s: " 309 "rescheduling sector %llu\n", 310 mdname(conf->mddev), 311 bdevname(conf->mirrors[mirror].rdev->bdev, 312 b), 313 (unsigned long long)r1_bio->sector); 314 set_bit(R1BIO_ReadError, &r1_bio->state); 315 reschedule_retry(r1_bio); 316 } 317 318 rdev_dec_pending(conf->mirrors[mirror].rdev, conf->mddev); 319 } 320 321 static void close_write(r1bio_t *r1_bio) 322 { 323 /* it really is the end of this request */ 324 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { 325 /* free extra copy of the data pages */ 326 int i = r1_bio->behind_page_count; 327 while (i--) 328 safe_put_page(r1_bio->behind_bvecs[i].bv_page); 329 kfree(r1_bio->behind_bvecs); 330 r1_bio->behind_bvecs = NULL; 331 } 332 /* clear the bitmap if all writes complete successfully */ 333 bitmap_endwrite(r1_bio->mddev->bitmap, r1_bio->sector, 334 r1_bio->sectors, 335 !test_bit(R1BIO_Degraded, &r1_bio->state), 336 test_bit(R1BIO_BehindIO, &r1_bio->state)); 337 md_write_end(r1_bio->mddev); 338 } 339 340 static void r1_bio_write_done(r1bio_t *r1_bio) 341 { 342 if (!atomic_dec_and_test(&r1_bio->remaining)) 343 return; 344 345 if (test_bit(R1BIO_WriteError, &r1_bio->state)) 346 reschedule_retry(r1_bio); 347 else { 348 close_write(r1_bio); 349 if (test_bit(R1BIO_MadeGood, &r1_bio->state)) 350 reschedule_retry(r1_bio); 351 else 352 raid_end_bio_io(r1_bio); 353 } 354 } 355 356 static void raid1_end_write_request(struct bio *bio, int error) 357 { 358 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 359 r1bio_t *r1_bio = bio->bi_private; 360 int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state); 361 conf_t *conf = r1_bio->mddev->private; 362 struct bio *to_put = NULL; 363 364 365 for (mirror = 0; mirror < conf->raid_disks; mirror++) 366 if (r1_bio->bios[mirror] == bio) 367 break; 368 369 /* 370 * 'one mirror IO has finished' event handler: 371 */ 372 if (!uptodate) { 373 set_bit(WriteErrorSeen, 374 &conf->mirrors[mirror].rdev->flags); 375 set_bit(R1BIO_WriteError, &r1_bio->state); 376 } else { 377 /* 378 * Set R1BIO_Uptodate in our master bio, so that we 379 * will return a good error code for to the higher 380 * levels even if IO on some other mirrored buffer 381 * fails. 382 * 383 * The 'master' represents the composite IO operation 384 * to user-side. So if something waits for IO, then it 385 * will wait for the 'master' bio. 386 */ 387 sector_t first_bad; 388 int bad_sectors; 389 390 r1_bio->bios[mirror] = NULL; 391 to_put = bio; 392 set_bit(R1BIO_Uptodate, &r1_bio->state); 393 394 /* Maybe we can clear some bad blocks. 
*/ 395 if (is_badblock(conf->mirrors[mirror].rdev, 396 r1_bio->sector, r1_bio->sectors, 397 &first_bad, &bad_sectors)) { 398 r1_bio->bios[mirror] = IO_MADE_GOOD; 399 set_bit(R1BIO_MadeGood, &r1_bio->state); 400 } 401 } 402 403 update_head_pos(mirror, r1_bio); 404 405 if (behind) { 406 if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags)) 407 atomic_dec(&r1_bio->behind_remaining); 408 409 /* 410 * In behind mode, we ACK the master bio once the I/O 411 * has safely reached all non-writemostly 412 * disks. Setting the Returned bit ensures that this 413 * gets done only once -- we don't ever want to return 414 * -EIO here, instead we'll wait 415 */ 416 if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) && 417 test_bit(R1BIO_Uptodate, &r1_bio->state)) { 418 /* Maybe we can return now */ 419 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { 420 struct bio *mbio = r1_bio->master_bio; 421 PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n", 422 (unsigned long long) mbio->bi_sector, 423 (unsigned long long) mbio->bi_sector + 424 (mbio->bi_size >> 9) - 1); 425 call_bio_endio(r1_bio); 426 } 427 } 428 } 429 if (r1_bio->bios[mirror] == NULL) 430 rdev_dec_pending(conf->mirrors[mirror].rdev, 431 conf->mddev); 432 433 /* 434 * Let's see if all mirrored write operations have finished 435 * already. 436 */ 437 r1_bio_write_done(r1_bio); 438 439 if (to_put) 440 bio_put(to_put); 441 } 442 443 444 /* 445 * This routine returns the disk from which the requested read should 446 * be done. There is a per-array 'next expected sequential IO' sector 447 * number - if this matches on the next IO then we use the last disk. 448 * There is also a per-disk 'last know head position' sector that is 449 * maintained from IRQ contexts, both the normal and the resync IO 450 * completion handlers update this position correctly. If there is no 451 * perfect sequential match then we pick the disk whose head is closest. 452 * 453 * If there are 2 mirrors in the same 2 devices, performance degrades 454 * because position is mirror, not device based. 455 * 456 * The rdev for the device selected will have nr_pending incremented. 457 */ 458 static int read_balance(conf_t *conf, r1bio_t *r1_bio, int *max_sectors) 459 { 460 const sector_t this_sector = r1_bio->sector; 461 int sectors; 462 int best_good_sectors; 463 int start_disk; 464 int best_disk; 465 int i; 466 sector_t best_dist; 467 mdk_rdev_t *rdev; 468 int choose_first; 469 470 rcu_read_lock(); 471 /* 472 * Check if we can balance. We can balance on the whole 473 * device if no resync is going on, or below the resync window. 474 * We take the first readable disk when above the resync window. 
475 */ 476 retry: 477 sectors = r1_bio->sectors; 478 best_disk = -1; 479 best_dist = MaxSector; 480 best_good_sectors = 0; 481 482 if (conf->mddev->recovery_cp < MaxSector && 483 (this_sector + sectors >= conf->next_resync)) { 484 choose_first = 1; 485 start_disk = 0; 486 } else { 487 choose_first = 0; 488 start_disk = conf->last_used; 489 } 490 491 for (i = 0 ; i < conf->raid_disks ; i++) { 492 sector_t dist; 493 sector_t first_bad; 494 int bad_sectors; 495 496 int disk = start_disk + i; 497 if (disk >= conf->raid_disks) 498 disk -= conf->raid_disks; 499 500 rdev = rcu_dereference(conf->mirrors[disk].rdev); 501 if (r1_bio->bios[disk] == IO_BLOCKED 502 || rdev == NULL 503 || test_bit(Faulty, &rdev->flags)) 504 continue; 505 if (!test_bit(In_sync, &rdev->flags) && 506 rdev->recovery_offset < this_sector + sectors) 507 continue; 508 if (test_bit(WriteMostly, &rdev->flags)) { 509 /* Don't balance among write-mostly, just 510 * use the first as a last resort */ 511 if (best_disk < 0) 512 best_disk = disk; 513 continue; 514 } 515 /* This is a reasonable device to use. It might 516 * even be best. 517 */ 518 if (is_badblock(rdev, this_sector, sectors, 519 &first_bad, &bad_sectors)) { 520 if (best_dist < MaxSector) 521 /* already have a better device */ 522 continue; 523 if (first_bad <= this_sector) { 524 /* cannot read here. If this is the 'primary' 525 * device, then we must not read beyond 526 * bad_sectors from another device.. 527 */ 528 bad_sectors -= (this_sector - first_bad); 529 if (choose_first && sectors > bad_sectors) 530 sectors = bad_sectors; 531 if (best_good_sectors > sectors) 532 best_good_sectors = sectors; 533 534 } else { 535 sector_t good_sectors = first_bad - this_sector; 536 if (good_sectors > best_good_sectors) { 537 best_good_sectors = good_sectors; 538 best_disk = disk; 539 } 540 if (choose_first) 541 break; 542 } 543 continue; 544 } else 545 best_good_sectors = sectors; 546 547 dist = abs(this_sector - conf->mirrors[disk].head_position); 548 if (choose_first 549 /* Don't change to another disk for sequential reads */ 550 || conf->next_seq_sect == this_sector 551 || dist == 0 552 /* If device is idle, use it */ 553 || atomic_read(&rdev->nr_pending) == 0) { 554 best_disk = disk; 555 break; 556 } 557 if (dist < best_dist) { 558 best_dist = dist; 559 best_disk = disk; 560 } 561 } 562 563 if (best_disk >= 0) { 564 rdev = rcu_dereference(conf->mirrors[best_disk].rdev); 565 if (!rdev) 566 goto retry; 567 atomic_inc(&rdev->nr_pending); 568 if (test_bit(Faulty, &rdev->flags)) { 569 /* cannot risk returning a device that failed 570 * before we inc'ed nr_pending 571 */ 572 rdev_dec_pending(rdev, conf->mddev); 573 goto retry; 574 } 575 sectors = best_good_sectors; 576 conf->next_seq_sect = this_sector + sectors; 577 conf->last_used = best_disk; 578 } 579 rcu_read_unlock(); 580 *max_sectors = sectors; 581 582 return best_disk; 583 } 584 585 int md_raid1_congested(mddev_t *mddev, int bits) 586 { 587 conf_t *conf = mddev->private; 588 int i, ret = 0; 589 590 rcu_read_lock(); 591 for (i = 0; i < mddev->raid_disks; i++) { 592 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 593 if (rdev && !test_bit(Faulty, &rdev->flags)) { 594 struct request_queue *q = bdev_get_queue(rdev->bdev); 595 596 BUG_ON(!q); 597 598 /* Note the '|| 1' - when read_balance prefers 599 * non-congested targets, it can be removed 600 */ 601 if ((bits & (1<<BDI_async_congested)) || 1) 602 ret |= bdi_congested(&q->backing_dev_info, bits); 603 else 604 ret &= bdi_congested(&q->backing_dev_info, bits); 605 } 
606 } 607 rcu_read_unlock(); 608 return ret; 609 } 610 EXPORT_SYMBOL_GPL(md_raid1_congested); 611 612 static int raid1_congested(void *data, int bits) 613 { 614 mddev_t *mddev = data; 615 616 return mddev_congested(mddev, bits) || 617 md_raid1_congested(mddev, bits); 618 } 619 620 static void flush_pending_writes(conf_t *conf) 621 { 622 /* Any writes that have been queued but are awaiting 623 * bitmap updates get flushed here. 624 */ 625 spin_lock_irq(&conf->device_lock); 626 627 if (conf->pending_bio_list.head) { 628 struct bio *bio; 629 bio = bio_list_get(&conf->pending_bio_list); 630 spin_unlock_irq(&conf->device_lock); 631 /* flush any pending bitmap writes to 632 * disk before proceeding w/ I/O */ 633 bitmap_unplug(conf->mddev->bitmap); 634 635 while (bio) { /* submit pending writes */ 636 struct bio *next = bio->bi_next; 637 bio->bi_next = NULL; 638 generic_make_request(bio); 639 bio = next; 640 } 641 } else 642 spin_unlock_irq(&conf->device_lock); 643 } 644 645 /* Barriers.... 646 * Sometimes we need to suspend IO while we do something else, 647 * either some resync/recovery, or reconfigure the array. 648 * To do this we raise a 'barrier'. 649 * The 'barrier' is a counter that can be raised multiple times 650 * to count how many activities are happening which preclude 651 * normal IO. 652 * We can only raise the barrier if there is no pending IO. 653 * i.e. if nr_pending == 0. 654 * We choose only to raise the barrier if no-one is waiting for the 655 * barrier to go down. This means that as soon as an IO request 656 * is ready, no other operations which require a barrier will start 657 * until the IO request has had a chance. 658 * 659 * So: regular IO calls 'wait_barrier'. When that returns there 660 * is no backgroup IO happening, It must arrange to call 661 * allow_barrier when it has finished its IO. 662 * backgroup IO calls must call raise_barrier. Once that returns 663 * there is no normal IO happeing. It must arrange to call 664 * lower_barrier when the particular background IO completes. 
665 */ 666 #define RESYNC_DEPTH 32 667 668 static void raise_barrier(conf_t *conf) 669 { 670 spin_lock_irq(&conf->resync_lock); 671 672 /* Wait until no block IO is waiting */ 673 wait_event_lock_irq(conf->wait_barrier, !conf->nr_waiting, 674 conf->resync_lock, ); 675 676 /* block any new IO from starting */ 677 conf->barrier++; 678 679 /* Now wait for all pending IO to complete */ 680 wait_event_lock_irq(conf->wait_barrier, 681 !conf->nr_pending && conf->barrier < RESYNC_DEPTH, 682 conf->resync_lock, ); 683 684 spin_unlock_irq(&conf->resync_lock); 685 } 686 687 static void lower_barrier(conf_t *conf) 688 { 689 unsigned long flags; 690 BUG_ON(conf->barrier <= 0); 691 spin_lock_irqsave(&conf->resync_lock, flags); 692 conf->barrier--; 693 spin_unlock_irqrestore(&conf->resync_lock, flags); 694 wake_up(&conf->wait_barrier); 695 } 696 697 static void wait_barrier(conf_t *conf) 698 { 699 spin_lock_irq(&conf->resync_lock); 700 if (conf->barrier) { 701 conf->nr_waiting++; 702 wait_event_lock_irq(conf->wait_barrier, !conf->barrier, 703 conf->resync_lock, 704 ); 705 conf->nr_waiting--; 706 } 707 conf->nr_pending++; 708 spin_unlock_irq(&conf->resync_lock); 709 } 710 711 static void allow_barrier(conf_t *conf) 712 { 713 unsigned long flags; 714 spin_lock_irqsave(&conf->resync_lock, flags); 715 conf->nr_pending--; 716 spin_unlock_irqrestore(&conf->resync_lock, flags); 717 wake_up(&conf->wait_barrier); 718 } 719 720 static void freeze_array(conf_t *conf) 721 { 722 /* stop syncio and normal IO and wait for everything to 723 * go quite. 724 * We increment barrier and nr_waiting, and then 725 * wait until nr_pending match nr_queued+1 726 * This is called in the context of one normal IO request 727 * that has failed. Thus any sync request that might be pending 728 * will be blocked by nr_pending, and we need to wait for 729 * pending IO requests to complete or be queued for re-try. 730 * Thus the number queued (nr_queued) plus this request (1) 731 * must match the number of pending IOs (nr_pending) before 732 * we continue. 
733 */ 734 spin_lock_irq(&conf->resync_lock); 735 conf->barrier++; 736 conf->nr_waiting++; 737 wait_event_lock_irq(conf->wait_barrier, 738 conf->nr_pending == conf->nr_queued+1, 739 conf->resync_lock, 740 flush_pending_writes(conf)); 741 spin_unlock_irq(&conf->resync_lock); 742 } 743 static void unfreeze_array(conf_t *conf) 744 { 745 /* reverse the effect of the freeze */ 746 spin_lock_irq(&conf->resync_lock); 747 conf->barrier--; 748 conf->nr_waiting--; 749 wake_up(&conf->wait_barrier); 750 spin_unlock_irq(&conf->resync_lock); 751 } 752 753 754 /* duplicate the data pages for behind I/O 755 */ 756 static void alloc_behind_pages(struct bio *bio, r1bio_t *r1_bio) 757 { 758 int i; 759 struct bio_vec *bvec; 760 struct bio_vec *bvecs = kzalloc(bio->bi_vcnt * sizeof(struct bio_vec), 761 GFP_NOIO); 762 if (unlikely(!bvecs)) 763 return; 764 765 bio_for_each_segment(bvec, bio, i) { 766 bvecs[i] = *bvec; 767 bvecs[i].bv_page = alloc_page(GFP_NOIO); 768 if (unlikely(!bvecs[i].bv_page)) 769 goto do_sync_io; 770 memcpy(kmap(bvecs[i].bv_page) + bvec->bv_offset, 771 kmap(bvec->bv_page) + bvec->bv_offset, bvec->bv_len); 772 kunmap(bvecs[i].bv_page); 773 kunmap(bvec->bv_page); 774 } 775 r1_bio->behind_bvecs = bvecs; 776 r1_bio->behind_page_count = bio->bi_vcnt; 777 set_bit(R1BIO_BehindIO, &r1_bio->state); 778 return; 779 780 do_sync_io: 781 for (i = 0; i < bio->bi_vcnt; i++) 782 if (bvecs[i].bv_page) 783 put_page(bvecs[i].bv_page); 784 kfree(bvecs); 785 PRINTK("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); 786 } 787 788 static int make_request(mddev_t *mddev, struct bio * bio) 789 { 790 conf_t *conf = mddev->private; 791 mirror_info_t *mirror; 792 r1bio_t *r1_bio; 793 struct bio *read_bio; 794 int i, disks; 795 struct bitmap *bitmap; 796 unsigned long flags; 797 const int rw = bio_data_dir(bio); 798 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); 799 const unsigned long do_flush_fua = (bio->bi_rw & (REQ_FLUSH | REQ_FUA)); 800 mdk_rdev_t *blocked_rdev; 801 int plugged; 802 int first_clone; 803 int sectors_handled; 804 int max_sectors; 805 806 /* 807 * Register the new request and wait if the reconstruction 808 * thread has put up a bar for new requests. 809 * Continue immediately if no resync is active currently. 810 */ 811 812 md_write_start(mddev, bio); /* wait on superblock update early */ 813 814 if (bio_data_dir(bio) == WRITE && 815 bio->bi_sector + bio->bi_size/512 > mddev->suspend_lo && 816 bio->bi_sector < mddev->suspend_hi) { 817 /* As the suspend_* range is controlled by 818 * userspace, we want an interruptible 819 * wait. 820 */ 821 DEFINE_WAIT(w); 822 for (;;) { 823 flush_signals(current); 824 prepare_to_wait(&conf->wait_barrier, 825 &w, TASK_INTERRUPTIBLE); 826 if (bio->bi_sector + bio->bi_size/512 <= mddev->suspend_lo || 827 bio->bi_sector >= mddev->suspend_hi) 828 break; 829 schedule(); 830 } 831 finish_wait(&conf->wait_barrier, &w); 832 } 833 834 wait_barrier(conf); 835 836 bitmap = mddev->bitmap; 837 838 /* 839 * make_request() can abort the operation when READA is being 840 * used and no empty request is available. 841 * 842 */ 843 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); 844 845 r1_bio->master_bio = bio; 846 r1_bio->sectors = bio->bi_size >> 9; 847 r1_bio->state = 0; 848 r1_bio->mddev = mddev; 849 r1_bio->sector = bio->bi_sector; 850 851 /* We might need to issue multiple reads to different 852 * devices if there are bad blocks around, so we keep 853 * track of the number of reads in bio->bi_phys_segments. 
854 * If this is 0, there is only one r1_bio and no locking 855 * will be needed when requests complete. If it is 856 * non-zero, then it is the number of not-completed requests. 857 */ 858 bio->bi_phys_segments = 0; 859 clear_bit(BIO_SEG_VALID, &bio->bi_flags); 860 861 if (rw == READ) { 862 /* 863 * read balancing logic: 864 */ 865 int rdisk; 866 867 read_again: 868 rdisk = read_balance(conf, r1_bio, &max_sectors); 869 870 if (rdisk < 0) { 871 /* couldn't find anywhere to read from */ 872 raid_end_bio_io(r1_bio); 873 return 0; 874 } 875 mirror = conf->mirrors + rdisk; 876 877 if (test_bit(WriteMostly, &mirror->rdev->flags) && 878 bitmap) { 879 /* Reading from a write-mostly device must 880 * take care not to over-take any writes 881 * that are 'behind' 882 */ 883 wait_event(bitmap->behind_wait, 884 atomic_read(&bitmap->behind_writes) == 0); 885 } 886 r1_bio->read_disk = rdisk; 887 888 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); 889 md_trim_bio(read_bio, r1_bio->sector - bio->bi_sector, 890 max_sectors); 891 892 r1_bio->bios[rdisk] = read_bio; 893 894 read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset; 895 read_bio->bi_bdev = mirror->rdev->bdev; 896 read_bio->bi_end_io = raid1_end_read_request; 897 read_bio->bi_rw = READ | do_sync; 898 read_bio->bi_private = r1_bio; 899 900 if (max_sectors < r1_bio->sectors) { 901 /* could not read all from this device, so we will 902 * need another r1_bio. 903 */ 904 905 sectors_handled = (r1_bio->sector + max_sectors 906 - bio->bi_sector); 907 r1_bio->sectors = max_sectors; 908 spin_lock_irq(&conf->device_lock); 909 if (bio->bi_phys_segments == 0) 910 bio->bi_phys_segments = 2; 911 else 912 bio->bi_phys_segments++; 913 spin_unlock_irq(&conf->device_lock); 914 /* Cannot call generic_make_request directly 915 * as that will be queued in __make_request 916 * and subsequent mempool_alloc might block waiting 917 * for it. So hand bio over to raid1d. 918 */ 919 reschedule_retry(r1_bio); 920 921 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); 922 923 r1_bio->master_bio = bio; 924 r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled; 925 r1_bio->state = 0; 926 r1_bio->mddev = mddev; 927 r1_bio->sector = bio->bi_sector + sectors_handled; 928 goto read_again; 929 } else 930 generic_make_request(read_bio); 931 return 0; 932 } 933 934 /* 935 * WRITE: 936 */ 937 /* first select target devices under rcu_lock and 938 * inc refcount on their rdev. Record them by setting 939 * bios[x] to bio 940 * If there are known/acknowledged bad blocks on any device on 941 * which we have seen a write error, we want to avoid writing those 942 * blocks. 943 * This potentially requires several writes to write around 944 * the bad blocks. Each set of writes gets it's own r1bio 945 * with a set of bios attached. 
946 */ 947 plugged = mddev_check_plugged(mddev); 948 949 disks = conf->raid_disks; 950 retry_write: 951 blocked_rdev = NULL; 952 rcu_read_lock(); 953 max_sectors = r1_bio->sectors; 954 for (i = 0; i < disks; i++) { 955 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 956 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { 957 atomic_inc(&rdev->nr_pending); 958 blocked_rdev = rdev; 959 break; 960 } 961 r1_bio->bios[i] = NULL; 962 if (!rdev || test_bit(Faulty, &rdev->flags)) { 963 set_bit(R1BIO_Degraded, &r1_bio->state); 964 continue; 965 } 966 967 atomic_inc(&rdev->nr_pending); 968 if (test_bit(WriteErrorSeen, &rdev->flags)) { 969 sector_t first_bad; 970 int bad_sectors; 971 int is_bad; 972 973 is_bad = is_badblock(rdev, r1_bio->sector, 974 max_sectors, 975 &first_bad, &bad_sectors); 976 if (is_bad < 0) { 977 /* mustn't write here until the bad block is 978 * acknowledged*/ 979 set_bit(BlockedBadBlocks, &rdev->flags); 980 blocked_rdev = rdev; 981 break; 982 } 983 if (is_bad && first_bad <= r1_bio->sector) { 984 /* Cannot write here at all */ 985 bad_sectors -= (r1_bio->sector - first_bad); 986 if (bad_sectors < max_sectors) 987 /* mustn't write more than bad_sectors 988 * to other devices yet 989 */ 990 max_sectors = bad_sectors; 991 rdev_dec_pending(rdev, mddev); 992 /* We don't set R1BIO_Degraded as that 993 * only applies if the disk is 994 * missing, so it might be re-added, 995 * and we want to know to recover this 996 * chunk. 997 * In this case the device is here, 998 * and the fact that this chunk is not 999 * in-sync is recorded in the bad 1000 * block log 1001 */ 1002 continue; 1003 } 1004 if (is_bad) { 1005 int good_sectors = first_bad - r1_bio->sector; 1006 if (good_sectors < max_sectors) 1007 max_sectors = good_sectors; 1008 } 1009 } 1010 r1_bio->bios[i] = bio; 1011 } 1012 rcu_read_unlock(); 1013 1014 if (unlikely(blocked_rdev)) { 1015 /* Wait for this device to become unblocked */ 1016 int j; 1017 1018 for (j = 0; j < i; j++) 1019 if (r1_bio->bios[j]) 1020 rdev_dec_pending(conf->mirrors[j].rdev, mddev); 1021 r1_bio->state = 0; 1022 allow_barrier(conf); 1023 md_wait_for_blocked_rdev(blocked_rdev, mddev); 1024 wait_barrier(conf); 1025 goto retry_write; 1026 } 1027 1028 if (max_sectors < r1_bio->sectors) { 1029 /* We are splitting this write into multiple parts, so 1030 * we need to prepare for allocating another r1_bio. 1031 */ 1032 r1_bio->sectors = max_sectors; 1033 spin_lock_irq(&conf->device_lock); 1034 if (bio->bi_phys_segments == 0) 1035 bio->bi_phys_segments = 2; 1036 else 1037 bio->bi_phys_segments++; 1038 spin_unlock_irq(&conf->device_lock); 1039 } 1040 sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector; 1041 1042 atomic_set(&r1_bio->remaining, 1); 1043 atomic_set(&r1_bio->behind_remaining, 0); 1044 1045 first_clone = 1; 1046 for (i = 0; i < disks; i++) { 1047 struct bio *mbio; 1048 if (!r1_bio->bios[i]) 1049 continue; 1050 1051 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); 1052 md_trim_bio(mbio, r1_bio->sector - bio->bi_sector, max_sectors); 1053 1054 if (first_clone) { 1055 /* do behind I/O ? 
1056 * Not if there are too many, or cannot 1057 * allocate memory, or a reader on WriteMostly 1058 * is waiting for behind writes to flush */ 1059 if (bitmap && 1060 (atomic_read(&bitmap->behind_writes) 1061 < mddev->bitmap_info.max_write_behind) && 1062 !waitqueue_active(&bitmap->behind_wait)) 1063 alloc_behind_pages(mbio, r1_bio); 1064 1065 bitmap_startwrite(bitmap, r1_bio->sector, 1066 r1_bio->sectors, 1067 test_bit(R1BIO_BehindIO, 1068 &r1_bio->state)); 1069 first_clone = 0; 1070 } 1071 if (r1_bio->behind_bvecs) { 1072 struct bio_vec *bvec; 1073 int j; 1074 1075 /* Yes, I really want the '__' version so that 1076 * we clear any unused pointer in the io_vec, rather 1077 * than leave them unchanged. This is important 1078 * because when we come to free the pages, we won't 1079 * know the original bi_idx, so we just free 1080 * them all 1081 */ 1082 __bio_for_each_segment(bvec, mbio, j, 0) 1083 bvec->bv_page = r1_bio->behind_bvecs[j].bv_page; 1084 if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags)) 1085 atomic_inc(&r1_bio->behind_remaining); 1086 } 1087 1088 r1_bio->bios[i] = mbio; 1089 1090 mbio->bi_sector = (r1_bio->sector + 1091 conf->mirrors[i].rdev->data_offset); 1092 mbio->bi_bdev = conf->mirrors[i].rdev->bdev; 1093 mbio->bi_end_io = raid1_end_write_request; 1094 mbio->bi_rw = WRITE | do_flush_fua | do_sync; 1095 mbio->bi_private = r1_bio; 1096 1097 atomic_inc(&r1_bio->remaining); 1098 spin_lock_irqsave(&conf->device_lock, flags); 1099 bio_list_add(&conf->pending_bio_list, mbio); 1100 spin_unlock_irqrestore(&conf->device_lock, flags); 1101 } 1102 /* Mustn't call r1_bio_write_done before this next test, 1103 * as it could result in the bio being freed. 1104 */ 1105 if (sectors_handled < (bio->bi_size >> 9)) { 1106 r1_bio_write_done(r1_bio); 1107 /* We need another r1_bio. It has already been counted 1108 * in bio->bi_phys_segments 1109 */ 1110 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); 1111 r1_bio->master_bio = bio; 1112 r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled; 1113 r1_bio->state = 0; 1114 r1_bio->mddev = mddev; 1115 r1_bio->sector = bio->bi_sector + sectors_handled; 1116 goto retry_write; 1117 } 1118 1119 r1_bio_write_done(r1_bio); 1120 1121 /* In case raid1d snuck in to freeze_array */ 1122 wake_up(&conf->wait_barrier); 1123 1124 if (do_sync || !bitmap || !plugged) 1125 md_wakeup_thread(mddev->thread); 1126 1127 return 0; 1128 } 1129 1130 static void status(struct seq_file *seq, mddev_t *mddev) 1131 { 1132 conf_t *conf = mddev->private; 1133 int i; 1134 1135 seq_printf(seq, " [%d/%d] [", conf->raid_disks, 1136 conf->raid_disks - mddev->degraded); 1137 rcu_read_lock(); 1138 for (i = 0; i < conf->raid_disks; i++) { 1139 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 1140 seq_printf(seq, "%s", 1141 rdev && test_bit(In_sync, &rdev->flags) ? "U" : "_"); 1142 } 1143 rcu_read_unlock(); 1144 seq_printf(seq, "]"); 1145 } 1146 1147 1148 static void error(mddev_t *mddev, mdk_rdev_t *rdev) 1149 { 1150 char b[BDEVNAME_SIZE]; 1151 conf_t *conf = mddev->private; 1152 1153 /* 1154 * If it is not operational, then we have already marked it as dead 1155 * else if it is the last working disks, ignore the error, let the 1156 * next level up know. 1157 * else mark the drive as failed 1158 */ 1159 if (test_bit(In_sync, &rdev->flags) 1160 && (conf->raid_disks - mddev->degraded) == 1) { 1161 /* 1162 * Don't fail the drive, act as though we were just a 1163 * normal single drive. 
1164 * However don't try a recovery from this drive as 1165 * it is very likely to fail. 1166 */ 1167 conf->recovery_disabled = mddev->recovery_disabled; 1168 return; 1169 } 1170 set_bit(Blocked, &rdev->flags); 1171 if (test_and_clear_bit(In_sync, &rdev->flags)) { 1172 unsigned long flags; 1173 spin_lock_irqsave(&conf->device_lock, flags); 1174 mddev->degraded++; 1175 set_bit(Faulty, &rdev->flags); 1176 spin_unlock_irqrestore(&conf->device_lock, flags); 1177 /* 1178 * if recovery is running, make sure it aborts. 1179 */ 1180 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 1181 } else 1182 set_bit(Faulty, &rdev->flags); 1183 set_bit(MD_CHANGE_DEVS, &mddev->flags); 1184 printk(KERN_ALERT 1185 "md/raid1:%s: Disk failure on %s, disabling device.\n" 1186 "md/raid1:%s: Operation continuing on %d devices.\n", 1187 mdname(mddev), bdevname(rdev->bdev, b), 1188 mdname(mddev), conf->raid_disks - mddev->degraded); 1189 } 1190 1191 static void print_conf(conf_t *conf) 1192 { 1193 int i; 1194 1195 printk(KERN_DEBUG "RAID1 conf printout:\n"); 1196 if (!conf) { 1197 printk(KERN_DEBUG "(!conf)\n"); 1198 return; 1199 } 1200 printk(KERN_DEBUG " --- wd:%d rd:%d\n", conf->raid_disks - conf->mddev->degraded, 1201 conf->raid_disks); 1202 1203 rcu_read_lock(); 1204 for (i = 0; i < conf->raid_disks; i++) { 1205 char b[BDEVNAME_SIZE]; 1206 mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev); 1207 if (rdev) 1208 printk(KERN_DEBUG " disk %d, wo:%d, o:%d, dev:%s\n", 1209 i, !test_bit(In_sync, &rdev->flags), 1210 !test_bit(Faulty, &rdev->flags), 1211 bdevname(rdev->bdev,b)); 1212 } 1213 rcu_read_unlock(); 1214 } 1215 1216 static void close_sync(conf_t *conf) 1217 { 1218 wait_barrier(conf); 1219 allow_barrier(conf); 1220 1221 mempool_destroy(conf->r1buf_pool); 1222 conf->r1buf_pool = NULL; 1223 } 1224 1225 static int raid1_spare_active(mddev_t *mddev) 1226 { 1227 int i; 1228 conf_t *conf = mddev->private; 1229 int count = 0; 1230 unsigned long flags; 1231 1232 /* 1233 * Find all failed disks within the RAID1 configuration 1234 * and mark them readable. 1235 * Called under mddev lock, so rcu protection not needed. 1236 */ 1237 for (i = 0; i < conf->raid_disks; i++) { 1238 mdk_rdev_t *rdev = conf->mirrors[i].rdev; 1239 if (rdev 1240 && !test_bit(Faulty, &rdev->flags) 1241 && !test_and_set_bit(In_sync, &rdev->flags)) { 1242 count++; 1243 sysfs_notify_dirent_safe(rdev->sysfs_state); 1244 } 1245 } 1246 spin_lock_irqsave(&conf->device_lock, flags); 1247 mddev->degraded -= count; 1248 spin_unlock_irqrestore(&conf->device_lock, flags); 1249 1250 print_conf(conf); 1251 return count; 1252 } 1253 1254 1255 static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) 1256 { 1257 conf_t *conf = mddev->private; 1258 int err = -EEXIST; 1259 int mirror = 0; 1260 mirror_info_t *p; 1261 int first = 0; 1262 int last = mddev->raid_disks - 1; 1263 1264 if (mddev->recovery_disabled == conf->recovery_disabled) 1265 return -EBUSY; 1266 1267 if (rdev->raid_disk >= 0) 1268 first = last = rdev->raid_disk; 1269 1270 for (mirror = first; mirror <= last; mirror++) 1271 if ( !(p=conf->mirrors+mirror)->rdev) { 1272 1273 disk_stack_limits(mddev->gendisk, rdev->bdev, 1274 rdev->data_offset << 9); 1275 /* as we don't honour merge_bvec_fn, we must 1276 * never risk violating it, so limit 1277 * ->max_segments to one lying with a single 1278 * page, as a one page request is never in 1279 * violation. 
1280 */ 1281 if (rdev->bdev->bd_disk->queue->merge_bvec_fn) { 1282 blk_queue_max_segments(mddev->queue, 1); 1283 blk_queue_segment_boundary(mddev->queue, 1284 PAGE_CACHE_SIZE - 1); 1285 } 1286 1287 p->head_position = 0; 1288 rdev->raid_disk = mirror; 1289 err = 0; 1290 /* As all devices are equivalent, we don't need a full recovery 1291 * if this was recently any drive of the array 1292 */ 1293 if (rdev->saved_raid_disk < 0) 1294 conf->fullsync = 1; 1295 rcu_assign_pointer(p->rdev, rdev); 1296 break; 1297 } 1298 md_integrity_add_rdev(rdev, mddev); 1299 print_conf(conf); 1300 return err; 1301 } 1302 1303 static int raid1_remove_disk(mddev_t *mddev, int number) 1304 { 1305 conf_t *conf = mddev->private; 1306 int err = 0; 1307 mdk_rdev_t *rdev; 1308 mirror_info_t *p = conf->mirrors+ number; 1309 1310 print_conf(conf); 1311 rdev = p->rdev; 1312 if (rdev) { 1313 if (test_bit(In_sync, &rdev->flags) || 1314 atomic_read(&rdev->nr_pending)) { 1315 err = -EBUSY; 1316 goto abort; 1317 } 1318 /* Only remove non-faulty devices if recovery 1319 * is not possible. 1320 */ 1321 if (!test_bit(Faulty, &rdev->flags) && 1322 mddev->recovery_disabled != conf->recovery_disabled && 1323 mddev->degraded < conf->raid_disks) { 1324 err = -EBUSY; 1325 goto abort; 1326 } 1327 p->rdev = NULL; 1328 synchronize_rcu(); 1329 if (atomic_read(&rdev->nr_pending)) { 1330 /* lost the race, try later */ 1331 err = -EBUSY; 1332 p->rdev = rdev; 1333 goto abort; 1334 } 1335 err = md_integrity_register(mddev); 1336 } 1337 abort: 1338 1339 print_conf(conf); 1340 return err; 1341 } 1342 1343 1344 static void end_sync_read(struct bio *bio, int error) 1345 { 1346 r1bio_t *r1_bio = bio->bi_private; 1347 int i; 1348 1349 for (i=r1_bio->mddev->raid_disks; i--; ) 1350 if (r1_bio->bios[i] == bio) 1351 break; 1352 BUG_ON(i < 0); 1353 update_head_pos(i, r1_bio); 1354 /* 1355 * we have read a block, now it needs to be re-written, 1356 * or re-read if the read failed. 1357 * We don't do much here, just schedule handling by raid1d 1358 */ 1359 if (test_bit(BIO_UPTODATE, &bio->bi_flags)) 1360 set_bit(R1BIO_Uptodate, &r1_bio->state); 1361 1362 if (atomic_dec_and_test(&r1_bio->remaining)) 1363 reschedule_retry(r1_bio); 1364 } 1365 1366 static void end_sync_write(struct bio *bio, int error) 1367 { 1368 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 1369 r1bio_t *r1_bio = bio->bi_private; 1370 mddev_t *mddev = r1_bio->mddev; 1371 conf_t *conf = mddev->private; 1372 int i; 1373 int mirror=0; 1374 sector_t first_bad; 1375 int bad_sectors; 1376 1377 for (i = 0; i < conf->raid_disks; i++) 1378 if (r1_bio->bios[i] == bio) { 1379 mirror = i; 1380 break; 1381 } 1382 if (!uptodate) { 1383 sector_t sync_blocks = 0; 1384 sector_t s = r1_bio->sector; 1385 long sectors_to_go = r1_bio->sectors; 1386 /* make sure these bits doesn't get cleared. 
*/ 1387 do { 1388 bitmap_end_sync(mddev->bitmap, s, 1389 &sync_blocks, 1); 1390 s += sync_blocks; 1391 sectors_to_go -= sync_blocks; 1392 } while (sectors_to_go > 0); 1393 set_bit(WriteErrorSeen, 1394 &conf->mirrors[mirror].rdev->flags); 1395 set_bit(R1BIO_WriteError, &r1_bio->state); 1396 } else if (is_badblock(conf->mirrors[mirror].rdev, 1397 r1_bio->sector, 1398 r1_bio->sectors, 1399 &first_bad, &bad_sectors) && 1400 !is_badblock(conf->mirrors[r1_bio->read_disk].rdev, 1401 r1_bio->sector, 1402 r1_bio->sectors, 1403 &first_bad, &bad_sectors) 1404 ) 1405 set_bit(R1BIO_MadeGood, &r1_bio->state); 1406 1407 update_head_pos(mirror, r1_bio); 1408 1409 if (atomic_dec_and_test(&r1_bio->remaining)) { 1410 int s = r1_bio->sectors; 1411 if (test_bit(R1BIO_MadeGood, &r1_bio->state) || 1412 test_bit(R1BIO_WriteError, &r1_bio->state)) 1413 reschedule_retry(r1_bio); 1414 else { 1415 put_buf(r1_bio); 1416 md_done_sync(mddev, s, uptodate); 1417 } 1418 } 1419 } 1420 1421 static int r1_sync_page_io(mdk_rdev_t *rdev, sector_t sector, 1422 int sectors, struct page *page, int rw) 1423 { 1424 if (sync_page_io(rdev, sector, sectors << 9, page, rw, false)) 1425 /* success */ 1426 return 1; 1427 if (rw == WRITE) 1428 set_bit(WriteErrorSeen, &rdev->flags); 1429 /* need to record an error - either for the block or the device */ 1430 if (!rdev_set_badblocks(rdev, sector, sectors, 0)) 1431 md_error(rdev->mddev, rdev); 1432 return 0; 1433 } 1434 1435 static int fix_sync_read_error(r1bio_t *r1_bio) 1436 { 1437 /* Try some synchronous reads of other devices to get 1438 * good data, much like with normal read errors. Only 1439 * read into the pages we already have so we don't 1440 * need to re-issue the read request. 1441 * We don't need to freeze the array, because being in an 1442 * active sync request, there is no normal IO, and 1443 * no overlapping syncs. 1444 * We don't need to check is_badblock() again as we 1445 * made sure that anything with a bad block in range 1446 * will have bi_end_io clear. 1447 */ 1448 mddev_t *mddev = r1_bio->mddev; 1449 conf_t *conf = mddev->private; 1450 struct bio *bio = r1_bio->bios[r1_bio->read_disk]; 1451 sector_t sect = r1_bio->sector; 1452 int sectors = r1_bio->sectors; 1453 int idx = 0; 1454 1455 while(sectors) { 1456 int s = sectors; 1457 int d = r1_bio->read_disk; 1458 int success = 0; 1459 mdk_rdev_t *rdev; 1460 int start; 1461 1462 if (s > (PAGE_SIZE>>9)) 1463 s = PAGE_SIZE >> 9; 1464 do { 1465 if (r1_bio->bios[d]->bi_end_io == end_sync_read) { 1466 /* No rcu protection needed here devices 1467 * can only be removed when no resync is 1468 * active, and resync is currently active 1469 */ 1470 rdev = conf->mirrors[d].rdev; 1471 if (sync_page_io(rdev, sect, s<<9, 1472 bio->bi_io_vec[idx].bv_page, 1473 READ, false)) { 1474 success = 1; 1475 break; 1476 } 1477 } 1478 d++; 1479 if (d == conf->raid_disks) 1480 d = 0; 1481 } while (!success && d != r1_bio->read_disk); 1482 1483 if (!success) { 1484 char b[BDEVNAME_SIZE]; 1485 int abort = 0; 1486 /* Cannot read from anywhere, this block is lost. 1487 * Record a bad block on each device. If that doesn't 1488 * work just disable and interrupt the recovery. 1489 * Don't fail devices as that won't really help. 
1490 */ 1491 printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O read error" 1492 " for block %llu\n", 1493 mdname(mddev), 1494 bdevname(bio->bi_bdev, b), 1495 (unsigned long long)r1_bio->sector); 1496 for (d = 0; d < conf->raid_disks; d++) { 1497 rdev = conf->mirrors[d].rdev; 1498 if (!rdev || test_bit(Faulty, &rdev->flags)) 1499 continue; 1500 if (!rdev_set_badblocks(rdev, sect, s, 0)) 1501 abort = 1; 1502 } 1503 if (abort) { 1504 mddev->recovery_disabled = 1; 1505 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 1506 md_done_sync(mddev, r1_bio->sectors, 0); 1507 put_buf(r1_bio); 1508 return 0; 1509 } 1510 /* Try next page */ 1511 sectors -= s; 1512 sect += s; 1513 idx++; 1514 continue; 1515 } 1516 1517 start = d; 1518 /* write it back and re-read */ 1519 while (d != r1_bio->read_disk) { 1520 if (d == 0) 1521 d = conf->raid_disks; 1522 d--; 1523 if (r1_bio->bios[d]->bi_end_io != end_sync_read) 1524 continue; 1525 rdev = conf->mirrors[d].rdev; 1526 if (r1_sync_page_io(rdev, sect, s, 1527 bio->bi_io_vec[idx].bv_page, 1528 WRITE) == 0) { 1529 r1_bio->bios[d]->bi_end_io = NULL; 1530 rdev_dec_pending(rdev, mddev); 1531 } 1532 } 1533 d = start; 1534 while (d != r1_bio->read_disk) { 1535 if (d == 0) 1536 d = conf->raid_disks; 1537 d--; 1538 if (r1_bio->bios[d]->bi_end_io != end_sync_read) 1539 continue; 1540 rdev = conf->mirrors[d].rdev; 1541 if (r1_sync_page_io(rdev, sect, s, 1542 bio->bi_io_vec[idx].bv_page, 1543 READ) != 0) 1544 atomic_add(s, &rdev->corrected_errors); 1545 } 1546 sectors -= s; 1547 sect += s; 1548 idx ++; 1549 } 1550 set_bit(R1BIO_Uptodate, &r1_bio->state); 1551 set_bit(BIO_UPTODATE, &bio->bi_flags); 1552 return 1; 1553 } 1554 1555 static int process_checks(r1bio_t *r1_bio) 1556 { 1557 /* We have read all readable devices. If we haven't 1558 * got the block, then there is no hope left. 1559 * If we have, then we want to do a comparison 1560 * and skip the write if everything is the same. 1561 * If any blocks failed to read, then we need to 1562 * attempt an over-write 1563 */ 1564 mddev_t *mddev = r1_bio->mddev; 1565 conf_t *conf = mddev->private; 1566 int primary; 1567 int i; 1568 1569 for (primary = 0; primary < conf->raid_disks; primary++) 1570 if (r1_bio->bios[primary]->bi_end_io == end_sync_read && 1571 test_bit(BIO_UPTODATE, &r1_bio->bios[primary]->bi_flags)) { 1572 r1_bio->bios[primary]->bi_end_io = NULL; 1573 rdev_dec_pending(conf->mirrors[primary].rdev, mddev); 1574 break; 1575 } 1576 r1_bio->read_disk = primary; 1577 for (i = 0; i < conf->raid_disks; i++) { 1578 int j; 1579 int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9); 1580 struct bio *pbio = r1_bio->bios[primary]; 1581 struct bio *sbio = r1_bio->bios[i]; 1582 int size; 1583 1584 if (r1_bio->bios[i]->bi_end_io != end_sync_read) 1585 continue; 1586 1587 if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) { 1588 for (j = vcnt; j-- ; ) { 1589 struct page *p, *s; 1590 p = pbio->bi_io_vec[j].bv_page; 1591 s = sbio->bi_io_vec[j].bv_page; 1592 if (memcmp(page_address(p), 1593 page_address(s), 1594 PAGE_SIZE)) 1595 break; 1596 } 1597 } else 1598 j = 0; 1599 if (j >= 0) 1600 mddev->resync_mismatches += r1_bio->sectors; 1601 if (j < 0 || (test_bit(MD_RECOVERY_CHECK, &mddev->recovery) 1602 && test_bit(BIO_UPTODATE, &sbio->bi_flags))) { 1603 /* No need to write to this device. 
*/ 1604 sbio->bi_end_io = NULL; 1605 rdev_dec_pending(conf->mirrors[i].rdev, mddev); 1606 continue; 1607 } 1608 /* fixup the bio for reuse */ 1609 sbio->bi_vcnt = vcnt; 1610 sbio->bi_size = r1_bio->sectors << 9; 1611 sbio->bi_idx = 0; 1612 sbio->bi_phys_segments = 0; 1613 sbio->bi_flags &= ~(BIO_POOL_MASK - 1); 1614 sbio->bi_flags |= 1 << BIO_UPTODATE; 1615 sbio->bi_next = NULL; 1616 sbio->bi_sector = r1_bio->sector + 1617 conf->mirrors[i].rdev->data_offset; 1618 sbio->bi_bdev = conf->mirrors[i].rdev->bdev; 1619 size = sbio->bi_size; 1620 for (j = 0; j < vcnt ; j++) { 1621 struct bio_vec *bi; 1622 bi = &sbio->bi_io_vec[j]; 1623 bi->bv_offset = 0; 1624 if (size > PAGE_SIZE) 1625 bi->bv_len = PAGE_SIZE; 1626 else 1627 bi->bv_len = size; 1628 size -= PAGE_SIZE; 1629 memcpy(page_address(bi->bv_page), 1630 page_address(pbio->bi_io_vec[j].bv_page), 1631 PAGE_SIZE); 1632 } 1633 } 1634 return 0; 1635 } 1636 1637 static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) 1638 { 1639 conf_t *conf = mddev->private; 1640 int i; 1641 int disks = conf->raid_disks; 1642 struct bio *bio, *wbio; 1643 1644 bio = r1_bio->bios[r1_bio->read_disk]; 1645 1646 if (!test_bit(R1BIO_Uptodate, &r1_bio->state)) 1647 /* ouch - failed to read all of that. */ 1648 if (!fix_sync_read_error(r1_bio)) 1649 return; 1650 1651 if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) 1652 if (process_checks(r1_bio) < 0) 1653 return; 1654 /* 1655 * schedule writes 1656 */ 1657 atomic_set(&r1_bio->remaining, 1); 1658 for (i = 0; i < disks ; i++) { 1659 wbio = r1_bio->bios[i]; 1660 if (wbio->bi_end_io == NULL || 1661 (wbio->bi_end_io == end_sync_read && 1662 (i == r1_bio->read_disk || 1663 !test_bit(MD_RECOVERY_SYNC, &mddev->recovery)))) 1664 continue; 1665 1666 wbio->bi_rw = WRITE; 1667 wbio->bi_end_io = end_sync_write; 1668 atomic_inc(&r1_bio->remaining); 1669 md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9); 1670 1671 generic_make_request(wbio); 1672 } 1673 1674 if (atomic_dec_and_test(&r1_bio->remaining)) { 1675 /* if we're here, all write(s) have completed, so clean up */ 1676 md_done_sync(mddev, r1_bio->sectors, 1); 1677 put_buf(r1_bio); 1678 } 1679 } 1680 1681 /* 1682 * This is a kernel thread which: 1683 * 1684 * 1. Retries failed read operations on working mirrors. 1685 * 2. Updates the raid superblock when problems encounter. 1686 * 3. Performs writes following reads for array synchronising. 1687 */ 1688 1689 static void fix_read_error(conf_t *conf, int read_disk, 1690 sector_t sect, int sectors) 1691 { 1692 mddev_t *mddev = conf->mddev; 1693 while(sectors) { 1694 int s = sectors; 1695 int d = read_disk; 1696 int success = 0; 1697 int start; 1698 mdk_rdev_t *rdev; 1699 1700 if (s > (PAGE_SIZE>>9)) 1701 s = PAGE_SIZE >> 9; 1702 1703 do { 1704 /* Note: no rcu protection needed here 1705 * as this is synchronous in the raid1d thread 1706 * which is the thread that might remove 1707 * a device. If raid1d ever becomes multi-threaded.... 
1708 */ 1709 sector_t first_bad; 1710 int bad_sectors; 1711 1712 rdev = conf->mirrors[d].rdev; 1713 if (rdev && 1714 test_bit(In_sync, &rdev->flags) && 1715 is_badblock(rdev, sect, s, 1716 &first_bad, &bad_sectors) == 0 && 1717 sync_page_io(rdev, sect, s<<9, 1718 conf->tmppage, READ, false)) 1719 success = 1; 1720 else { 1721 d++; 1722 if (d == conf->raid_disks) 1723 d = 0; 1724 } 1725 } while (!success && d != read_disk); 1726 1727 if (!success) { 1728 /* Cannot read from anywhere - mark it bad */ 1729 mdk_rdev_t *rdev = conf->mirrors[read_disk].rdev; 1730 if (!rdev_set_badblocks(rdev, sect, s, 0)) 1731 md_error(mddev, rdev); 1732 break; 1733 } 1734 /* write it back and re-read */ 1735 start = d; 1736 while (d != read_disk) { 1737 if (d==0) 1738 d = conf->raid_disks; 1739 d--; 1740 rdev = conf->mirrors[d].rdev; 1741 if (rdev && 1742 test_bit(In_sync, &rdev->flags)) 1743 r1_sync_page_io(rdev, sect, s, 1744 conf->tmppage, WRITE); 1745 } 1746 d = start; 1747 while (d != read_disk) { 1748 char b[BDEVNAME_SIZE]; 1749 if (d==0) 1750 d = conf->raid_disks; 1751 d--; 1752 rdev = conf->mirrors[d].rdev; 1753 if (rdev && 1754 test_bit(In_sync, &rdev->flags)) { 1755 if (r1_sync_page_io(rdev, sect, s, 1756 conf->tmppage, READ)) { 1757 atomic_add(s, &rdev->corrected_errors); 1758 printk(KERN_INFO 1759 "md/raid1:%s: read error corrected " 1760 "(%d sectors at %llu on %s)\n", 1761 mdname(mddev), s, 1762 (unsigned long long)(sect + 1763 rdev->data_offset), 1764 bdevname(rdev->bdev, b)); 1765 } 1766 } 1767 } 1768 sectors -= s; 1769 sect += s; 1770 } 1771 } 1772 1773 static void bi_complete(struct bio *bio, int error) 1774 { 1775 complete((struct completion *)bio->bi_private); 1776 } 1777 1778 static int submit_bio_wait(int rw, struct bio *bio) 1779 { 1780 struct completion event; 1781 rw |= REQ_SYNC; 1782 1783 init_completion(&event); 1784 bio->bi_private = &event; 1785 bio->bi_end_io = bi_complete; 1786 submit_bio(rw, bio); 1787 wait_for_completion(&event); 1788 1789 return test_bit(BIO_UPTODATE, &bio->bi_flags); 1790 } 1791 1792 static int narrow_write_error(r1bio_t *r1_bio, int i) 1793 { 1794 mddev_t *mddev = r1_bio->mddev; 1795 conf_t *conf = mddev->private; 1796 mdk_rdev_t *rdev = conf->mirrors[i].rdev; 1797 int vcnt, idx; 1798 struct bio_vec *vec; 1799 1800 /* bio has the data to be written to device 'i' where 1801 * we just recently had a write error. 1802 * We repeatedly clone the bio and trim down to one block, 1803 * then try the write. Where the write fails we record 1804 * a bad block. 1805 * It is conceivable that the bio doesn't exactly align with 1806 * blocks. We must handle this somehow. 1807 * 1808 * We currently own a reference on the rdev. 
1809 */ 1810 1811 int block_sectors; 1812 sector_t sector; 1813 int sectors; 1814 int sect_to_write = r1_bio->sectors; 1815 int ok = 1; 1816 1817 if (rdev->badblocks.shift < 0) 1818 return 0; 1819 1820 block_sectors = 1 << rdev->badblocks.shift; 1821 sector = r1_bio->sector; 1822 sectors = ((sector + block_sectors) 1823 & ~(sector_t)(block_sectors - 1)) 1824 - sector; 1825 1826 if (test_bit(R1BIO_BehindIO, &r1_bio->state)) { 1827 vcnt = r1_bio->behind_page_count; 1828 vec = r1_bio->behind_bvecs; 1829 idx = 0; 1830 while (vec[idx].bv_page == NULL) 1831 idx++; 1832 } else { 1833 vcnt = r1_bio->master_bio->bi_vcnt; 1834 vec = r1_bio->master_bio->bi_io_vec; 1835 idx = r1_bio->master_bio->bi_idx; 1836 } 1837 while (sect_to_write) { 1838 struct bio *wbio; 1839 if (sectors > sect_to_write) 1840 sectors = sect_to_write; 1841 /* Write at 'sector' for 'sectors'*/ 1842 1843 wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev); 1844 memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec)); 1845 wbio->bi_sector = r1_bio->sector; 1846 wbio->bi_rw = WRITE; 1847 wbio->bi_vcnt = vcnt; 1848 wbio->bi_size = r1_bio->sectors << 9; 1849 wbio->bi_idx = idx; 1850 1851 md_trim_bio(wbio, sector - r1_bio->sector, sectors); 1852 wbio->bi_sector += rdev->data_offset; 1853 wbio->bi_bdev = rdev->bdev; 1854 if (submit_bio_wait(WRITE, wbio) == 0) 1855 /* failure! */ 1856 ok = rdev_set_badblocks(rdev, sector, 1857 sectors, 0) 1858 && ok; 1859 1860 bio_put(wbio); 1861 sect_to_write -= sectors; 1862 sector += sectors; 1863 sectors = block_sectors; 1864 } 1865 return ok; 1866 } 1867 1868 static void handle_sync_write_finished(conf_t *conf, r1bio_t *r1_bio) 1869 { 1870 int m; 1871 int s = r1_bio->sectors; 1872 for (m = 0; m < conf->raid_disks ; m++) { 1873 mdk_rdev_t *rdev = conf->mirrors[m].rdev; 1874 struct bio *bio = r1_bio->bios[m]; 1875 if (bio->bi_end_io == NULL) 1876 continue; 1877 if (test_bit(BIO_UPTODATE, &bio->bi_flags) && 1878 test_bit(R1BIO_MadeGood, &r1_bio->state)) { 1879 rdev_clear_badblocks(rdev, r1_bio->sector, s); 1880 } 1881 if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && 1882 test_bit(R1BIO_WriteError, &r1_bio->state)) { 1883 if (!rdev_set_badblocks(rdev, r1_bio->sector, s, 0)) 1884 md_error(conf->mddev, rdev); 1885 } 1886 } 1887 put_buf(r1_bio); 1888 md_done_sync(conf->mddev, s, 1); 1889 } 1890 1891 static void handle_write_finished(conf_t *conf, r1bio_t *r1_bio) 1892 { 1893 int m; 1894 for (m = 0; m < conf->raid_disks ; m++) 1895 if (r1_bio->bios[m] == IO_MADE_GOOD) { 1896 mdk_rdev_t *rdev = conf->mirrors[m].rdev; 1897 rdev_clear_badblocks(rdev, 1898 r1_bio->sector, 1899 r1_bio->sectors); 1900 rdev_dec_pending(rdev, conf->mddev); 1901 } else if (r1_bio->bios[m] != NULL) { 1902 /* This drive got a write error. We need to 1903 * narrow down and record precise write 1904 * errors. 1905 */ 1906 if (!narrow_write_error(r1_bio, m)) { 1907 md_error(conf->mddev, 1908 conf->mirrors[m].rdev); 1909 /* an I/O failed, we can't clear the bitmap */ 1910 set_bit(R1BIO_Degraded, &r1_bio->state); 1911 } 1912 rdev_dec_pending(conf->mirrors[m].rdev, 1913 conf->mddev); 1914 } 1915 if (test_bit(R1BIO_WriteError, &r1_bio->state)) 1916 close_write(r1_bio); 1917 raid_end_bio_io(r1_bio); 1918 } 1919 1920 static void handle_read_error(conf_t *conf, r1bio_t *r1_bio) 1921 { 1922 int disk; 1923 int max_sectors; 1924 mddev_t *mddev = conf->mddev; 1925 struct bio *bio; 1926 char b[BDEVNAME_SIZE]; 1927 mdk_rdev_t *rdev; 1928 1929 clear_bit(R1BIO_ReadError, &r1_bio->state); 1930 /* we got a read error. Maybe the drive is bad. 
Maybe just 1931 * the block and we can fix it. 1932 * We freeze all other IO, and try reading the block from 1933 * other devices. When we find one, we re-write 1934 * and check it that fixes the read error. 1935 * This is all done synchronously while the array is 1936 * frozen 1937 */ 1938 if (mddev->ro == 0) { 1939 freeze_array(conf); 1940 fix_read_error(conf, r1_bio->read_disk, 1941 r1_bio->sector, r1_bio->sectors); 1942 unfreeze_array(conf); 1943 } else 1944 md_error(mddev, conf->mirrors[r1_bio->read_disk].rdev); 1945 1946 bio = r1_bio->bios[r1_bio->read_disk]; 1947 bdevname(bio->bi_bdev, b); 1948 read_more: 1949 disk = read_balance(conf, r1_bio, &max_sectors); 1950 if (disk == -1) { 1951 printk(KERN_ALERT "md/raid1:%s: %s: unrecoverable I/O" 1952 " read error for block %llu\n", 1953 mdname(mddev), b, (unsigned long long)r1_bio->sector); 1954 raid_end_bio_io(r1_bio); 1955 } else { 1956 const unsigned long do_sync 1957 = r1_bio->master_bio->bi_rw & REQ_SYNC; 1958 if (bio) { 1959 r1_bio->bios[r1_bio->read_disk] = 1960 mddev->ro ? IO_BLOCKED : NULL; 1961 bio_put(bio); 1962 } 1963 r1_bio->read_disk = disk; 1964 bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev); 1965 md_trim_bio(bio, r1_bio->sector - bio->bi_sector, max_sectors); 1966 r1_bio->bios[r1_bio->read_disk] = bio; 1967 rdev = conf->mirrors[disk].rdev; 1968 printk_ratelimited(KERN_ERR 1969 "md/raid1:%s: redirecting sector %llu" 1970 " to other mirror: %s\n", 1971 mdname(mddev), 1972 (unsigned long long)r1_bio->sector, 1973 bdevname(rdev->bdev, b)); 1974 bio->bi_sector = r1_bio->sector + rdev->data_offset; 1975 bio->bi_bdev = rdev->bdev; 1976 bio->bi_end_io = raid1_end_read_request; 1977 bio->bi_rw = READ | do_sync; 1978 bio->bi_private = r1_bio; 1979 if (max_sectors < r1_bio->sectors) { 1980 /* Drat - have to split this up more */ 1981 struct bio *mbio = r1_bio->master_bio; 1982 int sectors_handled = (r1_bio->sector + max_sectors 1983 - mbio->bi_sector); 1984 r1_bio->sectors = max_sectors; 1985 spin_lock_irq(&conf->device_lock); 1986 if (mbio->bi_phys_segments == 0) 1987 mbio->bi_phys_segments = 2; 1988 else 1989 mbio->bi_phys_segments++; 1990 spin_unlock_irq(&conf->device_lock); 1991 generic_make_request(bio); 1992 bio = NULL; 1993 1994 r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO); 1995 1996 r1_bio->master_bio = mbio; 1997 r1_bio->sectors = (mbio->bi_size >> 9) 1998 - sectors_handled; 1999 r1_bio->state = 0; 2000 set_bit(R1BIO_ReadError, &r1_bio->state); 2001 r1_bio->mddev = mddev; 2002 r1_bio->sector = mbio->bi_sector + sectors_handled; 2003 2004 goto read_more; 2005 } else 2006 generic_make_request(bio); 2007 } 2008 } 2009 2010 static void raid1d(mddev_t *mddev) 2011 { 2012 r1bio_t *r1_bio; 2013 unsigned long flags; 2014 conf_t *conf = mddev->private; 2015 struct list_head *head = &conf->retry_list; 2016 struct blk_plug plug; 2017 2018 md_check_recovery(mddev); 2019 2020 blk_start_plug(&plug); 2021 for (;;) { 2022 2023 if (atomic_read(&mddev->plug_cnt) == 0) 2024 flush_pending_writes(conf); 2025 2026 spin_lock_irqsave(&conf->device_lock, flags); 2027 if (list_empty(head)) { 2028 spin_unlock_irqrestore(&conf->device_lock, flags); 2029 break; 2030 } 2031 r1_bio = list_entry(head->prev, r1bio_t, retry_list); 2032 list_del(head->prev); 2033 conf->nr_queued--; 2034 spin_unlock_irqrestore(&conf->device_lock, flags); 2035 2036 mddev = r1_bio->mddev; 2037 conf = mddev->private; 2038 if (test_bit(R1BIO_IsSync, &r1_bio->state)) { 2039 if (test_bit(R1BIO_MadeGood, &r1_bio->state) || 2040 test_bit(R1BIO_WriteError, 
static void raid1d(mddev_t *mddev)
{
	r1bio_t *r1_bio;
	unsigned long flags;
	conf_t *conf = mddev->private;
	struct list_head *head = &conf->retry_list;
	struct blk_plug plug;

	md_check_recovery(mddev);

	blk_start_plug(&plug);
	for (;;) {

		if (atomic_read(&mddev->plug_cnt) == 0)
			flush_pending_writes(conf);

		spin_lock_irqsave(&conf->device_lock, flags);
		if (list_empty(head)) {
			spin_unlock_irqrestore(&conf->device_lock, flags);
			break;
		}
		r1_bio = list_entry(head->prev, r1bio_t, retry_list);
		list_del(head->prev);
		conf->nr_queued--;
		spin_unlock_irqrestore(&conf->device_lock, flags);

		mddev = r1_bio->mddev;
		conf = mddev->private;
		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
			if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
			    test_bit(R1BIO_WriteError, &r1_bio->state))
				handle_sync_write_finished(conf, r1_bio);
			else
				sync_request_write(mddev, r1_bio);
		} else if (test_bit(R1BIO_MadeGood, &r1_bio->state) ||
			   test_bit(R1BIO_WriteError, &r1_bio->state))
			handle_write_finished(conf, r1_bio);
		else if (test_bit(R1BIO_ReadError, &r1_bio->state))
			handle_read_error(conf, r1_bio);
		else
			/* just a partial read to be scheduled from separate
			 * context
			 */
			generic_make_request(r1_bio->bios[r1_bio->read_disk]);

		cond_resched();
		if (mddev->flags & ~(1<<MD_CHANGE_PENDING))
			md_check_recovery(mddev);
	}
	blk_finish_plug(&plug);
}

static int init_resync(conf_t *conf)
{
	int buffs;

	buffs = RESYNC_WINDOW / RESYNC_BLOCK_SIZE;
	BUG_ON(conf->r1buf_pool);
	conf->r1buf_pool = mempool_create(buffs, r1buf_pool_alloc,
					  r1buf_pool_free,
					  conf->poolinfo);
	if (!conf->r1buf_pool)
		return -ENOMEM;
	conf->next_resync = 0;
	return 0;
}
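
/*
 * With the RESYNC_WINDOW and RESYNC_BLOCK_SIZE values defined earlier
 * (2048*1024 and 64*1024 bytes respectively), init_resync() above sizes
 * the buffer pool at 32 r1bios, i.e. enough to keep a 2MB resync window
 * in flight.
 */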
/*
 * perform a "sync" on one "block"
 *
 * We need to make sure that no normal I/O request - particularly write
 * requests - conflict with active sync requests.
 *
 * This is achieved by tracking pending requests and a 'barrier' concept
 * that can be installed to exclude normal IO requests.
 */

static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
{
	conf_t *conf = mddev->private;
	r1bio_t *r1_bio;
	struct bio *bio;
	sector_t max_sector, nr_sectors;
	int disk = -1;
	int i;
	int wonly = -1;
	int write_targets = 0, read_targets = 0;
	sector_t sync_blocks;
	int still_degraded = 0;
	int good_sectors = RESYNC_SECTORS;
	int min_bad = 0; /* number of sectors that are bad in all devices */

	if (!conf->r1buf_pool)
		if (init_resync(conf))
			return 0;

	max_sector = mddev->dev_sectors;
	if (sector_nr >= max_sector) {
		/* If we aborted, we need to abort the
		 * sync on the 'current' bitmap chunk (there will
		 * only be one in raid1 resync).
		 * We can find the current address in mddev->curr_resync.
		 */
		if (mddev->curr_resync < max_sector) /* aborted */
			bitmap_end_sync(mddev->bitmap, mddev->curr_resync,
					&sync_blocks, 1);
		else /* completed sync */
			conf->fullsync = 0;

		bitmap_close_sync(mddev->bitmap);
		close_sync(conf);
		return 0;
	}

	if (mddev->bitmap == NULL &&
	    mddev->recovery_cp == MaxSector &&
	    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) &&
	    conf->fullsync == 0) {
		*skipped = 1;
		return max_sector - sector_nr;
	}
	/* Before building a request, check if we can skip these blocks.
	 * This call to bitmap_start_sync doesn't actually record anything.
	 */
	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
	    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		/* We can skip this block, and probably several more */
		*skipped = 1;
		return sync_blocks;
	}
	/*
	 * If there is non-resync activity waiting for a turn,
	 * and resync is going fast enough,
	 * then let it through before starting on this new sync request.
	 */
	if (!go_faster && conf->nr_waiting)
		msleep_interruptible(1000);

	bitmap_cond_end_sync(mddev->bitmap, sector_nr);
	r1_bio = mempool_alloc(conf->r1buf_pool, GFP_NOIO);
	raise_barrier(conf);

	conf->next_resync = sector_nr;

	rcu_read_lock();
	/*
	 * If we get a correctable read error during resync or recovery,
	 * we might want to read from a different device.  So we
	 * flag all drives that could conceivably be read from for READ,
	 * and any others (which will be non-In_sync devices) for WRITE.
	 * If a read fails, we try reading from something else for which READ
	 * is OK.
	 */

	r1_bio->mddev = mddev;
	r1_bio->sector = sector_nr;
	r1_bio->state = 0;
	set_bit(R1BIO_IsSync, &r1_bio->state);

	for (i = 0; i < conf->raid_disks; i++) {
		mdk_rdev_t *rdev;
		bio = r1_bio->bios[i];

		/* take from bio_init */
		bio->bi_next = NULL;
		bio->bi_flags &= ~(BIO_POOL_MASK-1);
		bio->bi_flags |= 1 << BIO_UPTODATE;
		bio->bi_comp_cpu = -1;
		bio->bi_rw = READ;
		bio->bi_vcnt = 0;
		bio->bi_idx = 0;
		bio->bi_phys_segments = 0;
		bio->bi_size = 0;
		bio->bi_end_io = NULL;
		bio->bi_private = NULL;

		rdev = rcu_dereference(conf->mirrors[i].rdev);
		if (rdev == NULL ||
		    test_bit(Faulty, &rdev->flags)) {
			still_degraded = 1;
		} else if (!test_bit(In_sync, &rdev->flags)) {
			bio->bi_rw = WRITE;
			bio->bi_end_io = end_sync_write;
			write_targets++;
		} else {
			/* may need to read from here */
			sector_t first_bad = MaxSector;
			int bad_sectors;

			if (is_badblock(rdev, sector_nr, good_sectors,
					&first_bad, &bad_sectors)) {
				if (first_bad > sector_nr)
					good_sectors = first_bad - sector_nr;
				else {
					bad_sectors -= (sector_nr - first_bad);
					if (min_bad == 0 ||
					    min_bad > bad_sectors)
						min_bad = bad_sectors;
				}
			}
			if (sector_nr < first_bad) {
				if (test_bit(WriteMostly, &rdev->flags)) {
					if (wonly < 0)
						wonly = i;
				} else {
					if (disk < 0)
						disk = i;
				}
				bio->bi_rw = READ;
				bio->bi_end_io = end_sync_read;
				read_targets++;
			}
		}
		if (bio->bi_end_io) {
			atomic_inc(&rdev->nr_pending);
			bio->bi_sector = sector_nr + rdev->data_offset;
			bio->bi_bdev = rdev->bdev;
			bio->bi_private = r1_bio;
		}
	}
	rcu_read_unlock();
	if (disk < 0)
		disk = wonly;
	r1_bio->read_disk = disk;
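
	/*
	 * At this point read_disk is the first readable In_sync device
	 * that is not WriteMostly; a WriteMostly device (tracked in
	 * 'wonly' above) is used only when no other readable mirror
	 * exists.
	 */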
	if (read_targets == 0 && min_bad > 0) {
		/* These sectors are bad on all InSync devices, so we
		 * need to mark them bad on all write targets
		 */
		int ok = 1;
		for (i = 0; i < conf->raid_disks; i++)
			if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
				mdk_rdev_t *rdev =
					rcu_dereference(conf->mirrors[i].rdev);
				ok = rdev_set_badblocks(rdev, sector_nr,
							min_bad, 0
					) && ok;
			}
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		*skipped = 1;
		put_buf(r1_bio);

		if (!ok) {
			/* Cannot record the badblocks, so need to
			 * abort the resync.
			 * If there are multiple read targets, could just
			 * fail the really bad ones ???
			 */
			conf->recovery_disabled = mddev->recovery_disabled;
			set_bit(MD_RECOVERY_INTR, &mddev->recovery);
			return 0;
		} else
			return min_bad;

	}
	if (min_bad > 0 && min_bad < good_sectors) {
		/* only resync enough to reach the next bad->good
		 * transition */
		good_sectors = min_bad;
	}

	if (test_bit(MD_RECOVERY_SYNC, &mddev->recovery) && read_targets > 0)
		/* extra read targets are also write targets */
		write_targets += read_targets-1;

	if (write_targets == 0 || read_targets == 0) {
		/* There is nowhere to write, so all non-sync
		 * drives must be failed - so we are finished
		 */
		sector_t rv = max_sector - sector_nr;
		*skipped = 1;
		put_buf(r1_bio);
		return rv;
	}

	if (max_sector > mddev->resync_max)
		max_sector = mddev->resync_max; /* Don't do IO beyond here */
	if (max_sector > sector_nr + good_sectors)
		max_sector = sector_nr + good_sectors;
	nr_sectors = 0;
	sync_blocks = 0;
	do {
		struct page *page;
		int len = PAGE_SIZE;
		if (sector_nr + (len>>9) > max_sector)
			len = (max_sector - sector_nr) << 9;
		if (len == 0)
			break;
		if (sync_blocks == 0) {
			if (!bitmap_start_sync(mddev->bitmap, sector_nr,
					       &sync_blocks, still_degraded) &&
			    !conf->fullsync &&
			    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
				break;
			BUG_ON(sync_blocks < (PAGE_SIZE>>9));
			if ((len >> 9) > sync_blocks)
				len = sync_blocks<<9;
		}

		for (i = 0; i < conf->raid_disks; i++) {
			bio = r1_bio->bios[i];
			if (bio->bi_end_io) {
				page = bio->bi_io_vec[bio->bi_vcnt].bv_page;
				if (bio_add_page(bio, page, len, 0) == 0) {
					/* stop here */
					bio->bi_io_vec[bio->bi_vcnt].bv_page = page;
					while (i > 0) {
						i--;
						bio = r1_bio->bios[i];
						if (bio->bi_end_io == NULL)
							continue;
						/* remove last page from this bio */
						bio->bi_vcnt--;
						bio->bi_size -= len;
						bio->bi_flags &= ~(1 << BIO_SEG_VALID);
					}
					goto bio_full;
				}
			}
		}
		nr_sectors += len>>9;
		sector_nr += len>>9;
		sync_blocks -= (len>>9);
	} while (r1_bio->bios[disk]->bi_vcnt < RESYNC_PAGES);
 bio_full:
	r1_bio->sectors = nr_sectors;

	/* For a user-requested sync, we read all readable devices and do a
	 * compare
	 */
	if (test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
		atomic_set(&r1_bio->remaining, read_targets);
		for (i = 0; i < conf->raid_disks; i++) {
			bio = r1_bio->bios[i];
			if (bio->bi_end_io == end_sync_read) {
				md_sync_acct(bio->bi_bdev, nr_sectors);
				generic_make_request(bio);
			}
		}
	} else {
		atomic_set(&r1_bio->remaining, 1);
		bio = r1_bio->bios[r1_bio->read_disk];
		md_sync_acct(bio->bi_bdev, nr_sectors);
		generic_make_request(bio);
	}
	return nr_sectors;
}

static sector_t raid1_size(mddev_t *mddev, sector_t sectors, int raid_disks)
{
	if (sectors)
		return sectors;

	return mddev->dev_sectors;
}
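
/*
 * Every member of a RAID1 array holds a full copy of the data, so the
 * array size reported by raid1_size() above is simply the per-device
 * size, independent of raid_disks.
 */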
static conf_t *setup_conf(mddev_t *mddev)
{
	conf_t *conf;
	int i;
	mirror_info_t *disk;
	mdk_rdev_t *rdev;
	int err = -ENOMEM;

	conf = kzalloc(sizeof(conf_t), GFP_KERNEL);
	if (!conf)
		goto abort;

	conf->mirrors = kzalloc(sizeof(struct mirror_info)*mddev->raid_disks,
				GFP_KERNEL);
	if (!conf->mirrors)
		goto abort;

	conf->tmppage = alloc_page(GFP_KERNEL);
	if (!conf->tmppage)
		goto abort;

	conf->poolinfo = kzalloc(sizeof(*conf->poolinfo), GFP_KERNEL);
	if (!conf->poolinfo)
		goto abort;
	conf->poolinfo->raid_disks = mddev->raid_disks;
	conf->r1bio_pool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
					  r1bio_pool_free,
					  conf->poolinfo);
	if (!conf->r1bio_pool)
		goto abort;

	conf->poolinfo->mddev = mddev;

	spin_lock_init(&conf->device_lock);
	list_for_each_entry(rdev, &mddev->disks, same_set) {
		int disk_idx = rdev->raid_disk;
		if (disk_idx >= mddev->raid_disks
		    || disk_idx < 0)
			continue;
		disk = conf->mirrors + disk_idx;

		disk->rdev = rdev;

		disk->head_position = 0;
	}
	conf->raid_disks = mddev->raid_disks;
	conf->mddev = mddev;
	INIT_LIST_HEAD(&conf->retry_list);

	spin_lock_init(&conf->resync_lock);
	init_waitqueue_head(&conf->wait_barrier);

	bio_list_init(&conf->pending_bio_list);

	conf->last_used = -1;
	for (i = 0; i < conf->raid_disks; i++) {

		disk = conf->mirrors + i;

		if (!disk->rdev ||
		    !test_bit(In_sync, &disk->rdev->flags)) {
			disk->head_position = 0;
			if (disk->rdev)
				conf->fullsync = 1;
		} else if (conf->last_used < 0)
			/*
			 * The first working device is used as the
			 * starting point for read balancing.
			 */
			conf->last_used = i;
	}

	err = -EIO;
	if (conf->last_used < 0) {
		printk(KERN_ERR "md/raid1:%s: no operational mirrors\n",
		       mdname(mddev));
		goto abort;
	}
	err = -ENOMEM;
	conf->thread = md_register_thread(raid1d, mddev, NULL);
	if (!conf->thread) {
		printk(KERN_ERR
		       "md/raid1:%s: couldn't allocate thread\n",
		       mdname(mddev));
		goto abort;
	}

	return conf;

 abort:
	if (conf) {
		if (conf->r1bio_pool)
			mempool_destroy(conf->r1bio_pool);
		kfree(conf->mirrors);
		safe_put_page(conf->tmppage);
		kfree(conf->poolinfo);
		kfree(conf);
	}
	return ERR_PTR(err);
}
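
/*
 * setup_conf() only allocates and populates the private structures; it
 * is run() below that wires the result into the mddev and takes over
 * the daemon thread.  Keeping the two separate lets the takeover path
 * build a conf for an array that is still running under another
 * personality.
 */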
static int run(mddev_t *mddev)
{
	conf_t *conf;
	int i;
	mdk_rdev_t *rdev;

	if (mddev->level != 1) {
		printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
		       mdname(mddev), mddev->level);
		return -EIO;
	}
	if (mddev->reshape_position != MaxSector) {
		printk(KERN_ERR "md/raid1:%s: reshape_position set but not supported\n",
		       mdname(mddev));
		return -EIO;
	}
	/*
	 * copy the already verified devices into our private RAID1
	 * bookkeeping area. [whatever we allocate in run()
	 * should be freed in stop()]
	 */
	if (mddev->private == NULL)
		conf = setup_conf(mddev);
	else
		conf = mddev->private;

	if (IS_ERR(conf))
		return PTR_ERR(conf);

	list_for_each_entry(rdev, &mddev->disks, same_set) {
		if (!mddev->gendisk)
			continue;
		disk_stack_limits(mddev->gendisk, rdev->bdev,
				  rdev->data_offset << 9);
		/* as we don't honour merge_bvec_fn, we must never risk
		 * violating it, so limit ->max_segments to 1, lying within
		 * a single page, as a one page request is never in violation.
		 */
		if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
			blk_queue_max_segments(mddev->queue, 1);
			blk_queue_segment_boundary(mddev->queue,
						   PAGE_CACHE_SIZE - 1);
		}
	}

	mddev->degraded = 0;
	for (i = 0; i < conf->raid_disks; i++)
		if (conf->mirrors[i].rdev == NULL ||
		    !test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
		    test_bit(Faulty, &conf->mirrors[i].rdev->flags))
			mddev->degraded++;

	if (conf->raid_disks - mddev->degraded == 1)
		mddev->recovery_cp = MaxSector;

	if (mddev->recovery_cp != MaxSector)
		printk(KERN_NOTICE "md/raid1:%s: not clean"
		       " -- starting background reconstruction\n",
		       mdname(mddev));
	printk(KERN_INFO
	       "md/raid1:%s: active with %d out of %d mirrors\n",
	       mdname(mddev), mddev->raid_disks - mddev->degraded,
	       mddev->raid_disks);

	/*
	 * Ok, everything is just fine now
	 */
	mddev->thread = conf->thread;
	conf->thread = NULL;
	mddev->private = conf;

	md_set_array_sectors(mddev, raid1_size(mddev, 0, 0));

	if (mddev->queue) {
		mddev->queue->backing_dev_info.congested_fn = raid1_congested;
		mddev->queue->backing_dev_info.congested_data = mddev;
	}
	return md_integrity_register(mddev);
}

static int stop(mddev_t *mddev)
{
	conf_t *conf = mddev->private;
	struct bitmap *bitmap = mddev->bitmap;

	/* wait for behind writes to complete */
	if (bitmap && atomic_read(&bitmap->behind_writes) > 0) {
		printk(KERN_INFO "md/raid1:%s: behind writes in progress - waiting to stop.\n",
		       mdname(mddev));
		/* need to kick something here to make sure I/O goes? */
		wait_event(bitmap->behind_wait,
			   atomic_read(&bitmap->behind_writes) == 0);
	}

	raise_barrier(conf);
	lower_barrier(conf);

	md_unregister_thread(&mddev->thread);
	if (conf->r1bio_pool)
		mempool_destroy(conf->r1bio_pool);
	kfree(conf->mirrors);
	kfree(conf->poolinfo);
	kfree(conf);
	mddev->private = NULL;
	return 0;
}

static int raid1_resize(mddev_t *mddev, sector_t sectors)
{
	/* No resync is happening, and there is enough space
	 * on all devices, so we can resize.
	 * We need to make sure resync covers any new space.
	 * If the array is shrinking we should possibly wait until
	 * any io in the removed space completes, but it hardly seems
	 * worth it.
	 */
	md_set_array_sectors(mddev, raid1_size(mddev, sectors, 0));
	if (mddev->array_sectors > raid1_size(mddev, sectors, 0))
		return -EINVAL;
	set_capacity(mddev->gendisk, mddev->array_sectors);
	revalidate_disk(mddev->gendisk);
	if (sectors > mddev->dev_sectors &&
	    mddev->recovery_cp > mddev->dev_sectors) {
		mddev->recovery_cp = mddev->dev_sectors;
		set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	}
	mddev->dev_sectors = sectors;
	mddev->resync_max_sectors = sectors;
	return 0;
}
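
/*
 * Note on raid1_resize() above: when the array grows, recovery_cp is
 * pulled back to the old dev_sectors and MD_RECOVERY_NEEDED is set, so
 * the newly exposed space is resynced before it is trusted.
 */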
static int raid1_reshape(mddev_t *mddev)
{
	/* We need to:
	 * 1/ resize the r1bio_pool
	 * 2/ resize conf->mirrors
	 *
	 * We allocate a new r1bio_pool if we can.
	 * Then raise a device barrier and wait until all IO stops.
	 * Then resize conf->mirrors and swap in the new r1bio pool.
	 *
	 * At the same time, we "pack" the devices so that all the missing
	 * devices have the higher raid_disk numbers.
	 */
	mempool_t *newpool, *oldpool;
	struct pool_info *newpoolinfo;
	mirror_info_t *newmirrors;
	conf_t *conf = mddev->private;
	int cnt, raid_disks;
	unsigned long flags;
	int d, d2, err;

	/* Cannot change chunk_size, layout, or level */
	if (mddev->chunk_sectors != mddev->new_chunk_sectors ||
	    mddev->layout != mddev->new_layout ||
	    mddev->level != mddev->new_level) {
		mddev->new_chunk_sectors = mddev->chunk_sectors;
		mddev->new_layout = mddev->layout;
		mddev->new_level = mddev->level;
		return -EINVAL;
	}

	err = md_allow_write(mddev);
	if (err)
		return err;

	raid_disks = mddev->raid_disks + mddev->delta_disks;

	if (raid_disks < conf->raid_disks) {
		cnt = 0;
		for (d = 0; d < conf->raid_disks; d++)
			if (conf->mirrors[d].rdev)
				cnt++;
		if (cnt > raid_disks)
			return -EBUSY;
	}

	newpoolinfo = kmalloc(sizeof(*newpoolinfo), GFP_KERNEL);
	if (!newpoolinfo)
		return -ENOMEM;
	newpoolinfo->mddev = mddev;
	newpoolinfo->raid_disks = raid_disks;

	newpool = mempool_create(NR_RAID1_BIOS, r1bio_pool_alloc,
				 r1bio_pool_free, newpoolinfo);
	if (!newpool) {
		kfree(newpoolinfo);
		return -ENOMEM;
	}
	newmirrors = kzalloc(sizeof(struct mirror_info) * raid_disks, GFP_KERNEL);
	if (!newmirrors) {
		kfree(newpoolinfo);
		mempool_destroy(newpool);
		return -ENOMEM;
	}

	raise_barrier(conf);

	/* ok, everything is stopped */
	oldpool = conf->r1bio_pool;
	conf->r1bio_pool = newpool;

	for (d = d2 = 0; d < conf->raid_disks; d++) {
		mdk_rdev_t *rdev = conf->mirrors[d].rdev;
		if (rdev && rdev->raid_disk != d2) {
			/* remove the old link, and any stale link at
			 * the new name, before creating the new link
			 */
			sysfs_unlink_rdev(mddev, rdev);
			rdev->raid_disk = d2;
			sysfs_unlink_rdev(mddev, rdev);
			if (sysfs_link_rdev(mddev, rdev))
				printk(KERN_WARNING
				       "md/raid1:%s: cannot register rd%d\n",
				       mdname(mddev), rdev->raid_disk);
		}
		if (rdev)
			newmirrors[d2++].rdev = rdev;
	}
	kfree(conf->mirrors);
	conf->mirrors = newmirrors;
	kfree(conf->poolinfo);
	conf->poolinfo = newpoolinfo;

	spin_lock_irqsave(&conf->device_lock, flags);
	mddev->degraded += (raid_disks - conf->raid_disks);
	spin_unlock_irqrestore(&conf->device_lock, flags);
	conf->raid_disks = mddev->raid_disks = raid_disks;
	mddev->delta_disks = 0;

	conf->last_used = 0; /* just make sure it is in-range */
	lower_barrier(conf);

	set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
	md_wakeup_thread(mddev->thread);

	mempool_destroy(oldpool);
	return 0;
}

static void raid1_quiesce(mddev_t *mddev, int state)
{
	conf_t *conf = mddev->private;

	switch (state) {
	case 2: /* wake for suspend */
		wake_up(&conf->wait_barrier);
		break;
	case 1:
		raise_barrier(conf);
		break;
	case 0:
		lower_barrier(conf);
		break;
	}
}

static void *raid1_takeover(mddev_t *mddev)
{
	/* raid1 can take over:
	 *  raid5 with 2 devices, any layout or chunk size
	 */
	if (mddev->level == 5 && mddev->raid_disks == 2) {
		conf_t *conf;
		mddev->new_level = 1;
		mddev->new_layout = 0;
		mddev->new_chunk_sectors = 0;
		conf = setup_conf(mddev);
		if (!IS_ERR(conf))
			conf->barrier = 1;
		return conf;
	}
	return ERR_PTR(-EINVAL);
}
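
/*
 * A 2-device RAID5 stores, for each stripe, one data block and its
 * parity, and the parity of a single block is the block itself, so the
 * on-disk layout is already a mirror and raid1_takeover() above needs
 * no data movement.  Setting conf->barrier presumably keeps the array
 * quiesced until the takeover completes; that reading is an inference,
 * not taken from the source.
 */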
static struct mdk_personality raid1_personality =
{
	.name		= "raid1",
	.level		= 1,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.stop		= stop,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid1_add_disk,
	.hot_remove_disk= raid1_remove_disk,
	.spare_active	= raid1_spare_active,
	.sync_request	= sync_request,
	.resize		= raid1_resize,
	.size		= raid1_size,
	.check_reshape	= raid1_reshape,
	.quiesce	= raid1_quiesce,
	.takeover	= raid1_takeover,
};

static int __init raid_init(void)
{
	return register_md_personality(&raid1_personality);
}

static void raid_exit(void)
{
	unregister_md_personality(&raid1_personality);
}

module_init(raid_init);
module_exit(raid_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID1 (mirroring) personality for MD");
MODULE_ALIAS("md-personality-3"); /* RAID1 */
MODULE_ALIAS("md-raid1");
MODULE_ALIAS("md-level-1");
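
/*
 * Userspace usage sketch (illustrative, not part of this driver):
 * creating a two-device mirror, which loads this personality through
 * the "md-level-1" alias above, then watching the initial resync:
 *
 *   mdadm --create /dev/md0 --level=1 --raid-devices=2 /dev/sda1 /dev/sdb1
 *   cat /proc/mdstat
 *
 * The device names are examples only.
 */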