1 /* 2 * raid5.c : Multiple Devices driver for Linux 3 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman 4 * Copyright (C) 1999, 2000 Ingo Molnar 5 * Copyright (C) 2002, 2003 H. Peter Anvin 6 * 7 * RAID-4/5/6 management functions. 8 * Thanks to Penguin Computing for making the RAID-6 development possible 9 * by donating a test server! 10 * 11 * This program is free software; you can redistribute it and/or modify 12 * it under the terms of the GNU General Public License as published by 13 * the Free Software Foundation; either version 2, or (at your option) 14 * any later version. 15 * 16 * You should have received a copy of the GNU General Public License 17 * (for example /usr/src/linux/COPYING); if not, write to the Free 18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 */ 20 21 /* 22 * BITMAP UNPLUGGING: 23 * 24 * The sequencing for updating the bitmap reliably is a little 25 * subtle (and I got it wrong the first time) so it deserves some 26 * explanation. 27 * 28 * We group bitmap updates into batches. Each batch has a number. 29 * We may write out several batches at once, but that isn't very important. 30 * conf->seq_write is the number of the last batch successfully written. 31 * conf->seq_flush is the number of the last batch that was closed to 32 * new additions. 33 * When we discover that we will need to write to any block in a stripe 34 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq 35 * the number of the batch it will be in. This is seq_flush+1. 36 * When we are ready to do a write, if that batch hasn't been written yet, 37 * we plug the array and queue the stripe for later. 38 * When an unplug happens, we increment bm_flush, thus closing the current 39 * batch. 40 * When we notice that bm_flush > bm_write, we write out all pending updates 41 * to the bitmap, and advance bm_write to where bm_flush was. 42 * This may occasionally write a bit out twice, but is sure never to 43 * miss any bits. 44 */ 45 46 #include <linux/blkdev.h> 47 #include <linux/kthread.h> 48 #include <linux/raid/pq.h> 49 #include <linux/async_tx.h> 50 #include <linux/module.h> 51 #include <linux/async.h> 52 #include <linux/seq_file.h> 53 #include <linux/cpu.h> 54 #include <linux/slab.h> 55 #include <linux/ratelimit.h> 56 #include "md.h" 57 #include "raid5.h" 58 #include "raid0.h" 59 #include "bitmap.h" 60 61 /* 62 * Stripe cache 63 */ 64 65 #define NR_STRIPES 256 66 #define STRIPE_SIZE PAGE_SIZE 67 #define STRIPE_SHIFT (PAGE_SHIFT - 9) 68 #define STRIPE_SECTORS (STRIPE_SIZE>>9) 69 #define IO_THRESHOLD 1 70 #define BYPASS_THRESHOLD 1 71 #define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head)) 72 #define HASH_MASK (NR_HASH - 1) 73 74 static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect) 75 { 76 int hash = (sect >> STRIPE_SHIFT) & HASH_MASK; 77 return &conf->stripe_hashtbl[hash]; 78 } 79 80 /* bio's attached to a stripe+device for I/O are linked together in bi_sector 81 * order without overlap. There may be several bio's per stripe+device, and 82 * a bio could span several devices. 83 * When walking this list for a particular stripe+device, we must never proceed 84 * beyond a bio that extends past this device, as the next bio might no longer 85 * be valid. 
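 * As an illustrative example (not part of the original comment; it assumes
 * 4 KiB pages, so STRIPE_SECTORS == 8): for a stripe+device starting at
 * dev->sector == 16, i.e. covering sectors 16..23, a bio at bi_sector 16
 * with bi_size 2048 ends at sector 20, strictly inside the chunk, so
 * r5_next_bio() below returns bio->bi_next; a bio at bi_sector 20 with
 * bi_size 4096 ends at sector 28, at or beyond the chunk boundary, so the
 * walk stops (NULL is returned).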
 * This function is used to determine the 'next' bio in the list, given the sector
 * of the current stripe+device
 */
static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
{
	int sectors = bio->bi_size >> 9;
	if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
		return bio->bi_next;
	else
		return NULL;
}

/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 */
static inline int raid5_bi_phys_segments(struct bio *bio)
{
	return bio->bi_phys_segments & 0xffff;
}

static inline int raid5_bi_hw_segments(struct bio *bio)
{
	return (bio->bi_phys_segments >> 16) & 0xffff;
}

static inline int raid5_dec_bi_phys_segments(struct bio *bio)
{
	--bio->bi_phys_segments;
	return raid5_bi_phys_segments(bio);
}

static inline int raid5_dec_bi_hw_segments(struct bio *bio)
{
	unsigned short val = raid5_bi_hw_segments(bio);

	--val;
	bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio);
	return val;
}

static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
{
	bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16);
}

/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		/* ddf always starts from the first device */
		return 0;
	/* md starts just after Q block */
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}
static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}

/* When walking through the disks in a raid5, starting at raid6_d0,
 * we need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1.  This helper does that mapping.
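 * A worked example (illustrative, not from the original comment): with six
 * devices in the md (non-DDF) layout, pd_idx == 2 and qd_idx == 3,
 * raid6_d0() starts the walk at device 4, and raid6_idx_to_slot() maps
 * devices 4, 5, 0, 1 to data slots 0..3, device 2 (P) to slot 4 and
 * device 3 (Q) to slot 5, which is the layout set_syndrome_sources()
 * feeds to async_gen_syndrome().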
154 */ 155 static int raid6_idx_to_slot(int idx, struct stripe_head *sh, 156 int *count, int syndrome_disks) 157 { 158 int slot = *count; 159 160 if (sh->ddf_layout) 161 (*count)++; 162 if (idx == sh->pd_idx) 163 return syndrome_disks; 164 if (idx == sh->qd_idx) 165 return syndrome_disks + 1; 166 if (!sh->ddf_layout) 167 (*count)++; 168 return slot; 169 } 170 171 static void return_io(struct bio *return_bi) 172 { 173 struct bio *bi = return_bi; 174 while (bi) { 175 176 return_bi = bi->bi_next; 177 bi->bi_next = NULL; 178 bi->bi_size = 0; 179 bio_endio(bi, 0); 180 bi = return_bi; 181 } 182 } 183 184 static void print_raid5_conf (struct r5conf *conf); 185 186 static int stripe_operations_active(struct stripe_head *sh) 187 { 188 return sh->check_state || sh->reconstruct_state || 189 test_bit(STRIPE_BIOFILL_RUN, &sh->state) || 190 test_bit(STRIPE_COMPUTE_RUN, &sh->state); 191 } 192 193 static void __release_stripe(struct r5conf *conf, struct stripe_head *sh) 194 { 195 if (atomic_dec_and_test(&sh->count)) { 196 BUG_ON(!list_empty(&sh->lru)); 197 BUG_ON(atomic_read(&conf->active_stripes)==0); 198 if (test_bit(STRIPE_HANDLE, &sh->state)) { 199 if (test_bit(STRIPE_DELAYED, &sh->state)) 200 list_add_tail(&sh->lru, &conf->delayed_list); 201 else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && 202 sh->bm_seq - conf->seq_write > 0) 203 list_add_tail(&sh->lru, &conf->bitmap_list); 204 else { 205 clear_bit(STRIPE_BIT_DELAY, &sh->state); 206 list_add_tail(&sh->lru, &conf->handle_list); 207 } 208 md_wakeup_thread(conf->mddev->thread); 209 } else { 210 BUG_ON(stripe_operations_active(sh)); 211 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 212 atomic_dec(&conf->preread_active_stripes); 213 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) 214 md_wakeup_thread(conf->mddev->thread); 215 } 216 atomic_dec(&conf->active_stripes); 217 if (!test_bit(STRIPE_EXPANDING, &sh->state)) { 218 list_add_tail(&sh->lru, &conf->inactive_list); 219 wake_up(&conf->wait_for_stripe); 220 if (conf->retry_read_aligned) 221 md_wakeup_thread(conf->mddev->thread); 222 } 223 } 224 } 225 } 226 227 static void release_stripe(struct stripe_head *sh) 228 { 229 struct r5conf *conf = sh->raid_conf; 230 unsigned long flags; 231 232 spin_lock_irqsave(&conf->device_lock, flags); 233 __release_stripe(conf, sh); 234 spin_unlock_irqrestore(&conf->device_lock, flags); 235 } 236 237 static inline void remove_hash(struct stripe_head *sh) 238 { 239 pr_debug("remove_hash(), stripe %llu\n", 240 (unsigned long long)sh->sector); 241 242 hlist_del_init(&sh->hash); 243 } 244 245 static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) 246 { 247 struct hlist_head *hp = stripe_hash(conf, sh->sector); 248 249 pr_debug("insert_hash(), stripe %llu\n", 250 (unsigned long long)sh->sector); 251 252 hlist_add_head(&sh->hash, hp); 253 } 254 255 256 /* find an idle stripe, make sure it is unhashed, and return it. 
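 * The stripe returned here has already been taken off conf->inactive_list
 * and out of the hash table, and conf->active_stripes has been incremented;
 * callers run under conf->device_lock and either re-key it with
 * init_stripe() (get_active_stripe) or free it (drop_one_stripe).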
*/ 257 static struct stripe_head *get_free_stripe(struct r5conf *conf) 258 { 259 struct stripe_head *sh = NULL; 260 struct list_head *first; 261 262 if (list_empty(&conf->inactive_list)) 263 goto out; 264 first = conf->inactive_list.next; 265 sh = list_entry(first, struct stripe_head, lru); 266 list_del_init(first); 267 remove_hash(sh); 268 atomic_inc(&conf->active_stripes); 269 out: 270 return sh; 271 } 272 273 static void shrink_buffers(struct stripe_head *sh) 274 { 275 struct page *p; 276 int i; 277 int num = sh->raid_conf->pool_size; 278 279 for (i = 0; i < num ; i++) { 280 p = sh->dev[i].page; 281 if (!p) 282 continue; 283 sh->dev[i].page = NULL; 284 put_page(p); 285 } 286 } 287 288 static int grow_buffers(struct stripe_head *sh) 289 { 290 int i; 291 int num = sh->raid_conf->pool_size; 292 293 for (i = 0; i < num; i++) { 294 struct page *page; 295 296 if (!(page = alloc_page(GFP_KERNEL))) { 297 return 1; 298 } 299 sh->dev[i].page = page; 300 } 301 return 0; 302 } 303 304 static void raid5_build_block(struct stripe_head *sh, int i, int previous); 305 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, 306 struct stripe_head *sh); 307 308 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) 309 { 310 struct r5conf *conf = sh->raid_conf; 311 int i; 312 313 BUG_ON(atomic_read(&sh->count) != 0); 314 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); 315 BUG_ON(stripe_operations_active(sh)); 316 317 pr_debug("init_stripe called, stripe %llu\n", 318 (unsigned long long)sh->sector); 319 320 remove_hash(sh); 321 322 sh->generation = conf->generation - previous; 323 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; 324 sh->sector = sector; 325 stripe_set_idx(sector, conf, previous, sh); 326 sh->state = 0; 327 328 329 for (i = sh->disks; i--; ) { 330 struct r5dev *dev = &sh->dev[i]; 331 332 if (dev->toread || dev->read || dev->towrite || dev->written || 333 test_bit(R5_LOCKED, &dev->flags)) { 334 printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n", 335 (unsigned long long)sh->sector, i, dev->toread, 336 dev->read, dev->towrite, dev->written, 337 test_bit(R5_LOCKED, &dev->flags)); 338 WARN_ON(1); 339 } 340 dev->flags = 0; 341 raid5_build_block(sh, i, previous); 342 } 343 insert_hash(conf, sh); 344 } 345 346 static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector, 347 short generation) 348 { 349 struct stripe_head *sh; 350 struct hlist_node *hn; 351 352 pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector); 353 hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash) 354 if (sh->sector == sector && sh->generation == generation) 355 return sh; 356 pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector); 357 return NULL; 358 } 359 360 /* 361 * Need to check if array has failed when deciding whether to: 362 * - start an array 363 * - remove non-faulty devices 364 * - add a spare 365 * - allow a reshape 366 * This determination is simple when no reshape is happening. 367 * However if there is a reshape, we need to carefully check 368 * both the before and after sections. 369 * This is because some failed devices may only affect one 370 * of the two sections, and some non-in_sync devices may 371 * be insync in the section most affected by failed devices. 
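 * An illustrative example: when growing a RAID-5 from 4 to 6 devices, a
 * device that is neither faulty nor in_sync counts against the 'previous'
 * 4-device section only because the array is growing (the new section is
 * still being rebuilt by the reshape anyway); on a shrinking reshape the
 * same device would instead count against the 'new' section.  has_failed()
 * below therefore computes 'degraded' separately for both sections and
 * reports failure if either count exceeds max_degraded.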
372 */ 373 static int has_failed(struct r5conf *conf) 374 { 375 int degraded; 376 int i; 377 if (conf->mddev->reshape_position == MaxSector) 378 return conf->mddev->degraded > conf->max_degraded; 379 380 rcu_read_lock(); 381 degraded = 0; 382 for (i = 0; i < conf->previous_raid_disks; i++) { 383 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); 384 if (!rdev || test_bit(Faulty, &rdev->flags)) 385 degraded++; 386 else if (test_bit(In_sync, &rdev->flags)) 387 ; 388 else 389 /* not in-sync or faulty. 390 * If the reshape increases the number of devices, 391 * this is being recovered by the reshape, so 392 * this 'previous' section is not in_sync. 393 * If the number of devices is being reduced however, 394 * the device can only be part of the array if 395 * we are reverting a reshape, so this section will 396 * be in-sync. 397 */ 398 if (conf->raid_disks >= conf->previous_raid_disks) 399 degraded++; 400 } 401 rcu_read_unlock(); 402 if (degraded > conf->max_degraded) 403 return 1; 404 rcu_read_lock(); 405 degraded = 0; 406 for (i = 0; i < conf->raid_disks; i++) { 407 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); 408 if (!rdev || test_bit(Faulty, &rdev->flags)) 409 degraded++; 410 else if (test_bit(In_sync, &rdev->flags)) 411 ; 412 else 413 /* not in-sync or faulty. 414 * If reshape increases the number of devices, this 415 * section has already been recovered, else it 416 * almost certainly hasn't. 417 */ 418 if (conf->raid_disks <= conf->previous_raid_disks) 419 degraded++; 420 } 421 rcu_read_unlock(); 422 if (degraded > conf->max_degraded) 423 return 1; 424 return 0; 425 } 426 427 static struct stripe_head * 428 get_active_stripe(struct r5conf *conf, sector_t sector, 429 int previous, int noblock, int noquiesce) 430 { 431 struct stripe_head *sh; 432 433 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); 434 435 spin_lock_irq(&conf->device_lock); 436 437 do { 438 wait_event_lock_irq(conf->wait_for_stripe, 439 conf->quiesce == 0 || noquiesce, 440 conf->device_lock, /* nothing */); 441 sh = __find_stripe(conf, sector, conf->generation - previous); 442 if (!sh) { 443 if (!conf->inactive_blocked) 444 sh = get_free_stripe(conf); 445 if (noblock && sh == NULL) 446 break; 447 if (!sh) { 448 conf->inactive_blocked = 1; 449 wait_event_lock_irq(conf->wait_for_stripe, 450 !list_empty(&conf->inactive_list) && 451 (atomic_read(&conf->active_stripes) 452 < (conf->max_nr_stripes *3/4) 453 || !conf->inactive_blocked), 454 conf->device_lock, 455 ); 456 conf->inactive_blocked = 0; 457 } else 458 init_stripe(sh, sector, previous); 459 } else { 460 if (atomic_read(&sh->count)) { 461 BUG_ON(!list_empty(&sh->lru) 462 && !test_bit(STRIPE_EXPANDING, &sh->state)); 463 } else { 464 if (!test_bit(STRIPE_HANDLE, &sh->state)) 465 atomic_inc(&conf->active_stripes); 466 if (list_empty(&sh->lru) && 467 !test_bit(STRIPE_EXPANDING, &sh->state)) 468 BUG(); 469 list_del_init(&sh->lru); 470 } 471 } 472 } while (sh == NULL); 473 474 if (sh) 475 atomic_inc(&sh->count); 476 477 spin_unlock_irq(&conf->device_lock); 478 return sh; 479 } 480 481 static void 482 raid5_end_read_request(struct bio *bi, int error); 483 static void 484 raid5_end_write_request(struct bio *bi, int error); 485 486 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) 487 { 488 struct r5conf *conf = sh->raid_conf; 489 int i, disks = sh->disks; 490 491 might_sleep(); 492 493 for (i = disks; i--; ) { 494 int rw; 495 struct bio *bi; 496 struct md_rdev *rdev; 497 if (test_and_clear_bit(R5_Wantwrite, 
&sh->dev[i].flags)) { 498 if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) 499 rw = WRITE_FUA; 500 else 501 rw = WRITE; 502 } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 503 rw = READ; 504 else 505 continue; 506 507 bi = &sh->dev[i].req; 508 509 bi->bi_rw = rw; 510 if (rw & WRITE) 511 bi->bi_end_io = raid5_end_write_request; 512 else 513 bi->bi_end_io = raid5_end_read_request; 514 515 rcu_read_lock(); 516 rdev = rcu_dereference(conf->disks[i].rdev); 517 if (rdev && test_bit(Faulty, &rdev->flags)) 518 rdev = NULL; 519 if (rdev) 520 atomic_inc(&rdev->nr_pending); 521 rcu_read_unlock(); 522 523 /* We have already checked bad blocks for reads. Now 524 * need to check for writes. 525 */ 526 while ((rw & WRITE) && rdev && 527 test_bit(WriteErrorSeen, &rdev->flags)) { 528 sector_t first_bad; 529 int bad_sectors; 530 int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, 531 &first_bad, &bad_sectors); 532 if (!bad) 533 break; 534 535 if (bad < 0) { 536 set_bit(BlockedBadBlocks, &rdev->flags); 537 if (!conf->mddev->external && 538 conf->mddev->flags) { 539 /* It is very unlikely, but we might 540 * still need to write out the 541 * bad block log - better give it 542 * a chance*/ 543 md_check_recovery(conf->mddev); 544 } 545 md_wait_for_blocked_rdev(rdev, conf->mddev); 546 } else { 547 /* Acknowledged bad block - skip the write */ 548 rdev_dec_pending(rdev, conf->mddev); 549 rdev = NULL; 550 } 551 } 552 553 if (rdev) { 554 if (s->syncing || s->expanding || s->expanded) 555 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 556 557 set_bit(STRIPE_IO_STARTED, &sh->state); 558 559 bi->bi_bdev = rdev->bdev; 560 pr_debug("%s: for %llu schedule op %ld on disc %d\n", 561 __func__, (unsigned long long)sh->sector, 562 bi->bi_rw, i); 563 atomic_inc(&sh->count); 564 bi->bi_sector = sh->sector + rdev->data_offset; 565 bi->bi_flags = 1 << BIO_UPTODATE; 566 bi->bi_vcnt = 1; 567 bi->bi_max_vecs = 1; 568 bi->bi_idx = 0; 569 bi->bi_io_vec = &sh->dev[i].vec; 570 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 571 bi->bi_io_vec[0].bv_offset = 0; 572 bi->bi_size = STRIPE_SIZE; 573 bi->bi_next = NULL; 574 generic_make_request(bi); 575 } else { 576 if (rw & WRITE) 577 set_bit(STRIPE_DEGRADED, &sh->state); 578 pr_debug("skip op %ld on disc %d for sector %llu\n", 579 bi->bi_rw, i, (unsigned long long)sh->sector); 580 clear_bit(R5_LOCKED, &sh->dev[i].flags); 581 set_bit(STRIPE_HANDLE, &sh->state); 582 } 583 } 584 } 585 586 static struct dma_async_tx_descriptor * 587 async_copy_data(int frombio, struct bio *bio, struct page *page, 588 sector_t sector, struct dma_async_tx_descriptor *tx) 589 { 590 struct bio_vec *bvl; 591 struct page *bio_page; 592 int i; 593 int page_offset; 594 struct async_submit_ctl submit; 595 enum async_tx_flags flags = 0; 596 597 if (bio->bi_sector >= sector) 598 page_offset = (signed)(bio->bi_sector - sector) * 512; 599 else 600 page_offset = (signed)(sector - bio->bi_sector) * -512; 601 602 if (frombio) 603 flags |= ASYNC_TX_FENCE; 604 init_async_submit(&submit, flags, tx, NULL, NULL, NULL); 605 606 bio_for_each_segment(bvl, bio, i) { 607 int len = bvl->bv_len; 608 int clen; 609 int b_offset = 0; 610 611 if (page_offset < 0) { 612 b_offset = -page_offset; 613 page_offset += b_offset; 614 len -= b_offset; 615 } 616 617 if (len > 0 && page_offset + len > STRIPE_SIZE) 618 clen = STRIPE_SIZE - page_offset; 619 else 620 clen = len; 621 622 if (clen > 0) { 623 b_offset += bvl->bv_offset; 624 bio_page = bvl->bv_page; 625 if (frombio) 626 tx = async_memcpy(page, bio_page, page_offset, 627 b_offset, 
clen, &submit); 628 else 629 tx = async_memcpy(bio_page, page, b_offset, 630 page_offset, clen, &submit); 631 } 632 /* chain the operations */ 633 submit.depend_tx = tx; 634 635 if (clen < len) /* hit end of page */ 636 break; 637 page_offset += len; 638 } 639 640 return tx; 641 } 642 643 static void ops_complete_biofill(void *stripe_head_ref) 644 { 645 struct stripe_head *sh = stripe_head_ref; 646 struct bio *return_bi = NULL; 647 struct r5conf *conf = sh->raid_conf; 648 int i; 649 650 pr_debug("%s: stripe %llu\n", __func__, 651 (unsigned long long)sh->sector); 652 653 /* clear completed biofills */ 654 spin_lock_irq(&conf->device_lock); 655 for (i = sh->disks; i--; ) { 656 struct r5dev *dev = &sh->dev[i]; 657 658 /* acknowledge completion of a biofill operation */ 659 /* and check if we need to reply to a read request, 660 * new R5_Wantfill requests are held off until 661 * !STRIPE_BIOFILL_RUN 662 */ 663 if (test_and_clear_bit(R5_Wantfill, &dev->flags)) { 664 struct bio *rbi, *rbi2; 665 666 BUG_ON(!dev->read); 667 rbi = dev->read; 668 dev->read = NULL; 669 while (rbi && rbi->bi_sector < 670 dev->sector + STRIPE_SECTORS) { 671 rbi2 = r5_next_bio(rbi, dev->sector); 672 if (!raid5_dec_bi_phys_segments(rbi)) { 673 rbi->bi_next = return_bi; 674 return_bi = rbi; 675 } 676 rbi = rbi2; 677 } 678 } 679 } 680 spin_unlock_irq(&conf->device_lock); 681 clear_bit(STRIPE_BIOFILL_RUN, &sh->state); 682 683 return_io(return_bi); 684 685 set_bit(STRIPE_HANDLE, &sh->state); 686 release_stripe(sh); 687 } 688 689 static void ops_run_biofill(struct stripe_head *sh) 690 { 691 struct dma_async_tx_descriptor *tx = NULL; 692 struct r5conf *conf = sh->raid_conf; 693 struct async_submit_ctl submit; 694 int i; 695 696 pr_debug("%s: stripe %llu\n", __func__, 697 (unsigned long long)sh->sector); 698 699 for (i = sh->disks; i--; ) { 700 struct r5dev *dev = &sh->dev[i]; 701 if (test_bit(R5_Wantfill, &dev->flags)) { 702 struct bio *rbi; 703 spin_lock_irq(&conf->device_lock); 704 dev->read = rbi = dev->toread; 705 dev->toread = NULL; 706 spin_unlock_irq(&conf->device_lock); 707 while (rbi && rbi->bi_sector < 708 dev->sector + STRIPE_SECTORS) { 709 tx = async_copy_data(0, rbi, dev->page, 710 dev->sector, tx); 711 rbi = r5_next_bio(rbi, dev->sector); 712 } 713 } 714 } 715 716 atomic_inc(&sh->count); 717 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL); 718 async_trigger_callback(&submit); 719 } 720 721 static void mark_target_uptodate(struct stripe_head *sh, int target) 722 { 723 struct r5dev *tgt; 724 725 if (target < 0) 726 return; 727 728 tgt = &sh->dev[target]; 729 set_bit(R5_UPTODATE, &tgt->flags); 730 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 731 clear_bit(R5_Wantcompute, &tgt->flags); 732 } 733 734 static void ops_complete_compute(void *stripe_head_ref) 735 { 736 struct stripe_head *sh = stripe_head_ref; 737 738 pr_debug("%s: stripe %llu\n", __func__, 739 (unsigned long long)sh->sector); 740 741 /* mark the computed target(s) as uptodate */ 742 mark_target_uptodate(sh, sh->ops.target); 743 mark_target_uptodate(sh, sh->ops.target2); 744 745 clear_bit(STRIPE_COMPUTE_RUN, &sh->state); 746 if (sh->check_state == check_state_compute_run) 747 sh->check_state = check_state_compute_result; 748 set_bit(STRIPE_HANDLE, &sh->state); 749 release_stripe(sh); 750 } 751 752 /* return a pointer to the address conversion region of the scribble buffer */ 753 static addr_conv_t *to_addr_conv(struct stripe_head *sh, 754 struct raid5_percpu *percpu) 755 { 756 return percpu->scribble + sizeof(struct page *) 
* (sh->disks + 2); 757 } 758 759 static struct dma_async_tx_descriptor * 760 ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) 761 { 762 int disks = sh->disks; 763 struct page **xor_srcs = percpu->scribble; 764 int target = sh->ops.target; 765 struct r5dev *tgt = &sh->dev[target]; 766 struct page *xor_dest = tgt->page; 767 int count = 0; 768 struct dma_async_tx_descriptor *tx; 769 struct async_submit_ctl submit; 770 int i; 771 772 pr_debug("%s: stripe %llu block: %d\n", 773 __func__, (unsigned long long)sh->sector, target); 774 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 775 776 for (i = disks; i--; ) 777 if (i != target) 778 xor_srcs[count++] = sh->dev[i].page; 779 780 atomic_inc(&sh->count); 781 782 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL, 783 ops_complete_compute, sh, to_addr_conv(sh, percpu)); 784 if (unlikely(count == 1)) 785 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit); 786 else 787 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); 788 789 return tx; 790 } 791 792 /* set_syndrome_sources - populate source buffers for gen_syndrome 793 * @srcs - (struct page *) array of size sh->disks 794 * @sh - stripe_head to parse 795 * 796 * Populates srcs in proper layout order for the stripe and returns the 797 * 'count' of sources to be used in a call to async_gen_syndrome. The P 798 * destination buffer is recorded in srcs[count] and the Q destination 799 * is recorded in srcs[count+1]]. 800 */ 801 static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh) 802 { 803 int disks = sh->disks; 804 int syndrome_disks = sh->ddf_layout ? disks : (disks - 2); 805 int d0_idx = raid6_d0(sh); 806 int count; 807 int i; 808 809 for (i = 0; i < disks; i++) 810 srcs[i] = NULL; 811 812 count = 0; 813 i = d0_idx; 814 do { 815 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); 816 817 srcs[slot] = sh->dev[i].page; 818 i = raid6_next_disk(i, disks); 819 } while (i != d0_idx); 820 821 return syndrome_disks; 822 } 823 824 static struct dma_async_tx_descriptor * 825 ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) 826 { 827 int disks = sh->disks; 828 struct page **blocks = percpu->scribble; 829 int target; 830 int qd_idx = sh->qd_idx; 831 struct dma_async_tx_descriptor *tx; 832 struct async_submit_ctl submit; 833 struct r5dev *tgt; 834 struct page *dest; 835 int i; 836 int count; 837 838 if (sh->ops.target < 0) 839 target = sh->ops.target2; 840 else if (sh->ops.target2 < 0) 841 target = sh->ops.target; 842 else 843 /* we should only have one valid target */ 844 BUG(); 845 BUG_ON(target < 0); 846 pr_debug("%s: stripe %llu block: %d\n", 847 __func__, (unsigned long long)sh->sector, target); 848 849 tgt = &sh->dev[target]; 850 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 851 dest = tgt->page; 852 853 atomic_inc(&sh->count); 854 855 if (target == qd_idx) { 856 count = set_syndrome_sources(blocks, sh); 857 blocks[count] = NULL; /* regenerating p is not necessary */ 858 BUG_ON(blocks[count+1] != dest); /* q should already be set */ 859 init_async_submit(&submit, ASYNC_TX_FENCE, NULL, 860 ops_complete_compute, sh, 861 to_addr_conv(sh, percpu)); 862 tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); 863 } else { 864 /* Compute any data- or p-drive using XOR */ 865 count = 0; 866 for (i = disks; i-- ; ) { 867 if (i == target || i == qd_idx) 868 continue; 869 blocks[count++] = sh->dev[i].page; 870 } 871 872 init_async_submit(&submit, 
ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, 873 NULL, ops_complete_compute, sh, 874 to_addr_conv(sh, percpu)); 875 tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit); 876 } 877 878 return tx; 879 } 880 881 static struct dma_async_tx_descriptor * 882 ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) 883 { 884 int i, count, disks = sh->disks; 885 int syndrome_disks = sh->ddf_layout ? disks : disks-2; 886 int d0_idx = raid6_d0(sh); 887 int faila = -1, failb = -1; 888 int target = sh->ops.target; 889 int target2 = sh->ops.target2; 890 struct r5dev *tgt = &sh->dev[target]; 891 struct r5dev *tgt2 = &sh->dev[target2]; 892 struct dma_async_tx_descriptor *tx; 893 struct page **blocks = percpu->scribble; 894 struct async_submit_ctl submit; 895 896 pr_debug("%s: stripe %llu block1: %d block2: %d\n", 897 __func__, (unsigned long long)sh->sector, target, target2); 898 BUG_ON(target < 0 || target2 < 0); 899 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 900 BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags)); 901 902 /* we need to open-code set_syndrome_sources to handle the 903 * slot number conversion for 'faila' and 'failb' 904 */ 905 for (i = 0; i < disks ; i++) 906 blocks[i] = NULL; 907 count = 0; 908 i = d0_idx; 909 do { 910 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); 911 912 blocks[slot] = sh->dev[i].page; 913 914 if (i == target) 915 faila = slot; 916 if (i == target2) 917 failb = slot; 918 i = raid6_next_disk(i, disks); 919 } while (i != d0_idx); 920 921 BUG_ON(faila == failb); 922 if (failb < faila) 923 swap(faila, failb); 924 pr_debug("%s: stripe: %llu faila: %d failb: %d\n", 925 __func__, (unsigned long long)sh->sector, faila, failb); 926 927 atomic_inc(&sh->count); 928 929 if (failb == syndrome_disks+1) { 930 /* Q disk is one of the missing disks */ 931 if (faila == syndrome_disks) { 932 /* Missing P+Q, just recompute */ 933 init_async_submit(&submit, ASYNC_TX_FENCE, NULL, 934 ops_complete_compute, sh, 935 to_addr_conv(sh, percpu)); 936 return async_gen_syndrome(blocks, 0, syndrome_disks+2, 937 STRIPE_SIZE, &submit); 938 } else { 939 struct page *dest; 940 int data_target; 941 int qd_idx = sh->qd_idx; 942 943 /* Missing D+Q: recompute D from P, then recompute Q */ 944 if (target == qd_idx) 945 data_target = target2; 946 else 947 data_target = target; 948 949 count = 0; 950 for (i = disks; i-- ; ) { 951 if (i == data_target || i == qd_idx) 952 continue; 953 blocks[count++] = sh->dev[i].page; 954 } 955 dest = sh->dev[data_target].page; 956 init_async_submit(&submit, 957 ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, 958 NULL, NULL, NULL, 959 to_addr_conv(sh, percpu)); 960 tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, 961 &submit); 962 963 count = set_syndrome_sources(blocks, sh); 964 init_async_submit(&submit, ASYNC_TX_FENCE, tx, 965 ops_complete_compute, sh, 966 to_addr_conv(sh, percpu)); 967 return async_gen_syndrome(blocks, 0, count+2, 968 STRIPE_SIZE, &submit); 969 } 970 } else { 971 init_async_submit(&submit, ASYNC_TX_FENCE, NULL, 972 ops_complete_compute, sh, 973 to_addr_conv(sh, percpu)); 974 if (failb == syndrome_disks) { 975 /* We're missing D+P. */ 976 return async_raid6_datap_recov(syndrome_disks+2, 977 STRIPE_SIZE, faila, 978 blocks, &submit); 979 } else { 980 /* We're missing D+D. 
*/ 981 return async_raid6_2data_recov(syndrome_disks+2, 982 STRIPE_SIZE, faila, failb, 983 blocks, &submit); 984 } 985 } 986 } 987 988 989 static void ops_complete_prexor(void *stripe_head_ref) 990 { 991 struct stripe_head *sh = stripe_head_ref; 992 993 pr_debug("%s: stripe %llu\n", __func__, 994 (unsigned long long)sh->sector); 995 } 996 997 static struct dma_async_tx_descriptor * 998 ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu, 999 struct dma_async_tx_descriptor *tx) 1000 { 1001 int disks = sh->disks; 1002 struct page **xor_srcs = percpu->scribble; 1003 int count = 0, pd_idx = sh->pd_idx, i; 1004 struct async_submit_ctl submit; 1005 1006 /* existing parity data subtracted */ 1007 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 1008 1009 pr_debug("%s: stripe %llu\n", __func__, 1010 (unsigned long long)sh->sector); 1011 1012 for (i = disks; i--; ) { 1013 struct r5dev *dev = &sh->dev[i]; 1014 /* Only process blocks that are known to be uptodate */ 1015 if (test_bit(R5_Wantdrain, &dev->flags)) 1016 xor_srcs[count++] = dev->page; 1017 } 1018 1019 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, 1020 ops_complete_prexor, sh, to_addr_conv(sh, percpu)); 1021 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); 1022 1023 return tx; 1024 } 1025 1026 static struct dma_async_tx_descriptor * 1027 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) 1028 { 1029 int disks = sh->disks; 1030 int i; 1031 1032 pr_debug("%s: stripe %llu\n", __func__, 1033 (unsigned long long)sh->sector); 1034 1035 for (i = disks; i--; ) { 1036 struct r5dev *dev = &sh->dev[i]; 1037 struct bio *chosen; 1038 1039 if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) { 1040 struct bio *wbi; 1041 1042 spin_lock_irq(&sh->raid_conf->device_lock); 1043 chosen = dev->towrite; 1044 dev->towrite = NULL; 1045 BUG_ON(dev->written); 1046 wbi = dev->written = chosen; 1047 spin_unlock_irq(&sh->raid_conf->device_lock); 1048 1049 while (wbi && wbi->bi_sector < 1050 dev->sector + STRIPE_SECTORS) { 1051 if (wbi->bi_rw & REQ_FUA) 1052 set_bit(R5_WantFUA, &dev->flags); 1053 tx = async_copy_data(1, wbi, dev->page, 1054 dev->sector, tx); 1055 wbi = r5_next_bio(wbi, dev->sector); 1056 } 1057 } 1058 } 1059 1060 return tx; 1061 } 1062 1063 static void ops_complete_reconstruct(void *stripe_head_ref) 1064 { 1065 struct stripe_head *sh = stripe_head_ref; 1066 int disks = sh->disks; 1067 int pd_idx = sh->pd_idx; 1068 int qd_idx = sh->qd_idx; 1069 int i; 1070 bool fua = false; 1071 1072 pr_debug("%s: stripe %llu\n", __func__, 1073 (unsigned long long)sh->sector); 1074 1075 for (i = disks; i--; ) 1076 fua |= test_bit(R5_WantFUA, &sh->dev[i].flags); 1077 1078 for (i = disks; i--; ) { 1079 struct r5dev *dev = &sh->dev[i]; 1080 1081 if (dev->written || i == pd_idx || i == qd_idx) { 1082 set_bit(R5_UPTODATE, &dev->flags); 1083 if (fua) 1084 set_bit(R5_WantFUA, &dev->flags); 1085 } 1086 } 1087 1088 if (sh->reconstruct_state == reconstruct_state_drain_run) 1089 sh->reconstruct_state = reconstruct_state_drain_result; 1090 else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) 1091 sh->reconstruct_state = reconstruct_state_prexor_drain_result; 1092 else { 1093 BUG_ON(sh->reconstruct_state != reconstruct_state_run); 1094 sh->reconstruct_state = reconstruct_state_result; 1095 } 1096 1097 set_bit(STRIPE_HANDLE, &sh->state); 1098 release_stripe(sh); 1099 } 1100 1101 static void 1102 ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu, 
1103 struct dma_async_tx_descriptor *tx) 1104 { 1105 int disks = sh->disks; 1106 struct page **xor_srcs = percpu->scribble; 1107 struct async_submit_ctl submit; 1108 int count = 0, pd_idx = sh->pd_idx, i; 1109 struct page *xor_dest; 1110 int prexor = 0; 1111 unsigned long flags; 1112 1113 pr_debug("%s: stripe %llu\n", __func__, 1114 (unsigned long long)sh->sector); 1115 1116 /* check if prexor is active which means only process blocks 1117 * that are part of a read-modify-write (written) 1118 */ 1119 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { 1120 prexor = 1; 1121 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 1122 for (i = disks; i--; ) { 1123 struct r5dev *dev = &sh->dev[i]; 1124 if (dev->written) 1125 xor_srcs[count++] = dev->page; 1126 } 1127 } else { 1128 xor_dest = sh->dev[pd_idx].page; 1129 for (i = disks; i--; ) { 1130 struct r5dev *dev = &sh->dev[i]; 1131 if (i != pd_idx) 1132 xor_srcs[count++] = dev->page; 1133 } 1134 } 1135 1136 /* 1/ if we prexor'd then the dest is reused as a source 1137 * 2/ if we did not prexor then we are redoing the parity 1138 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST 1139 * for the synchronous xor case 1140 */ 1141 flags = ASYNC_TX_ACK | 1142 (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST); 1143 1144 atomic_inc(&sh->count); 1145 1146 init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh, 1147 to_addr_conv(sh, percpu)); 1148 if (unlikely(count == 1)) 1149 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit); 1150 else 1151 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); 1152 } 1153 1154 static void 1155 ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu, 1156 struct dma_async_tx_descriptor *tx) 1157 { 1158 struct async_submit_ctl submit; 1159 struct page **blocks = percpu->scribble; 1160 int count; 1161 1162 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); 1163 1164 count = set_syndrome_sources(blocks, sh); 1165 1166 atomic_inc(&sh->count); 1167 1168 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct, 1169 sh, to_addr_conv(sh, percpu)); 1170 async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); 1171 } 1172 1173 static void ops_complete_check(void *stripe_head_ref) 1174 { 1175 struct stripe_head *sh = stripe_head_ref; 1176 1177 pr_debug("%s: stripe %llu\n", __func__, 1178 (unsigned long long)sh->sector); 1179 1180 sh->check_state = check_state_check_result; 1181 set_bit(STRIPE_HANDLE, &sh->state); 1182 release_stripe(sh); 1183 } 1184 1185 static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu) 1186 { 1187 int disks = sh->disks; 1188 int pd_idx = sh->pd_idx; 1189 int qd_idx = sh->qd_idx; 1190 struct page *xor_dest; 1191 struct page **xor_srcs = percpu->scribble; 1192 struct dma_async_tx_descriptor *tx; 1193 struct async_submit_ctl submit; 1194 int count; 1195 int i; 1196 1197 pr_debug("%s: stripe %llu\n", __func__, 1198 (unsigned long long)sh->sector); 1199 1200 count = 0; 1201 xor_dest = sh->dev[pd_idx].page; 1202 xor_srcs[count++] = xor_dest; 1203 for (i = disks; i--; ) { 1204 if (i == pd_idx || i == qd_idx) 1205 continue; 1206 xor_srcs[count++] = sh->dev[i].page; 1207 } 1208 1209 init_async_submit(&submit, 0, NULL, NULL, NULL, 1210 to_addr_conv(sh, percpu)); 1211 tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 1212 &sh->ops.zero_sum_result, &submit); 1213 1214 atomic_inc(&sh->count); 1215 init_async_submit(&submit, ASYNC_TX_ACK, tx, 
ops_complete_check, sh, NULL); 1216 tx = async_trigger_callback(&submit); 1217 } 1218 1219 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp) 1220 { 1221 struct page **srcs = percpu->scribble; 1222 struct async_submit_ctl submit; 1223 int count; 1224 1225 pr_debug("%s: stripe %llu checkp: %d\n", __func__, 1226 (unsigned long long)sh->sector, checkp); 1227 1228 count = set_syndrome_sources(srcs, sh); 1229 if (!checkp) 1230 srcs[count] = NULL; 1231 1232 atomic_inc(&sh->count); 1233 init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check, 1234 sh, to_addr_conv(sh, percpu)); 1235 async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE, 1236 &sh->ops.zero_sum_result, percpu->spare_page, &submit); 1237 } 1238 1239 static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request) 1240 { 1241 int overlap_clear = 0, i, disks = sh->disks; 1242 struct dma_async_tx_descriptor *tx = NULL; 1243 struct r5conf *conf = sh->raid_conf; 1244 int level = conf->level; 1245 struct raid5_percpu *percpu; 1246 unsigned long cpu; 1247 1248 cpu = get_cpu(); 1249 percpu = per_cpu_ptr(conf->percpu, cpu); 1250 if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { 1251 ops_run_biofill(sh); 1252 overlap_clear++; 1253 } 1254 1255 if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) { 1256 if (level < 6) 1257 tx = ops_run_compute5(sh, percpu); 1258 else { 1259 if (sh->ops.target2 < 0 || sh->ops.target < 0) 1260 tx = ops_run_compute6_1(sh, percpu); 1261 else 1262 tx = ops_run_compute6_2(sh, percpu); 1263 } 1264 /* terminate the chain if reconstruct is not set to be run */ 1265 if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) 1266 async_tx_ack(tx); 1267 } 1268 1269 if (test_bit(STRIPE_OP_PREXOR, &ops_request)) 1270 tx = ops_run_prexor(sh, percpu, tx); 1271 1272 if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) { 1273 tx = ops_run_biodrain(sh, tx); 1274 overlap_clear++; 1275 } 1276 1277 if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) { 1278 if (level < 6) 1279 ops_run_reconstruct5(sh, percpu, tx); 1280 else 1281 ops_run_reconstruct6(sh, percpu, tx); 1282 } 1283 1284 if (test_bit(STRIPE_OP_CHECK, &ops_request)) { 1285 if (sh->check_state == check_state_run) 1286 ops_run_check_p(sh, percpu); 1287 else if (sh->check_state == check_state_run_q) 1288 ops_run_check_pq(sh, percpu, 0); 1289 else if (sh->check_state == check_state_run_pq) 1290 ops_run_check_pq(sh, percpu, 1); 1291 else 1292 BUG(); 1293 } 1294 1295 if (overlap_clear) 1296 for (i = disks; i--; ) { 1297 struct r5dev *dev = &sh->dev[i]; 1298 if (test_and_clear_bit(R5_Overlap, &dev->flags)) 1299 wake_up(&sh->raid_conf->wait_for_overlap); 1300 } 1301 put_cpu(); 1302 } 1303 1304 #ifdef CONFIG_MULTICORE_RAID456 1305 static void async_run_ops(void *param, async_cookie_t cookie) 1306 { 1307 struct stripe_head *sh = param; 1308 unsigned long ops_request = sh->ops.request; 1309 1310 clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state); 1311 wake_up(&sh->ops.wait_for_ops); 1312 1313 __raid_run_ops(sh, ops_request); 1314 release_stripe(sh); 1315 } 1316 1317 static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) 1318 { 1319 /* since handle_stripe can be called outside of raid5d context 1320 * we need to ensure sh->ops.request is de-staged before another 1321 * request arrives 1322 */ 1323 wait_event(sh->ops.wait_for_ops, 1324 !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state)); 1325 sh->ops.request = ops_request; 1326 1327 atomic_inc(&sh->count); 1328 async_schedule(async_run_ops, sh); 1329 } 
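
/*
 * Illustrative sketch (not part of the driver) of the hand-off implemented
 * above: the submitter stages the request flags and the async worker
 * consumes them, so at most one ops_request is staged per stripe at a time:
 *
 *   raid_run_ops()                            async_run_ops()
 *     wait until STRIPE_OPS_REQ_PENDING         ops = sh->ops.request;
 *       can be set (test_and_set_bit_lock)      clear STRIPE_OPS_REQ_PENDING;
 *     sh->ops.request = ops_request;            wake_up(&sh->ops.wait_for_ops);
 *     atomic_inc(&sh->count);                   __raid_run_ops(sh, ops);
 *     async_schedule(async_run_ops, sh);        release_stripe(sh);
 */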
#else
#define raid_run_ops __raid_run_ops
#endif

static int grow_one_stripe(struct r5conf *conf)
{
	struct stripe_head *sh;
	sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL);
	if (!sh)
		return 0;

	sh->raid_conf = conf;
	#ifdef CONFIG_MULTICORE_RAID456
	init_waitqueue_head(&sh->ops.wait_for_ops);
	#endif

	if (grow_buffers(sh)) {
		shrink_buffers(sh);
		kmem_cache_free(conf->slab_cache, sh);
		return 0;
	}
	/* we just created an active stripe so... */
	atomic_set(&sh->count, 1);
	atomic_inc(&conf->active_stripes);
	INIT_LIST_HEAD(&sh->lru);
	release_stripe(sh);
	return 1;
}

static int grow_stripes(struct r5conf *conf, int num)
{
	struct kmem_cache *sc;
	int devs = max(conf->raid_disks, conf->previous_raid_disks);

	if (conf->mddev->gendisk)
		sprintf(conf->cache_name[0],
			"raid%d-%s", conf->level, mdname(conf->mddev));
	else
		sprintf(conf->cache_name[0],
			"raid%d-%p", conf->level, conf->mddev);
	sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]);

	conf->active_name = 0;
	sc = kmem_cache_create(conf->cache_name[conf->active_name],
			sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
			0, 0, NULL);
	if (!sc)
		return 1;
	conf->slab_cache = sc;
	conf->pool_size = devs;
	while (num--)
		if (!grow_one_stripe(conf))
			return 1;
	return 0;
}

/**
 * scribble_len - return the required size of the scribble region
 * @num - total number of disks in the array
 *
 * The size must be enough to contain:
 * 1/ a struct page pointer for each device in the array +2
 * 2/ room to convert each entry in (1) to its corresponding dma
 *    (dma_map_page()) or page (page_address()) address.
 *
 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we
 * calculate over all devices (not just the data blocks), using zeros in place
 * of the P and Q blocks.
 */
static size_t scribble_len(int num)
{
	size_t len;

	len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2);

	return len;
}

static int resize_stripes(struct r5conf *conf, int newsize)
{
	/* Make all the stripes able to hold 'newsize' devices.
	 * New slots in each stripe get 'page' set to a new page.
	 *
	 * This happens in stages:
	 * 1/ create a new kmem_cache and allocate the required number of
	 *    stripe_heads.
	 * 2/ gather all the old stripe_heads and transfer the pages across
	 *    to the new stripe_heads.  This will have the side effect of
	 *    freezing the array as once all stripe_heads have been collected,
	 *    no IO will be possible.  Old stripe heads are freed once their
	 *    pages have been transferred over, and the old kmem_cache is
	 *    freed when all stripes are done.
	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
	 *    we simply return a failure status - no need to clean anything up.
	 * 4/ allocate new pages for the new slots in the new stripe_heads.
	 *    If this fails, we don't bother trying to shrink the
	 *    stripe_heads down again, we just leave them as they are.
	 *    As each stripe_head is processed the new one is released into
	 *    active service.
	 *
	 * Once step2 is started, we cannot afford to wait for a write,
	 * so we use GFP_NOIO allocations.
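 *
 * An illustrative sizing note (not from the original comment): step 3 also
 * regrows the per-cpu scribble buffers via scribble_len() above; going from
 * 4 to 6 devices grows each buffer from (4+2)*(8+8) == 96 bytes to
 * (6+2)*(8+8) == 128 bytes, assuming sizeof(struct page *) ==
 * sizeof(addr_conv_t) == 8 as on a typical 64-bit build.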
1432 */ 1433 struct stripe_head *osh, *nsh; 1434 LIST_HEAD(newstripes); 1435 struct disk_info *ndisks; 1436 unsigned long cpu; 1437 int err; 1438 struct kmem_cache *sc; 1439 int i; 1440 1441 if (newsize <= conf->pool_size) 1442 return 0; /* never bother to shrink */ 1443 1444 err = md_allow_write(conf->mddev); 1445 if (err) 1446 return err; 1447 1448 /* Step 1 */ 1449 sc = kmem_cache_create(conf->cache_name[1-conf->active_name], 1450 sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev), 1451 0, 0, NULL); 1452 if (!sc) 1453 return -ENOMEM; 1454 1455 for (i = conf->max_nr_stripes; i; i--) { 1456 nsh = kmem_cache_zalloc(sc, GFP_KERNEL); 1457 if (!nsh) 1458 break; 1459 1460 nsh->raid_conf = conf; 1461 #ifdef CONFIG_MULTICORE_RAID456 1462 init_waitqueue_head(&nsh->ops.wait_for_ops); 1463 #endif 1464 1465 list_add(&nsh->lru, &newstripes); 1466 } 1467 if (i) { 1468 /* didn't get enough, give up */ 1469 while (!list_empty(&newstripes)) { 1470 nsh = list_entry(newstripes.next, struct stripe_head, lru); 1471 list_del(&nsh->lru); 1472 kmem_cache_free(sc, nsh); 1473 } 1474 kmem_cache_destroy(sc); 1475 return -ENOMEM; 1476 } 1477 /* Step 2 - Must use GFP_NOIO now. 1478 * OK, we have enough stripes, start collecting inactive 1479 * stripes and copying them over 1480 */ 1481 list_for_each_entry(nsh, &newstripes, lru) { 1482 spin_lock_irq(&conf->device_lock); 1483 wait_event_lock_irq(conf->wait_for_stripe, 1484 !list_empty(&conf->inactive_list), 1485 conf->device_lock, 1486 ); 1487 osh = get_free_stripe(conf); 1488 spin_unlock_irq(&conf->device_lock); 1489 atomic_set(&nsh->count, 1); 1490 for(i=0; i<conf->pool_size; i++) 1491 nsh->dev[i].page = osh->dev[i].page; 1492 for( ; i<newsize; i++) 1493 nsh->dev[i].page = NULL; 1494 kmem_cache_free(conf->slab_cache, osh); 1495 } 1496 kmem_cache_destroy(conf->slab_cache); 1497 1498 /* Step 3. 
1499 * At this point, we are holding all the stripes so the array 1500 * is completely stalled, so now is a good time to resize 1501 * conf->disks and the scribble region 1502 */ 1503 ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO); 1504 if (ndisks) { 1505 for (i=0; i<conf->raid_disks; i++) 1506 ndisks[i] = conf->disks[i]; 1507 kfree(conf->disks); 1508 conf->disks = ndisks; 1509 } else 1510 err = -ENOMEM; 1511 1512 get_online_cpus(); 1513 conf->scribble_len = scribble_len(newsize); 1514 for_each_present_cpu(cpu) { 1515 struct raid5_percpu *percpu; 1516 void *scribble; 1517 1518 percpu = per_cpu_ptr(conf->percpu, cpu); 1519 scribble = kmalloc(conf->scribble_len, GFP_NOIO); 1520 1521 if (scribble) { 1522 kfree(percpu->scribble); 1523 percpu->scribble = scribble; 1524 } else { 1525 err = -ENOMEM; 1526 break; 1527 } 1528 } 1529 put_online_cpus(); 1530 1531 /* Step 4, return new stripes to service */ 1532 while(!list_empty(&newstripes)) { 1533 nsh = list_entry(newstripes.next, struct stripe_head, lru); 1534 list_del_init(&nsh->lru); 1535 1536 for (i=conf->raid_disks; i < newsize; i++) 1537 if (nsh->dev[i].page == NULL) { 1538 struct page *p = alloc_page(GFP_NOIO); 1539 nsh->dev[i].page = p; 1540 if (!p) 1541 err = -ENOMEM; 1542 } 1543 release_stripe(nsh); 1544 } 1545 /* critical section pass, GFP_NOIO no longer needed */ 1546 1547 conf->slab_cache = sc; 1548 conf->active_name = 1-conf->active_name; 1549 conf->pool_size = newsize; 1550 return err; 1551 } 1552 1553 static int drop_one_stripe(struct r5conf *conf) 1554 { 1555 struct stripe_head *sh; 1556 1557 spin_lock_irq(&conf->device_lock); 1558 sh = get_free_stripe(conf); 1559 spin_unlock_irq(&conf->device_lock); 1560 if (!sh) 1561 return 0; 1562 BUG_ON(atomic_read(&sh->count)); 1563 shrink_buffers(sh); 1564 kmem_cache_free(conf->slab_cache, sh); 1565 atomic_dec(&conf->active_stripes); 1566 return 1; 1567 } 1568 1569 static void shrink_stripes(struct r5conf *conf) 1570 { 1571 while (drop_one_stripe(conf)) 1572 ; 1573 1574 if (conf->slab_cache) 1575 kmem_cache_destroy(conf->slab_cache); 1576 conf->slab_cache = NULL; 1577 } 1578 1579 static void raid5_end_read_request(struct bio * bi, int error) 1580 { 1581 struct stripe_head *sh = bi->bi_private; 1582 struct r5conf *conf = sh->raid_conf; 1583 int disks = sh->disks, i; 1584 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 1585 char b[BDEVNAME_SIZE]; 1586 struct md_rdev *rdev; 1587 1588 1589 for (i=0 ; i<disks; i++) 1590 if (bi == &sh->dev[i].req) 1591 break; 1592 1593 pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n", 1594 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 1595 uptodate); 1596 if (i == disks) { 1597 BUG(); 1598 return; 1599 } 1600 1601 if (uptodate) { 1602 set_bit(R5_UPTODATE, &sh->dev[i].flags); 1603 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 1604 rdev = conf->disks[i].rdev; 1605 printk_ratelimited( 1606 KERN_INFO 1607 "md/raid:%s: read error corrected" 1608 " (%lu sectors at %llu on %s)\n", 1609 mdname(conf->mddev), STRIPE_SECTORS, 1610 (unsigned long long)(sh->sector 1611 + rdev->data_offset), 1612 bdevname(rdev->bdev, b)); 1613 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); 1614 clear_bit(R5_ReadError, &sh->dev[i].flags); 1615 clear_bit(R5_ReWrite, &sh->dev[i].flags); 1616 } 1617 if (atomic_read(&conf->disks[i].rdev->read_errors)) 1618 atomic_set(&conf->disks[i].rdev->read_errors, 0); 1619 } else { 1620 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b); 1621 int retry = 0; 1622 rdev = conf->disks[i].rdev; 1623 1624 
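
		/*
		 * The read failed.  Choose between retrying (set R5_ReadError
		 * so the block is recomputed from the other devices and
		 * re-written; R5_ReWrite records that a rewrite was already
		 * tried) and failing the device: the error is treated as not
		 * correctable if the array is already fully degraded, if a
		 * rewrite was already attempted, or if this device has
		 * accumulated more read errors than there are stripes in the
		 * cache.
		 */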
clear_bit(R5_UPTODATE, &sh->dev[i].flags); 1625 atomic_inc(&rdev->read_errors); 1626 if (conf->mddev->degraded >= conf->max_degraded) 1627 printk_ratelimited( 1628 KERN_WARNING 1629 "md/raid:%s: read error not correctable " 1630 "(sector %llu on %s).\n", 1631 mdname(conf->mddev), 1632 (unsigned long long)(sh->sector 1633 + rdev->data_offset), 1634 bdn); 1635 else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) 1636 /* Oh, no!!! */ 1637 printk_ratelimited( 1638 KERN_WARNING 1639 "md/raid:%s: read error NOT corrected!! " 1640 "(sector %llu on %s).\n", 1641 mdname(conf->mddev), 1642 (unsigned long long)(sh->sector 1643 + rdev->data_offset), 1644 bdn); 1645 else if (atomic_read(&rdev->read_errors) 1646 > conf->max_nr_stripes) 1647 printk(KERN_WARNING 1648 "md/raid:%s: Too many read errors, failing device %s.\n", 1649 mdname(conf->mddev), bdn); 1650 else 1651 retry = 1; 1652 if (retry) 1653 set_bit(R5_ReadError, &sh->dev[i].flags); 1654 else { 1655 clear_bit(R5_ReadError, &sh->dev[i].flags); 1656 clear_bit(R5_ReWrite, &sh->dev[i].flags); 1657 md_error(conf->mddev, rdev); 1658 } 1659 } 1660 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); 1661 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1662 set_bit(STRIPE_HANDLE, &sh->state); 1663 release_stripe(sh); 1664 } 1665 1666 static void raid5_end_write_request(struct bio *bi, int error) 1667 { 1668 struct stripe_head *sh = bi->bi_private; 1669 struct r5conf *conf = sh->raid_conf; 1670 int disks = sh->disks, i; 1671 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 1672 sector_t first_bad; 1673 int bad_sectors; 1674 1675 for (i=0 ; i<disks; i++) 1676 if (bi == &sh->dev[i].req) 1677 break; 1678 1679 pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n", 1680 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 1681 uptodate); 1682 if (i == disks) { 1683 BUG(); 1684 return; 1685 } 1686 1687 if (!uptodate) { 1688 set_bit(WriteErrorSeen, &conf->disks[i].rdev->flags); 1689 set_bit(R5_WriteError, &sh->dev[i].flags); 1690 } else if (is_badblock(conf->disks[i].rdev, sh->sector, STRIPE_SECTORS, 1691 &first_bad, &bad_sectors)) 1692 set_bit(R5_MadeGood, &sh->dev[i].flags); 1693 1694 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); 1695 1696 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1697 set_bit(STRIPE_HANDLE, &sh->state); 1698 release_stripe(sh); 1699 } 1700 1701 1702 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous); 1703 1704 static void raid5_build_block(struct stripe_head *sh, int i, int previous) 1705 { 1706 struct r5dev *dev = &sh->dev[i]; 1707 1708 bio_init(&dev->req); 1709 dev->req.bi_io_vec = &dev->vec; 1710 dev->req.bi_vcnt++; 1711 dev->req.bi_max_vecs++; 1712 dev->vec.bv_page = dev->page; 1713 dev->vec.bv_len = STRIPE_SIZE; 1714 dev->vec.bv_offset = 0; 1715 1716 dev->req.bi_sector = sh->sector; 1717 dev->req.bi_private = sh; 1718 1719 dev->flags = 0; 1720 dev->sector = compute_blocknr(sh, i, previous); 1721 } 1722 1723 static void error(struct mddev *mddev, struct md_rdev *rdev) 1724 { 1725 char b[BDEVNAME_SIZE]; 1726 struct r5conf *conf = mddev->private; 1727 pr_debug("raid456: error called\n"); 1728 1729 if (test_and_clear_bit(In_sync, &rdev->flags)) { 1730 unsigned long flags; 1731 spin_lock_irqsave(&conf->device_lock, flags); 1732 mddev->degraded++; 1733 spin_unlock_irqrestore(&conf->device_lock, flags); 1734 /* 1735 * if recovery was running, make sure it aborts. 
1736 */ 1737 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 1738 } 1739 set_bit(Blocked, &rdev->flags); 1740 set_bit(Faulty, &rdev->flags); 1741 set_bit(MD_CHANGE_DEVS, &mddev->flags); 1742 printk(KERN_ALERT 1743 "md/raid:%s: Disk failure on %s, disabling device.\n" 1744 "md/raid:%s: Operation continuing on %d devices.\n", 1745 mdname(mddev), 1746 bdevname(rdev->bdev, b), 1747 mdname(mddev), 1748 conf->raid_disks - mddev->degraded); 1749 } 1750 1751 /* 1752 * Input: a 'big' sector number, 1753 * Output: index of the data and parity disk, and the sector # in them. 1754 */ 1755 static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, 1756 int previous, int *dd_idx, 1757 struct stripe_head *sh) 1758 { 1759 sector_t stripe, stripe2; 1760 sector_t chunk_number; 1761 unsigned int chunk_offset; 1762 int pd_idx, qd_idx; 1763 int ddf_layout = 0; 1764 sector_t new_sector; 1765 int algorithm = previous ? conf->prev_algo 1766 : conf->algorithm; 1767 int sectors_per_chunk = previous ? conf->prev_chunk_sectors 1768 : conf->chunk_sectors; 1769 int raid_disks = previous ? conf->previous_raid_disks 1770 : conf->raid_disks; 1771 int data_disks = raid_disks - conf->max_degraded; 1772 1773 /* First compute the information on this sector */ 1774 1775 /* 1776 * Compute the chunk number and the sector offset inside the chunk 1777 */ 1778 chunk_offset = sector_div(r_sector, sectors_per_chunk); 1779 chunk_number = r_sector; 1780 1781 /* 1782 * Compute the stripe number 1783 */ 1784 stripe = chunk_number; 1785 *dd_idx = sector_div(stripe, data_disks); 1786 stripe2 = stripe; 1787 /* 1788 * Select the parity disk based on the user selected algorithm. 1789 */ 1790 pd_idx = qd_idx = -1; 1791 switch(conf->level) { 1792 case 4: 1793 pd_idx = data_disks; 1794 break; 1795 case 5: 1796 switch (algorithm) { 1797 case ALGORITHM_LEFT_ASYMMETRIC: 1798 pd_idx = data_disks - sector_div(stripe2, raid_disks); 1799 if (*dd_idx >= pd_idx) 1800 (*dd_idx)++; 1801 break; 1802 case ALGORITHM_RIGHT_ASYMMETRIC: 1803 pd_idx = sector_div(stripe2, raid_disks); 1804 if (*dd_idx >= pd_idx) 1805 (*dd_idx)++; 1806 break; 1807 case ALGORITHM_LEFT_SYMMETRIC: 1808 pd_idx = data_disks - sector_div(stripe2, raid_disks); 1809 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 1810 break; 1811 case ALGORITHM_RIGHT_SYMMETRIC: 1812 pd_idx = sector_div(stripe2, raid_disks); 1813 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 1814 break; 1815 case ALGORITHM_PARITY_0: 1816 pd_idx = 0; 1817 (*dd_idx)++; 1818 break; 1819 case ALGORITHM_PARITY_N: 1820 pd_idx = data_disks; 1821 break; 1822 default: 1823 BUG(); 1824 } 1825 break; 1826 case 6: 1827 1828 switch (algorithm) { 1829 case ALGORITHM_LEFT_ASYMMETRIC: 1830 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 1831 qd_idx = pd_idx + 1; 1832 if (pd_idx == raid_disks-1) { 1833 (*dd_idx)++; /* Q D D D P */ 1834 qd_idx = 0; 1835 } else if (*dd_idx >= pd_idx) 1836 (*dd_idx) += 2; /* D D P Q D */ 1837 break; 1838 case ALGORITHM_RIGHT_ASYMMETRIC: 1839 pd_idx = sector_div(stripe2, raid_disks); 1840 qd_idx = pd_idx + 1; 1841 if (pd_idx == raid_disks-1) { 1842 (*dd_idx)++; /* Q D D D P */ 1843 qd_idx = 0; 1844 } else if (*dd_idx >= pd_idx) 1845 (*dd_idx) += 2; /* D D P Q D */ 1846 break; 1847 case ALGORITHM_LEFT_SYMMETRIC: 1848 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 1849 qd_idx = (pd_idx + 1) % raid_disks; 1850 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 1851 break; 1852 case ALGORITHM_RIGHT_SYMMETRIC: 1853 pd_idx = sector_div(stripe2, raid_disks); 1854 qd_idx = (pd_idx + 1) 
% raid_disks;
			*dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks;
			break;

		case ALGORITHM_PARITY_0:
			pd_idx = 0;
			qd_idx = 1;
			(*dd_idx) += 2;
			break;
		case ALGORITHM_PARITY_N:
			pd_idx = data_disks;
			qd_idx = data_disks + 1;
			break;

		case ALGORITHM_ROTATING_ZERO_RESTART:
			/* Exactly the same as RIGHT_ASYMMETRIC, but the order
			 * of blocks for computing Q is different.
			 */
			pd_idx = sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			ddf_layout = 1;
			break;

		case ALGORITHM_ROTATING_N_RESTART:
			/* Same as left_asymmetric, but the first stripe is
			 * D D D P Q  rather than
			 * Q D D D P
			 */
			stripe2 += 1;
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = pd_idx + 1;
			if (pd_idx == raid_disks-1) {
				(*dd_idx)++;	/* Q D D D P */
				qd_idx = 0;
			} else if (*dd_idx >= pd_idx)
				(*dd_idx) += 2; /* D D P Q D */
			ddf_layout = 1;
			break;

		case ALGORITHM_ROTATING_N_CONTINUE:
			/* Same as left_symmetric but Q is before P */
			pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks);
			qd_idx = (pd_idx + raid_disks - 1) % raid_disks;
			*dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks;
			ddf_layout = 1;
			break;

		case ALGORITHM_LEFT_ASYMMETRIC_6:
			/* RAID5 left_asymmetric, with Q on last device */
			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_RIGHT_ASYMMETRIC_6:
			pd_idx = sector_div(stripe2, raid_disks-1);
			if (*dd_idx >= pd_idx)
				(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_LEFT_SYMMETRIC_6:
			pd_idx = data_disks - sector_div(stripe2, raid_disks-1);
			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_RIGHT_SYMMETRIC_6:
			pd_idx = sector_div(stripe2, raid_disks-1);
			*dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1);
			qd_idx = raid_disks - 1;
			break;

		case ALGORITHM_PARITY_0_6:
			pd_idx = 0;
			(*dd_idx)++;
			qd_idx = raid_disks - 1;
			break;

		default:
			BUG();
		}
		break;
	}

	if (sh) {
		sh->pd_idx = pd_idx;
		sh->qd_idx = qd_idx;
		sh->ddf_layout = ddf_layout;
	}
	/*
	 * Finally, compute the new sector number
	 */
	new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset;
	return new_sector;
}


static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous)
{
	struct r5conf *conf = sh->raid_conf;
	int raid_disks = sh->disks;
	int data_disks = raid_disks - conf->max_degraded;
	sector_t new_sector = sh->sector, check;
	int sectors_per_chunk = previous ? conf->prev_chunk_sectors
					 : conf->chunk_sectors;
	int algorithm = previous ?
conf->prev_algo 1967 : conf->algorithm; 1968 sector_t stripe; 1969 int chunk_offset; 1970 sector_t chunk_number; 1971 int dummy1, dd_idx = i; 1972 sector_t r_sector; 1973 struct stripe_head sh2; 1974 1975 1976 chunk_offset = sector_div(new_sector, sectors_per_chunk); 1977 stripe = new_sector; 1978 1979 if (i == sh->pd_idx) 1980 return 0; 1981 switch(conf->level) { 1982 case 4: break; 1983 case 5: 1984 switch (algorithm) { 1985 case ALGORITHM_LEFT_ASYMMETRIC: 1986 case ALGORITHM_RIGHT_ASYMMETRIC: 1987 if (i > sh->pd_idx) 1988 i--; 1989 break; 1990 case ALGORITHM_LEFT_SYMMETRIC: 1991 case ALGORITHM_RIGHT_SYMMETRIC: 1992 if (i < sh->pd_idx) 1993 i += raid_disks; 1994 i -= (sh->pd_idx + 1); 1995 break; 1996 case ALGORITHM_PARITY_0: 1997 i -= 1; 1998 break; 1999 case ALGORITHM_PARITY_N: 2000 break; 2001 default: 2002 BUG(); 2003 } 2004 break; 2005 case 6: 2006 if (i == sh->qd_idx) 2007 return 0; /* It is the Q disk */ 2008 switch (algorithm) { 2009 case ALGORITHM_LEFT_ASYMMETRIC: 2010 case ALGORITHM_RIGHT_ASYMMETRIC: 2011 case ALGORITHM_ROTATING_ZERO_RESTART: 2012 case ALGORITHM_ROTATING_N_RESTART: 2013 if (sh->pd_idx == raid_disks-1) 2014 i--; /* Q D D D P */ 2015 else if (i > sh->pd_idx) 2016 i -= 2; /* D D P Q D */ 2017 break; 2018 case ALGORITHM_LEFT_SYMMETRIC: 2019 case ALGORITHM_RIGHT_SYMMETRIC: 2020 if (sh->pd_idx == raid_disks-1) 2021 i--; /* Q D D D P */ 2022 else { 2023 /* D D P Q D */ 2024 if (i < sh->pd_idx) 2025 i += raid_disks; 2026 i -= (sh->pd_idx + 2); 2027 } 2028 break; 2029 case ALGORITHM_PARITY_0: 2030 i -= 2; 2031 break; 2032 case ALGORITHM_PARITY_N: 2033 break; 2034 case ALGORITHM_ROTATING_N_CONTINUE: 2035 /* Like left_symmetric, but P is before Q */ 2036 if (sh->pd_idx == 0) 2037 i--; /* P D D D Q */ 2038 else { 2039 /* D D Q P D */ 2040 if (i < sh->pd_idx) 2041 i += raid_disks; 2042 i -= (sh->pd_idx + 1); 2043 } 2044 break; 2045 case ALGORITHM_LEFT_ASYMMETRIC_6: 2046 case ALGORITHM_RIGHT_ASYMMETRIC_6: 2047 if (i > sh->pd_idx) 2048 i--; 2049 break; 2050 case ALGORITHM_LEFT_SYMMETRIC_6: 2051 case ALGORITHM_RIGHT_SYMMETRIC_6: 2052 if (i < sh->pd_idx) 2053 i += data_disks + 1; 2054 i -= (sh->pd_idx + 1); 2055 break; 2056 case ALGORITHM_PARITY_0_6: 2057 i -= 1; 2058 break; 2059 default: 2060 BUG(); 2061 } 2062 break; 2063 } 2064 2065 chunk_number = stripe * data_disks + i; 2066 r_sector = chunk_number * sectors_per_chunk + chunk_offset; 2067 2068 check = raid5_compute_sector(conf, r_sector, 2069 previous, &dummy1, &sh2); 2070 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx 2071 || sh2.qd_idx != sh->qd_idx) { 2072 printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n", 2073 mdname(conf->mddev)); 2074 return 0; 2075 } 2076 return r_sector; 2077 } 2078 2079 2080 static void 2081 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, 2082 int rcw, int expand) 2083 { 2084 int i, pd_idx = sh->pd_idx, disks = sh->disks; 2085 struct r5conf *conf = sh->raid_conf; 2086 int level = conf->level; 2087 2088 if (rcw) { 2089 /* if we are not expanding this is a proper write request, and 2090 * there will be bios with new data to be drained into the 2091 * stripe cache 2092 */ 2093 if (!expand) { 2094 sh->reconstruct_state = reconstruct_state_drain_run; 2095 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 2096 } else 2097 sh->reconstruct_state = reconstruct_state_run; 2098 2099 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); 2100 2101 for (i = disks; i--; ) { 2102 struct r5dev *dev = &sh->dev[i]; 2103 2104 if (dev->towrite) { 2105 
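/* This block carries new data for the reconstruct-write: lock it,
 * flag it for the bio drain copy (R5_Wantdrain), and, unless we are
 * expanding, mark it !uptodate until the drain and the parity
 * reconstruction have both run.
 */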
set_bit(R5_LOCKED, &dev->flags); 2106 set_bit(R5_Wantdrain, &dev->flags); 2107 if (!expand) 2108 clear_bit(R5_UPTODATE, &dev->flags); 2109 s->locked++; 2110 } 2111 } 2112 if (s->locked + conf->max_degraded == disks) 2113 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) 2114 atomic_inc(&conf->pending_full_writes); 2115 } else { 2116 BUG_ON(level == 6); 2117 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || 2118 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); 2119 2120 sh->reconstruct_state = reconstruct_state_prexor_drain_run; 2121 set_bit(STRIPE_OP_PREXOR, &s->ops_request); 2122 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 2123 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); 2124 2125 for (i = disks; i--; ) { 2126 struct r5dev *dev = &sh->dev[i]; 2127 if (i == pd_idx) 2128 continue; 2129 2130 if (dev->towrite && 2131 (test_bit(R5_UPTODATE, &dev->flags) || 2132 test_bit(R5_Wantcompute, &dev->flags))) { 2133 set_bit(R5_Wantdrain, &dev->flags); 2134 set_bit(R5_LOCKED, &dev->flags); 2135 clear_bit(R5_UPTODATE, &dev->flags); 2136 s->locked++; 2137 } 2138 } 2139 } 2140 2141 /* keep the parity disk(s) locked while asynchronous operations 2142 * are in flight 2143 */ 2144 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); 2145 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 2146 s->locked++; 2147 2148 if (level == 6) { 2149 int qd_idx = sh->qd_idx; 2150 struct r5dev *dev = &sh->dev[qd_idx]; 2151 2152 set_bit(R5_LOCKED, &dev->flags); 2153 clear_bit(R5_UPTODATE, &dev->flags); 2154 s->locked++; 2155 } 2156 2157 pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n", 2158 __func__, (unsigned long long)sh->sector, 2159 s->locked, s->ops_request); 2160 } 2161 2162 /* 2163 * Each stripe/dev can have one or more bion attached. 2164 * toread/towrite point to the first in a chain. 2165 * The bi_next chain must be in order. 
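*
* For example (assuming a 4K page, so STRIPE_SECTORS is 8): if write
* bios covering device sectors [s, s+4) and [s+4, s+8) are both queued
* against sh->dev[i], add_stripe_bio() links them on ->towrite in
* bi_sector order, takes a reference on each by bumping
* bi_phys_segments, and, because the chain now covers the whole
* STRIPE_SECTORS range, sets R5_OVERWRITE so the old contents of this
* block never have to be read in for the parity update.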
2166 */ 2167 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite) 2168 { 2169 struct bio **bip; 2170 struct r5conf *conf = sh->raid_conf; 2171 int firstwrite=0; 2172 2173 pr_debug("adding bi b#%llu to stripe s#%llu\n", 2174 (unsigned long long)bi->bi_sector, 2175 (unsigned long long)sh->sector); 2176 2177 2178 spin_lock_irq(&conf->device_lock); 2179 if (forwrite) { 2180 bip = &sh->dev[dd_idx].towrite; 2181 if (*bip == NULL && sh->dev[dd_idx].written == NULL) 2182 firstwrite = 1; 2183 } else 2184 bip = &sh->dev[dd_idx].toread; 2185 while (*bip && (*bip)->bi_sector < bi->bi_sector) { 2186 if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector) 2187 goto overlap; 2188 bip = & (*bip)->bi_next; 2189 } 2190 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9)) 2191 goto overlap; 2192 2193 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); 2194 if (*bip) 2195 bi->bi_next = *bip; 2196 *bip = bi; 2197 bi->bi_phys_segments++; 2198 2199 if (forwrite) { 2200 /* check if page is covered */ 2201 sector_t sector = sh->dev[dd_idx].sector; 2202 for (bi=sh->dev[dd_idx].towrite; 2203 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && 2204 bi && bi->bi_sector <= sector; 2205 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { 2206 if (bi->bi_sector + (bi->bi_size>>9) >= sector) 2207 sector = bi->bi_sector + (bi->bi_size>>9); 2208 } 2209 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) 2210 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags); 2211 } 2212 spin_unlock_irq(&conf->device_lock); 2213 2214 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", 2215 (unsigned long long)(*bip)->bi_sector, 2216 (unsigned long long)sh->sector, dd_idx); 2217 2218 if (conf->mddev->bitmap && firstwrite) { 2219 bitmap_startwrite(conf->mddev->bitmap, sh->sector, 2220 STRIPE_SECTORS, 0); 2221 sh->bm_seq = conf->seq_flush+1; 2222 set_bit(STRIPE_BIT_DELAY, &sh->state); 2223 } 2224 return 1; 2225 2226 overlap: 2227 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); 2228 spin_unlock_irq(&conf->device_lock); 2229 return 0; 2230 } 2231 2232 static void end_reshape(struct r5conf *conf); 2233 2234 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, 2235 struct stripe_head *sh) 2236 { 2237 int sectors_per_chunk = 2238 previous ? conf->prev_chunk_sectors : conf->chunk_sectors; 2239 int dd_idx; 2240 int chunk_offset = sector_div(stripe, sectors_per_chunk); 2241 int disks = previous ? 
conf->previous_raid_disks : conf->raid_disks; 2242 2243 raid5_compute_sector(conf, 2244 stripe * (disks - conf->max_degraded) 2245 *sectors_per_chunk + chunk_offset, 2246 previous, 2247 &dd_idx, sh); 2248 } 2249 2250 static void 2251 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, 2252 struct stripe_head_state *s, int disks, 2253 struct bio **return_bi) 2254 { 2255 int i; 2256 for (i = disks; i--; ) { 2257 struct bio *bi; 2258 int bitmap_end = 0; 2259 2260 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 2261 struct md_rdev *rdev; 2262 rcu_read_lock(); 2263 rdev = rcu_dereference(conf->disks[i].rdev); 2264 if (rdev && test_bit(In_sync, &rdev->flags)) 2265 atomic_inc(&rdev->nr_pending); 2266 else 2267 rdev = NULL; 2268 rcu_read_unlock(); 2269 if (rdev) { 2270 if (!rdev_set_badblocks( 2271 rdev, 2272 sh->sector, 2273 STRIPE_SECTORS, 0)) 2274 md_error(conf->mddev, rdev); 2275 rdev_dec_pending(rdev, conf->mddev); 2276 } 2277 } 2278 spin_lock_irq(&conf->device_lock); 2279 /* fail all writes first */ 2280 bi = sh->dev[i].towrite; 2281 sh->dev[i].towrite = NULL; 2282 if (bi) { 2283 s->to_write--; 2284 bitmap_end = 1; 2285 } 2286 2287 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2288 wake_up(&conf->wait_for_overlap); 2289 2290 while (bi && bi->bi_sector < 2291 sh->dev[i].sector + STRIPE_SECTORS) { 2292 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 2293 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2294 if (!raid5_dec_bi_phys_segments(bi)) { 2295 md_write_end(conf->mddev); 2296 bi->bi_next = *return_bi; 2297 *return_bi = bi; 2298 } 2299 bi = nextbi; 2300 } 2301 /* and fail all 'written' */ 2302 bi = sh->dev[i].written; 2303 sh->dev[i].written = NULL; 2304 if (bi) bitmap_end = 1; 2305 while (bi && bi->bi_sector < 2306 sh->dev[i].sector + STRIPE_SECTORS) { 2307 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 2308 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2309 if (!raid5_dec_bi_phys_segments(bi)) { 2310 md_write_end(conf->mddev); 2311 bi->bi_next = *return_bi; 2312 *return_bi = bi; 2313 } 2314 bi = bi2; 2315 } 2316 2317 /* fail any reads if this device is non-operational and 2318 * the data has not reached the cache yet. 
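* Reads already flagged R5_Wantfill are left alone here; they will be
* satisfied from the cached copy by the bio fill operation instead of
* being failed.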
2319 */ 2320 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && 2321 (!test_bit(R5_Insync, &sh->dev[i].flags) || 2322 test_bit(R5_ReadError, &sh->dev[i].flags))) { 2323 bi = sh->dev[i].toread; 2324 sh->dev[i].toread = NULL; 2325 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2326 wake_up(&conf->wait_for_overlap); 2327 if (bi) s->to_read--; 2328 while (bi && bi->bi_sector < 2329 sh->dev[i].sector + STRIPE_SECTORS) { 2330 struct bio *nextbi = 2331 r5_next_bio(bi, sh->dev[i].sector); 2332 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2333 if (!raid5_dec_bi_phys_segments(bi)) { 2334 bi->bi_next = *return_bi; 2335 *return_bi = bi; 2336 } 2337 bi = nextbi; 2338 } 2339 } 2340 spin_unlock_irq(&conf->device_lock); 2341 if (bitmap_end) 2342 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 2343 STRIPE_SECTORS, 0, 0); 2344 /* If we were in the middle of a write the parity block might 2345 * still be locked - so just clear all R5_LOCKED flags 2346 */ 2347 clear_bit(R5_LOCKED, &sh->dev[i].flags); 2348 } 2349 2350 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 2351 if (atomic_dec_and_test(&conf->pending_full_writes)) 2352 md_wakeup_thread(conf->mddev->thread); 2353 } 2354 2355 static void 2356 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, 2357 struct stripe_head_state *s) 2358 { 2359 int abort = 0; 2360 int i; 2361 2362 md_done_sync(conf->mddev, STRIPE_SECTORS, 0); 2363 clear_bit(STRIPE_SYNCING, &sh->state); 2364 s->syncing = 0; 2365 /* There is nothing more to do for sync/check/repair. 2366 * For recover we need to record a bad block on all 2367 * non-sync devices, or abort the recovery 2368 */ 2369 if (!test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) 2370 return; 2371 /* During recovery devices cannot be removed, so locking and 2372 * refcounting of rdevs is not needed 2373 */ 2374 for (i = 0; i < conf->raid_disks; i++) { 2375 struct md_rdev *rdev = conf->disks[i].rdev; 2376 if (!rdev 2377 || test_bit(Faulty, &rdev->flags) 2378 || test_bit(In_sync, &rdev->flags)) 2379 continue; 2380 if (!rdev_set_badblocks(rdev, sh->sector, 2381 STRIPE_SECTORS, 0)) 2382 abort = 1; 2383 } 2384 if (abort) { 2385 conf->recovery_disabled = conf->mddev->recovery_disabled; 2386 set_bit(MD_RECOVERY_INTR, &conf->mddev->recovery); 2387 } 2388 } 2389 2390 /* fetch_block - checks the given member device to see if its data needs 2391 * to be read or computed to satisfy a request. 2392 * 2393 * Returns 1 when no more member devices need to be checked, otherwise returns 2394 * 0 to tell the loop in handle_stripe_fill to continue 2395 */ 2396 static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, 2397 int disk_idx, int disks) 2398 { 2399 struct r5dev *dev = &sh->dev[disk_idx]; 2400 struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]], 2401 &sh->dev[s->failed_num[1]] }; 2402 2403 /* is the data in this block needed, and can we get it? 
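* Roughly: the block is wanted when it has a pending read, a partial
* (non-overwriting) write, when the stripe is being synced or
* expanded, or when a failed device has pending I/O that can only be
* served by reconstruction, for which this block is needed.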
*/ 2404 if (!test_bit(R5_LOCKED, &dev->flags) && 2405 !test_bit(R5_UPTODATE, &dev->flags) && 2406 (dev->toread || 2407 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || 2408 s->syncing || s->expanding || 2409 (s->failed >= 1 && fdev[0]->toread) || 2410 (s->failed >= 2 && fdev[1]->toread) || 2411 (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite && 2412 !test_bit(R5_OVERWRITE, &fdev[0]->flags)) || 2413 (sh->raid_conf->level == 6 && s->failed && s->to_write))) { 2414 /* we would like to get this block, possibly by computing it, 2415 * otherwise read it if the backing disk is insync 2416 */ 2417 BUG_ON(test_bit(R5_Wantcompute, &dev->flags)); 2418 BUG_ON(test_bit(R5_Wantread, &dev->flags)); 2419 if ((s->uptodate == disks - 1) && 2420 (s->failed && (disk_idx == s->failed_num[0] || 2421 disk_idx == s->failed_num[1]))) { 2422 /* have disk failed, and we're requested to fetch it; 2423 * do compute it 2424 */ 2425 pr_debug("Computing stripe %llu block %d\n", 2426 (unsigned long long)sh->sector, disk_idx); 2427 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 2428 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 2429 set_bit(R5_Wantcompute, &dev->flags); 2430 sh->ops.target = disk_idx; 2431 sh->ops.target2 = -1; /* no 2nd target */ 2432 s->req_compute = 1; 2433 /* Careful: from this point on 'uptodate' is in the eye 2434 * of raid_run_ops which services 'compute' operations 2435 * before writes. R5_Wantcompute flags a block that will 2436 * be R5_UPTODATE by the time it is needed for a 2437 * subsequent operation. 2438 */ 2439 s->uptodate++; 2440 return 1; 2441 } else if (s->uptodate == disks-2 && s->failed >= 2) { 2442 /* Computing 2-failure is *very* expensive; only 2443 * do it if failed >= 2 2444 */ 2445 int other; 2446 for (other = disks; other--; ) { 2447 if (other == disk_idx) 2448 continue; 2449 if (!test_bit(R5_UPTODATE, 2450 &sh->dev[other].flags)) 2451 break; 2452 } 2453 BUG_ON(other < 0); 2454 pr_debug("Computing stripe %llu blocks %d,%d\n", 2455 (unsigned long long)sh->sector, 2456 disk_idx, other); 2457 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 2458 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 2459 set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags); 2460 set_bit(R5_Wantcompute, &sh->dev[other].flags); 2461 sh->ops.target = disk_idx; 2462 sh->ops.target2 = other; 2463 s->uptodate += 2; 2464 s->req_compute = 1; 2465 return 1; 2466 } else if (test_bit(R5_Insync, &dev->flags)) { 2467 set_bit(R5_LOCKED, &dev->flags); 2468 set_bit(R5_Wantread, &dev->flags); 2469 s->locked++; 2470 pr_debug("Reading block %d (sync=%d)\n", 2471 disk_idx, s->syncing); 2472 } 2473 } 2474 2475 return 0; 2476 } 2477 2478 /** 2479 * handle_stripe_fill - read or compute data to satisfy pending requests. 2480 */ 2481 static void handle_stripe_fill(struct stripe_head *sh, 2482 struct stripe_head_state *s, 2483 int disks) 2484 { 2485 int i; 2486 2487 /* look for blocks to read/compute, skip this if a compute 2488 * is already in flight, or if the stripe contents are in the 2489 * midst of changing due to a write 2490 */ 2491 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && 2492 !sh->reconstruct_state) 2493 for (i = disks; i--; ) 2494 if (fetch_block(sh, s, i, disks)) 2495 break; 2496 set_bit(STRIPE_HANDLE, &sh->state); 2497 } 2498 2499 2500 /* handle_stripe_clean_event 2501 * any written block on an uptodate or failed drive can be returned. 2502 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but 2503 * never LOCKED, so we don't need to test 'failed' directly. 
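*
* Each completed bio is unlinked from dev->written and its stripe
* reference (bi_phys_segments) dropped; once the last reference is
* gone the bio is chained onto *return_bi for completion, and
* bitmap_endwrite() is called when no further writes remain queued
* for the device.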
2504 */ 2505 static void handle_stripe_clean_event(struct r5conf *conf, 2506 struct stripe_head *sh, int disks, struct bio **return_bi) 2507 { 2508 int i; 2509 struct r5dev *dev; 2510 2511 for (i = disks; i--; ) 2512 if (sh->dev[i].written) { 2513 dev = &sh->dev[i]; 2514 if (!test_bit(R5_LOCKED, &dev->flags) && 2515 test_bit(R5_UPTODATE, &dev->flags)) { 2516 /* We can return any write requests */ 2517 struct bio *wbi, *wbi2; 2518 int bitmap_end = 0; 2519 pr_debug("Return write for disc %d\n", i); 2520 spin_lock_irq(&conf->device_lock); 2521 wbi = dev->written; 2522 dev->written = NULL; 2523 while (wbi && wbi->bi_sector < 2524 dev->sector + STRIPE_SECTORS) { 2525 wbi2 = r5_next_bio(wbi, dev->sector); 2526 if (!raid5_dec_bi_phys_segments(wbi)) { 2527 md_write_end(conf->mddev); 2528 wbi->bi_next = *return_bi; 2529 *return_bi = wbi; 2530 } 2531 wbi = wbi2; 2532 } 2533 if (dev->towrite == NULL) 2534 bitmap_end = 1; 2535 spin_unlock_irq(&conf->device_lock); 2536 if (bitmap_end) 2537 bitmap_endwrite(conf->mddev->bitmap, 2538 sh->sector, 2539 STRIPE_SECTORS, 2540 !test_bit(STRIPE_DEGRADED, &sh->state), 2541 0); 2542 } 2543 } 2544 2545 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 2546 if (atomic_dec_and_test(&conf->pending_full_writes)) 2547 md_wakeup_thread(conf->mddev->thread); 2548 } 2549 2550 static void handle_stripe_dirtying(struct r5conf *conf, 2551 struct stripe_head *sh, 2552 struct stripe_head_state *s, 2553 int disks) 2554 { 2555 int rmw = 0, rcw = 0, i; 2556 if (conf->max_degraded == 2) { 2557 /* RAID6 requires 'rcw' in current implementation 2558 * Calculate the real rcw later - for now fake it 2559 * look like rcw is cheaper 2560 */ 2561 rcw = 1; rmw = 2; 2562 } else for (i = disks; i--; ) { 2563 /* would I have to read this buffer for read_modify_write */ 2564 struct r5dev *dev = &sh->dev[i]; 2565 if ((dev->towrite || i == sh->pd_idx) && 2566 !test_bit(R5_LOCKED, &dev->flags) && 2567 !(test_bit(R5_UPTODATE, &dev->flags) || 2568 test_bit(R5_Wantcompute, &dev->flags))) { 2569 if (test_bit(R5_Insync, &dev->flags)) 2570 rmw++; 2571 else 2572 rmw += 2*disks; /* cannot read it */ 2573 } 2574 /* Would I have to read this buffer for reconstruct_write */ 2575 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx && 2576 !test_bit(R5_LOCKED, &dev->flags) && 2577 !(test_bit(R5_UPTODATE, &dev->flags) || 2578 test_bit(R5_Wantcompute, &dev->flags))) { 2579 if (test_bit(R5_Insync, &dev->flags)) rcw++; 2580 else 2581 rcw += 2*disks; 2582 } 2583 } 2584 pr_debug("for sector %llu, rmw=%d rcw=%d\n", 2585 (unsigned long long)sh->sector, rmw, rcw); 2586 set_bit(STRIPE_HANDLE, &sh->state); 2587 if (rmw < rcw && rmw > 0) 2588 /* prefer read-modify-write, but need to get some data */ 2589 for (i = disks; i--; ) { 2590 struct r5dev *dev = &sh->dev[i]; 2591 if ((dev->towrite || i == sh->pd_idx) && 2592 !test_bit(R5_LOCKED, &dev->flags) && 2593 !(test_bit(R5_UPTODATE, &dev->flags) || 2594 test_bit(R5_Wantcompute, &dev->flags)) && 2595 test_bit(R5_Insync, &dev->flags)) { 2596 if ( 2597 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2598 pr_debug("Read_old block " 2599 "%d for r-m-w\n", i); 2600 set_bit(R5_LOCKED, &dev->flags); 2601 set_bit(R5_Wantread, &dev->flags); 2602 s->locked++; 2603 } else { 2604 set_bit(STRIPE_DELAYED, &sh->state); 2605 set_bit(STRIPE_HANDLE, &sh->state); 2606 } 2607 } 2608 } 2609 if (rcw <= rmw && rcw > 0) { 2610 /* want reconstruct write, but need to get some data */ 2611 rcw = 0; 2612 for (i = disks; i--; ) { 2613 struct r5dev *dev = &sh->dev[i]; 2614 if 
(!test_bit(R5_OVERWRITE, &dev->flags) && 2615 i != sh->pd_idx && i != sh->qd_idx && 2616 !test_bit(R5_LOCKED, &dev->flags) && 2617 !(test_bit(R5_UPTODATE, &dev->flags) || 2618 test_bit(R5_Wantcompute, &dev->flags))) { 2619 rcw++; 2620 if (!test_bit(R5_Insync, &dev->flags)) 2621 continue; /* it's a failed drive */ 2622 if ( 2623 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2624 pr_debug("Read_old block " 2625 "%d for Reconstruct\n", i); 2626 set_bit(R5_LOCKED, &dev->flags); 2627 set_bit(R5_Wantread, &dev->flags); 2628 s->locked++; 2629 } else { 2630 set_bit(STRIPE_DELAYED, &sh->state); 2631 set_bit(STRIPE_HANDLE, &sh->state); 2632 } 2633 } 2634 } 2635 } 2636 /* now if nothing is locked, and if we have enough data, 2637 * we can start a write request 2638 */ 2639 /* since handle_stripe can be called at any time we need to handle the 2640 * case where a compute block operation has been submitted and then a 2641 * subsequent call wants to start a write request. raid_run_ops only 2642 * handles the case where compute block and reconstruct are requested 2643 * simultaneously. If this is not the case then new writes need to be 2644 * held off until the compute completes. 2645 */ 2646 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && 2647 (s->locked == 0 && (rcw == 0 || rmw == 0) && 2648 !test_bit(STRIPE_BIT_DELAY, &sh->state))) 2649 schedule_reconstruction(sh, s, rcw == 0, 0); 2650 } 2651 2652 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, 2653 struct stripe_head_state *s, int disks) 2654 { 2655 struct r5dev *dev = NULL; 2656 2657 set_bit(STRIPE_HANDLE, &sh->state); 2658 2659 switch (sh->check_state) { 2660 case check_state_idle: 2661 /* start a new check operation if there are no failures */ 2662 if (s->failed == 0) { 2663 BUG_ON(s->uptodate != disks); 2664 sh->check_state = check_state_run; 2665 set_bit(STRIPE_OP_CHECK, &s->ops_request); 2666 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); 2667 s->uptodate--; 2668 break; 2669 } 2670 dev = &sh->dev[s->failed_num[0]]; 2671 /* fall through */ 2672 case check_state_compute_result: 2673 sh->check_state = check_state_idle; 2674 if (!dev) 2675 dev = &sh->dev[sh->pd_idx]; 2676 2677 /* check that a write has not made the stripe insync */ 2678 if (test_bit(STRIPE_INSYNC, &sh->state)) 2679 break; 2680 2681 /* either failed parity check, or recovery is happening */ 2682 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); 2683 BUG_ON(s->uptodate != disks); 2684 2685 set_bit(R5_LOCKED, &dev->flags); 2686 s->locked++; 2687 set_bit(R5_Wantwrite, &dev->flags); 2688 2689 clear_bit(STRIPE_DEGRADED, &sh->state); 2690 set_bit(STRIPE_INSYNC, &sh->state); 2691 break; 2692 case check_state_run: 2693 break; /* we will be called again upon completion */ 2694 case check_state_check_result: 2695 sh->check_state = check_state_idle; 2696 2697 /* if a failure occurred during the check operation, leave 2698 * STRIPE_INSYNC not set and let the stripe be handled again 2699 */ 2700 if (s->failed) 2701 break; 2702 2703 /* handle a successful check operation, if parity is correct 2704 * we are done. Otherwise update the mismatch count and repair 2705 * parity if !MD_RECOVERY_CHECK 2706 */ 2707 if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0) 2708 /* parity is correct (on disc, 2709 * not in buffer any more) 2710 */ 2711 set_bit(STRIPE_INSYNC, &sh->state); 2712 else { 2713 conf->mddev->resync_mismatches += STRIPE_SECTORS; 2714 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 2715 /* don't try to repair!! 
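* (a 'check' pass only records the mismatch above; the parity is
* actually rewritten only when MD_RECOVERY_CHECK is not set)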
*/ 2716 set_bit(STRIPE_INSYNC, &sh->state); 2717 else { 2718 sh->check_state = check_state_compute_run; 2719 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 2720 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 2721 set_bit(R5_Wantcompute, 2722 &sh->dev[sh->pd_idx].flags); 2723 sh->ops.target = sh->pd_idx; 2724 sh->ops.target2 = -1; 2725 s->uptodate++; 2726 } 2727 } 2728 break; 2729 case check_state_compute_run: 2730 break; 2731 default: 2732 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n", 2733 __func__, sh->check_state, 2734 (unsigned long long) sh->sector); 2735 BUG(); 2736 } 2737 } 2738 2739 2740 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, 2741 struct stripe_head_state *s, 2742 int disks) 2743 { 2744 int pd_idx = sh->pd_idx; 2745 int qd_idx = sh->qd_idx; 2746 struct r5dev *dev; 2747 2748 set_bit(STRIPE_HANDLE, &sh->state); 2749 2750 BUG_ON(s->failed > 2); 2751 2752 /* Want to check and possibly repair P and Q. 2753 * However there could be one 'failed' device, in which 2754 * case we can only check one of them, possibly using the 2755 * other to generate missing data 2756 */ 2757 2758 switch (sh->check_state) { 2759 case check_state_idle: 2760 /* start a new check operation if there are < 2 failures */ 2761 if (s->failed == s->q_failed) { 2762 /* The only possible failed device holds Q, so it 2763 * makes sense to check P (If anything else were failed, 2764 * we would have used P to recreate it). 2765 */ 2766 sh->check_state = check_state_run; 2767 } 2768 if (!s->q_failed && s->failed < 2) { 2769 /* Q is not failed, and we didn't use it to generate 2770 * anything, so it makes sense to check it 2771 */ 2772 if (sh->check_state == check_state_run) 2773 sh->check_state = check_state_run_pq; 2774 else 2775 sh->check_state = check_state_run_q; 2776 } 2777 2778 /* discard potentially stale zero_sum_result */ 2779 sh->ops.zero_sum_result = 0; 2780 2781 if (sh->check_state == check_state_run) { 2782 /* async_xor_zero_sum destroys the contents of P */ 2783 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 2784 s->uptodate--; 2785 } 2786 if (sh->check_state >= check_state_run && 2787 sh->check_state <= check_state_run_pq) { 2788 /* async_syndrome_zero_sum preserves P and Q, so 2789 * no need to mark them !uptodate here 2790 */ 2791 set_bit(STRIPE_OP_CHECK, &s->ops_request); 2792 break; 2793 } 2794 2795 /* we have 2-disk failure */ 2796 BUG_ON(s->failed != 2); 2797 /* fall through */ 2798 case check_state_compute_result: 2799 sh->check_state = check_state_idle; 2800 2801 /* check that a write has not made the stripe insync */ 2802 if (test_bit(STRIPE_INSYNC, &sh->state)) 2803 break; 2804 2805 /* now write out any block on a failed drive, 2806 * or P or Q if they were recomputed 2807 */ 2808 BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */ 2809 if (s->failed == 2) { 2810 dev = &sh->dev[s->failed_num[1]]; 2811 s->locked++; 2812 set_bit(R5_LOCKED, &dev->flags); 2813 set_bit(R5_Wantwrite, &dev->flags); 2814 } 2815 if (s->failed >= 1) { 2816 dev = &sh->dev[s->failed_num[0]]; 2817 s->locked++; 2818 set_bit(R5_LOCKED, &dev->flags); 2819 set_bit(R5_Wantwrite, &dev->flags); 2820 } 2821 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { 2822 dev = &sh->dev[pd_idx]; 2823 s->locked++; 2824 set_bit(R5_LOCKED, &dev->flags); 2825 set_bit(R5_Wantwrite, &dev->flags); 2826 } 2827 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { 2828 dev = &sh->dev[qd_idx]; 2829 s->locked++; 2830 set_bit(R5_LOCKED, &dev->flags); 2831 set_bit(R5_Wantwrite, &dev->flags); 
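/* Like P above, a Q block that failed the syndrome check was already
 * recomputed into the stripe cache by the earlier compute pass, so
 * all that remains is the write-out scheduled here via R5_Wantwrite.
 */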
2832 } 2833 clear_bit(STRIPE_DEGRADED, &sh->state); 2834 2835 set_bit(STRIPE_INSYNC, &sh->state); 2836 break; 2837 case check_state_run: 2838 case check_state_run_q: 2839 case check_state_run_pq: 2840 break; /* we will be called again upon completion */ 2841 case check_state_check_result: 2842 sh->check_state = check_state_idle; 2843 2844 /* handle a successful check operation, if parity is correct 2845 * we are done. Otherwise update the mismatch count and repair 2846 * parity if !MD_RECOVERY_CHECK 2847 */ 2848 if (sh->ops.zero_sum_result == 0) { 2849 /* both parities are correct */ 2850 if (!s->failed) 2851 set_bit(STRIPE_INSYNC, &sh->state); 2852 else { 2853 /* in contrast to the raid5 case we can validate 2854 * parity, but still have a failure to write 2855 * back 2856 */ 2857 sh->check_state = check_state_compute_result; 2858 /* Returning at this point means that we may go 2859 * off and bring p and/or q uptodate again so 2860 * we make sure to check zero_sum_result again 2861 * to verify if p or q need writeback 2862 */ 2863 } 2864 } else { 2865 conf->mddev->resync_mismatches += STRIPE_SECTORS; 2866 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 2867 /* don't try to repair!! */ 2868 set_bit(STRIPE_INSYNC, &sh->state); 2869 else { 2870 int *target = &sh->ops.target; 2871 2872 sh->ops.target = -1; 2873 sh->ops.target2 = -1; 2874 sh->check_state = check_state_compute_run; 2875 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 2876 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 2877 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { 2878 set_bit(R5_Wantcompute, 2879 &sh->dev[pd_idx].flags); 2880 *target = pd_idx; 2881 target = &sh->ops.target2; 2882 s->uptodate++; 2883 } 2884 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { 2885 set_bit(R5_Wantcompute, 2886 &sh->dev[qd_idx].flags); 2887 *target = qd_idx; 2888 s->uptodate++; 2889 } 2890 } 2891 } 2892 break; 2893 case check_state_compute_run: 2894 break; 2895 default: 2896 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n", 2897 __func__, sh->check_state, 2898 (unsigned long long) sh->sector); 2899 BUG(); 2900 } 2901 } 2902 2903 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) 2904 { 2905 int i; 2906 2907 /* We have read all the blocks in this stripe and now we need to 2908 * copy some of them into a target stripe for expand. 2909 */ 2910 struct dma_async_tx_descriptor *tx = NULL; 2911 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); 2912 for (i = 0; i < sh->disks; i++) 2913 if (i != sh->pd_idx && i != sh->qd_idx) { 2914 int dd_idx, j; 2915 struct stripe_head *sh2; 2916 struct async_submit_ctl submit; 2917 2918 sector_t bn = compute_blocknr(sh, i, 1); 2919 sector_t s = raid5_compute_sector(conf, bn, 0, 2920 &dd_idx, NULL); 2921 sh2 = get_active_stripe(conf, s, 0, 1, 1); 2922 if (sh2 == NULL) 2923 /* so far only the early blocks of this stripe 2924 * have been requested. 
When later blocks 2925 * get requested, we will try again 2926 */ 2927 continue; 2928 if (!test_bit(STRIPE_EXPANDING, &sh2->state) || 2929 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) { 2930 /* must have already done this block */ 2931 release_stripe(sh2); 2932 continue; 2933 } 2934 2935 /* place all the copies on one channel */ 2936 init_async_submit(&submit, 0, tx, NULL, NULL, NULL); 2937 tx = async_memcpy(sh2->dev[dd_idx].page, 2938 sh->dev[i].page, 0, 0, STRIPE_SIZE, 2939 &submit); 2940 2941 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); 2942 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); 2943 for (j = 0; j < conf->raid_disks; j++) 2944 if (j != sh2->pd_idx && 2945 j != sh2->qd_idx && 2946 !test_bit(R5_Expanded, &sh2->dev[j].flags)) 2947 break; 2948 if (j == conf->raid_disks) { 2949 set_bit(STRIPE_EXPAND_READY, &sh2->state); 2950 set_bit(STRIPE_HANDLE, &sh2->state); 2951 } 2952 release_stripe(sh2); 2953 2954 } 2955 /* done submitting copies, wait for them to complete */ 2956 if (tx) { 2957 async_tx_ack(tx); 2958 dma_wait_for_async_tx(tx); 2959 } 2960 } 2961 2962 2963 /* 2964 * handle_stripe - do things to a stripe. 2965 * 2966 * We lock the stripe and then examine the state of various bits 2967 * to see what needs to be done. 2968 * Possible results: 2969 * return some read request which now have data 2970 * return some write requests which are safely on disc 2971 * schedule a read on some buffers 2972 * schedule a write of some buffers 2973 * return confirmation of parity correctness 2974 * 2975 * buffers are taken off read_list or write_list, and bh_cache buffers 2976 * get BH_Lock set before the stripe lock is released. 2977 * 2978 */ 2979 2980 static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) 2981 { 2982 struct r5conf *conf = sh->raid_conf; 2983 int disks = sh->disks; 2984 struct r5dev *dev; 2985 int i; 2986 2987 memset(s, 0, sizeof(*s)); 2988 2989 s->syncing = test_bit(STRIPE_SYNCING, &sh->state); 2990 s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); 2991 s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); 2992 s->failed_num[0] = -1; 2993 s->failed_num[1] = -1; 2994 2995 /* Now to look around and see what can be done */ 2996 rcu_read_lock(); 2997 spin_lock_irq(&conf->device_lock); 2998 for (i=disks; i--; ) { 2999 struct md_rdev *rdev; 3000 sector_t first_bad; 3001 int bad_sectors; 3002 int is_bad = 0; 3003 3004 dev = &sh->dev[i]; 3005 3006 pr_debug("check %d: state 0x%lx read %p write %p written %p\n", 3007 i, dev->flags, dev->toread, dev->towrite, dev->written); 3008 /* maybe we can reply to a read 3009 * 3010 * new wantfill requests are only permitted while 3011 * ops_complete_biofill is guaranteed to be inactive 3012 */ 3013 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread && 3014 !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) 3015 set_bit(R5_Wantfill, &dev->flags); 3016 3017 /* now count some things */ 3018 if (test_bit(R5_LOCKED, &dev->flags)) 3019 s->locked++; 3020 if (test_bit(R5_UPTODATE, &dev->flags)) 3021 s->uptodate++; 3022 if (test_bit(R5_Wantcompute, &dev->flags)) { 3023 s->compute++; 3024 BUG_ON(s->compute > 2); 3025 } 3026 3027 if (test_bit(R5_Wantfill, &dev->flags)) 3028 s->to_fill++; 3029 else if (dev->toread) 3030 s->to_read++; 3031 if (dev->towrite) { 3032 s->to_write++; 3033 if (!test_bit(R5_OVERWRITE, &dev->flags)) 3034 s->non_overwrite++; 3035 } 3036 if (dev->written) 3037 s->written++; 3038 rdev = rcu_dereference(conf->disks[i].rdev); 3039 if (rdev) { 3040 is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, 
3041 &first_bad, &bad_sectors); 3042 if (s->blocked_rdev == NULL 3043 && (test_bit(Blocked, &rdev->flags) 3044 || is_bad < 0)) { 3045 if (is_bad < 0) 3046 set_bit(BlockedBadBlocks, 3047 &rdev->flags); 3048 s->blocked_rdev = rdev; 3049 atomic_inc(&rdev->nr_pending); 3050 } 3051 } 3052 clear_bit(R5_Insync, &dev->flags); 3053 if (!rdev) 3054 /* Not in-sync */; 3055 else if (is_bad) { 3056 /* also not in-sync */ 3057 if (!test_bit(WriteErrorSeen, &rdev->flags)) { 3058 /* treat as in-sync, but with a read error 3059 * which we can now try to correct 3060 */ 3061 set_bit(R5_Insync, &dev->flags); 3062 set_bit(R5_ReadError, &dev->flags); 3063 } 3064 } else if (test_bit(In_sync, &rdev->flags)) 3065 set_bit(R5_Insync, &dev->flags); 3066 else if (!test_bit(Faulty, &rdev->flags)) { 3067 /* in sync if before recovery_offset */ 3068 if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) 3069 set_bit(R5_Insync, &dev->flags); 3070 } 3071 if (test_bit(R5_WriteError, &dev->flags)) { 3072 clear_bit(R5_Insync, &dev->flags); 3073 if (!test_bit(Faulty, &rdev->flags)) { 3074 s->handle_bad_blocks = 1; 3075 atomic_inc(&rdev->nr_pending); 3076 } else 3077 clear_bit(R5_WriteError, &dev->flags); 3078 } 3079 if (test_bit(R5_MadeGood, &dev->flags)) { 3080 if (!test_bit(Faulty, &rdev->flags)) { 3081 s->handle_bad_blocks = 1; 3082 atomic_inc(&rdev->nr_pending); 3083 } else 3084 clear_bit(R5_MadeGood, &dev->flags); 3085 } 3086 if (!test_bit(R5_Insync, &dev->flags)) { 3087 /* The ReadError flag will just be confusing now */ 3088 clear_bit(R5_ReadError, &dev->flags); 3089 clear_bit(R5_ReWrite, &dev->flags); 3090 } 3091 if (test_bit(R5_ReadError, &dev->flags)) 3092 clear_bit(R5_Insync, &dev->flags); 3093 if (!test_bit(R5_Insync, &dev->flags)) { 3094 if (s->failed < 2) 3095 s->failed_num[s->failed] = i; 3096 s->failed++; 3097 } 3098 } 3099 spin_unlock_irq(&conf->device_lock); 3100 rcu_read_unlock(); 3101 } 3102 3103 static void handle_stripe(struct stripe_head *sh) 3104 { 3105 struct stripe_head_state s; 3106 struct r5conf *conf = sh->raid_conf; 3107 int i; 3108 int prexor; 3109 int disks = sh->disks; 3110 struct r5dev *pdev, *qdev; 3111 3112 clear_bit(STRIPE_HANDLE, &sh->state); 3113 if (test_and_set_bit(STRIPE_ACTIVE, &sh->state)) { 3114 /* already being handled, ensure it gets handled 3115 * again when current action finishes */ 3116 set_bit(STRIPE_HANDLE, &sh->state); 3117 return; 3118 } 3119 3120 if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { 3121 set_bit(STRIPE_SYNCING, &sh->state); 3122 clear_bit(STRIPE_INSYNC, &sh->state); 3123 } 3124 clear_bit(STRIPE_DELAYED, &sh->state); 3125 3126 pr_debug("handling stripe %llu, state=%#lx cnt=%d, " 3127 "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n", 3128 (unsigned long long)sh->sector, sh->state, 3129 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx, 3130 sh->check_state, sh->reconstruct_state); 3131 3132 analyse_stripe(sh, &s); 3133 3134 if (s.handle_bad_blocks) { 3135 set_bit(STRIPE_HANDLE, &sh->state); 3136 goto finish; 3137 } 3138 3139 if (unlikely(s.blocked_rdev)) { 3140 if (s.syncing || s.expanding || s.expanded || 3141 s.to_write || s.written) { 3142 set_bit(STRIPE_HANDLE, &sh->state); 3143 goto finish; 3144 } 3145 /* There is nothing for the blocked_rdev to block */ 3146 rdev_dec_pending(s.blocked_rdev, conf->mddev); 3147 s.blocked_rdev = NULL; 3148 } 3149 3150 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { 3151 set_bit(STRIPE_OP_BIOFILL, &s.ops_request); 3152 set_bit(STRIPE_BIOFILL_RUN, &sh->state); 3153 } 3154 3155 pr_debug("locked=%d 
uptodate=%d to_read=%d" 3156 " to_write=%d failed=%d failed_num=%d,%d\n", 3157 s.locked, s.uptodate, s.to_read, s.to_write, s.failed, 3158 s.failed_num[0], s.failed_num[1]); 3159 /* check if the array has lost more than max_degraded devices and, 3160 * if so, some requests might need to be failed. 3161 */ 3162 if (s.failed > conf->max_degraded && s.to_read+s.to_write+s.written) 3163 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); 3164 if (s.failed > conf->max_degraded && s.syncing) 3165 handle_failed_sync(conf, sh, &s); 3166 3167 /* 3168 * might be able to return some write requests if the parity blocks 3169 * are safe, or on a failed drive 3170 */ 3171 pdev = &sh->dev[sh->pd_idx]; 3172 s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) 3173 || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); 3174 qdev = &sh->dev[sh->qd_idx]; 3175 s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) 3176 || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) 3177 || conf->level < 6; 3178 3179 if (s.written && 3180 (s.p_failed || ((test_bit(R5_Insync, &pdev->flags) 3181 && !test_bit(R5_LOCKED, &pdev->flags) 3182 && test_bit(R5_UPTODATE, &pdev->flags)))) && 3183 (s.q_failed || ((test_bit(R5_Insync, &qdev->flags) 3184 && !test_bit(R5_LOCKED, &qdev->flags) 3185 && test_bit(R5_UPTODATE, &qdev->flags))))) 3186 handle_stripe_clean_event(conf, sh, disks, &s.return_bi); 3187 3188 /* Now we might consider reading some blocks, either to check/generate 3189 * parity, or to satisfy requests 3190 * or to load a block that is being partially written. 3191 */ 3192 if (s.to_read || s.non_overwrite 3193 || (conf->level == 6 && s.to_write && s.failed) 3194 || (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding) 3195 handle_stripe_fill(sh, &s, disks); 3196 3197 /* Now we check to see if any write operations have recently 3198 * completed 3199 */ 3200 prexor = 0; 3201 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) 3202 prexor = 1; 3203 if (sh->reconstruct_state == reconstruct_state_drain_result || 3204 sh->reconstruct_state == reconstruct_state_prexor_drain_result) { 3205 sh->reconstruct_state = reconstruct_state_idle; 3206 3207 /* All the 'written' buffers and the parity block are ready to 3208 * be written back to disk 3209 */ 3210 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags)); 3211 BUG_ON(sh->qd_idx >= 0 && 3212 !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags)); 3213 for (i = disks; i--; ) { 3214 struct r5dev *dev = &sh->dev[i]; 3215 if (test_bit(R5_LOCKED, &dev->flags) && 3216 (i == sh->pd_idx || i == sh->qd_idx || 3217 dev->written)) { 3218 pr_debug("Writing block %d\n", i); 3219 set_bit(R5_Wantwrite, &dev->flags); 3220 if (prexor) 3221 continue; 3222 if (!test_bit(R5_Insync, &dev->flags) || 3223 ((i == sh->pd_idx || i == sh->qd_idx) && 3224 s.failed == 0)) 3225 set_bit(STRIPE_INSYNC, &sh->state); 3226 } 3227 } 3228 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3229 s.dec_preread_active = 1; 3230 } 3231 3232 /* Now to consider new write requests and what else, if anything 3233 * should be read. We do not handle new writes when: 3234 * 1/ A 'write' operation (copy+xor) is already in flight. 3235 * 2/ A 'check' operation is in flight, as it may clobber the parity 3236 * block. 
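* Both cases are caught below simply by skipping handle_stripe_dirtying()
* while sh->reconstruct_state or sh->check_state is still set.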
3237 */ 3238 if (s.to_write && !sh->reconstruct_state && !sh->check_state) 3239 handle_stripe_dirtying(conf, sh, &s, disks); 3240 3241 /* maybe we need to check and possibly fix the parity for this stripe 3242 * Any reads will already have been scheduled, so we just see if enough 3243 * data is available. The parity check is held off while parity 3244 * dependent operations are in flight. 3245 */ 3246 if (sh->check_state || 3247 (s.syncing && s.locked == 0 && 3248 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && 3249 !test_bit(STRIPE_INSYNC, &sh->state))) { 3250 if (conf->level == 6) 3251 handle_parity_checks6(conf, sh, &s, disks); 3252 else 3253 handle_parity_checks5(conf, sh, &s, disks); 3254 } 3255 3256 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { 3257 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 3258 clear_bit(STRIPE_SYNCING, &sh->state); 3259 } 3260 3261 /* If the failed drives are just a ReadError, then we might need 3262 * to progress the repair/check process 3263 */ 3264 if (s.failed <= conf->max_degraded && !conf->mddev->ro) 3265 for (i = 0; i < s.failed; i++) { 3266 struct r5dev *dev = &sh->dev[s.failed_num[i]]; 3267 if (test_bit(R5_ReadError, &dev->flags) 3268 && !test_bit(R5_LOCKED, &dev->flags) 3269 && test_bit(R5_UPTODATE, &dev->flags) 3270 ) { 3271 if (!test_bit(R5_ReWrite, &dev->flags)) { 3272 set_bit(R5_Wantwrite, &dev->flags); 3273 set_bit(R5_ReWrite, &dev->flags); 3274 set_bit(R5_LOCKED, &dev->flags); 3275 s.locked++; 3276 } else { 3277 /* let's read it back */ 3278 set_bit(R5_Wantread, &dev->flags); 3279 set_bit(R5_LOCKED, &dev->flags); 3280 s.locked++; 3281 } 3282 } 3283 } 3284 3285 3286 /* Finish reconstruct operations initiated by the expansion process */ 3287 if (sh->reconstruct_state == reconstruct_state_result) { 3288 struct stripe_head *sh_src 3289 = get_active_stripe(conf, sh->sector, 1, 1, 1); 3290 if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) { 3291 /* sh cannot be written until sh_src has been read. 
3292 * so arrange for sh to be delayed a little 3293 */ 3294 set_bit(STRIPE_DELAYED, &sh->state); 3295 set_bit(STRIPE_HANDLE, &sh->state); 3296 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, 3297 &sh_src->state)) 3298 atomic_inc(&conf->preread_active_stripes); 3299 release_stripe(sh_src); 3300 goto finish; 3301 } 3302 if (sh_src) 3303 release_stripe(sh_src); 3304 3305 sh->reconstruct_state = reconstruct_state_idle; 3306 clear_bit(STRIPE_EXPANDING, &sh->state); 3307 for (i = conf->raid_disks; i--; ) { 3308 set_bit(R5_Wantwrite, &sh->dev[i].flags); 3309 set_bit(R5_LOCKED, &sh->dev[i].flags); 3310 s.locked++; 3311 } 3312 } 3313 3314 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && 3315 !sh->reconstruct_state) { 3316 /* Need to write out all blocks after computing parity */ 3317 sh->disks = conf->raid_disks; 3318 stripe_set_idx(sh->sector, conf, 0, sh); 3319 schedule_reconstruction(sh, &s, 1, 1); 3320 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { 3321 clear_bit(STRIPE_EXPAND_READY, &sh->state); 3322 atomic_dec(&conf->reshape_stripes); 3323 wake_up(&conf->wait_for_overlap); 3324 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 3325 } 3326 3327 if (s.expanding && s.locked == 0 && 3328 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) 3329 handle_stripe_expansion(conf, sh); 3330 3331 finish: 3332 /* wait for this device to become unblocked */ 3333 if (conf->mddev->external && unlikely(s.blocked_rdev)) 3334 md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev); 3335 3336 if (s.handle_bad_blocks) 3337 for (i = disks; i--; ) { 3338 struct md_rdev *rdev; 3339 struct r5dev *dev = &sh->dev[i]; 3340 if (test_and_clear_bit(R5_WriteError, &dev->flags)) { 3341 /* We own a safe reference to the rdev */ 3342 rdev = conf->disks[i].rdev; 3343 if (!rdev_set_badblocks(rdev, sh->sector, 3344 STRIPE_SECTORS, 0)) 3345 md_error(conf->mddev, rdev); 3346 rdev_dec_pending(rdev, conf->mddev); 3347 } 3348 if (test_and_clear_bit(R5_MadeGood, &dev->flags)) { 3349 rdev = conf->disks[i].rdev; 3350 rdev_clear_badblocks(rdev, sh->sector, 3351 STRIPE_SECTORS); 3352 rdev_dec_pending(rdev, conf->mddev); 3353 } 3354 } 3355 3356 if (s.ops_request) 3357 raid_run_ops(sh, s.ops_request); 3358 3359 ops_run_io(sh, &s); 3360 3361 if (s.dec_preread_active) { 3362 /* We delay this until after ops_run_io so that if make_request 3363 * is waiting on a flush, it won't continue until the writes 3364 * have actually been submitted. 
3365 */ 3366 atomic_dec(&conf->preread_active_stripes); 3367 if (atomic_read(&conf->preread_active_stripes) < 3368 IO_THRESHOLD) 3369 md_wakeup_thread(conf->mddev->thread); 3370 } 3371 3372 return_io(s.return_bi); 3373 3374 clear_bit(STRIPE_ACTIVE, &sh->state); 3375 } 3376 3377 static void raid5_activate_delayed(struct r5conf *conf) 3378 { 3379 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { 3380 while (!list_empty(&conf->delayed_list)) { 3381 struct list_head *l = conf->delayed_list.next; 3382 struct stripe_head *sh; 3383 sh = list_entry(l, struct stripe_head, lru); 3384 list_del_init(l); 3385 clear_bit(STRIPE_DELAYED, &sh->state); 3386 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3387 atomic_inc(&conf->preread_active_stripes); 3388 list_add_tail(&sh->lru, &conf->hold_list); 3389 } 3390 } 3391 } 3392 3393 static void activate_bit_delay(struct r5conf *conf) 3394 { 3395 /* device_lock is held */ 3396 struct list_head head; 3397 list_add(&head, &conf->bitmap_list); 3398 list_del_init(&conf->bitmap_list); 3399 while (!list_empty(&head)) { 3400 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); 3401 list_del_init(&sh->lru); 3402 atomic_inc(&sh->count); 3403 __release_stripe(conf, sh); 3404 } 3405 } 3406 3407 int md_raid5_congested(struct mddev *mddev, int bits) 3408 { 3409 struct r5conf *conf = mddev->private; 3410 3411 /* No difference between reads and writes. Just check 3412 * how busy the stripe_cache is 3413 */ 3414 3415 if (conf->inactive_blocked) 3416 return 1; 3417 if (conf->quiesce) 3418 return 1; 3419 if (list_empty_careful(&conf->inactive_list)) 3420 return 1; 3421 3422 return 0; 3423 } 3424 EXPORT_SYMBOL_GPL(md_raid5_congested); 3425 3426 static int raid5_congested(void *data, int bits) 3427 { 3428 struct mddev *mddev = data; 3429 3430 return mddev_congested(mddev, bits) || 3431 md_raid5_congested(mddev, bits); 3432 } 3433 3434 /* We want read requests to align with chunks where possible, 3435 * but write requests don't need to. 3436 */ 3437 static int raid5_mergeable_bvec(struct request_queue *q, 3438 struct bvec_merge_data *bvm, 3439 struct bio_vec *biovec) 3440 { 3441 struct mddev *mddev = q->queuedata; 3442 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); 3443 int max; 3444 unsigned int chunk_sectors = mddev->chunk_sectors; 3445 unsigned int bio_sectors = bvm->bi_size >> 9; 3446 3447 if ((bvm->bi_rw & 1) == WRITE) 3448 return biovec->bv_len; /* always allow writes to be mergeable */ 3449 3450 if (mddev->new_chunk_sectors < mddev->chunk_sectors) 3451 chunk_sectors = mddev->new_chunk_sectors; 3452 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; 3453 if (max < 0) max = 0; 3454 if (max <= biovec->bv_len && bio_sectors == 0) 3455 return biovec->bv_len; 3456 else 3457 return max; 3458 } 3459 3460 3461 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) 3462 { 3463 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); 3464 unsigned int chunk_sectors = mddev->chunk_sectors; 3465 unsigned int bio_sectors = bio->bi_size >> 9; 3466 3467 if (mddev->new_chunk_sectors < mddev->chunk_sectors) 3468 chunk_sectors = mddev->new_chunk_sectors; 3469 return chunk_sectors >= 3470 ((sector & (chunk_sectors - 1)) + bio_sectors); 3471 } 3472 3473 /* 3474 * add bio to the retry LIFO ( in O(1) ... we are in interrupt ) 3475 * later sampled by raid5d. 
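* The list is singly linked through bi_next; raid5d later pulls the
* bios back off, one at a time, with remove_bio_from_retry() and
* resubmits them.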
3476 */ 3477 static void add_bio_to_retry(struct bio *bi,struct r5conf *conf) 3478 { 3479 unsigned long flags; 3480 3481 spin_lock_irqsave(&conf->device_lock, flags); 3482 3483 bi->bi_next = conf->retry_read_aligned_list; 3484 conf->retry_read_aligned_list = bi; 3485 3486 spin_unlock_irqrestore(&conf->device_lock, flags); 3487 md_wakeup_thread(conf->mddev->thread); 3488 } 3489 3490 3491 static struct bio *remove_bio_from_retry(struct r5conf *conf) 3492 { 3493 struct bio *bi; 3494 3495 bi = conf->retry_read_aligned; 3496 if (bi) { 3497 conf->retry_read_aligned = NULL; 3498 return bi; 3499 } 3500 bi = conf->retry_read_aligned_list; 3501 if(bi) { 3502 conf->retry_read_aligned_list = bi->bi_next; 3503 bi->bi_next = NULL; 3504 /* 3505 * this sets the active strip count to 1 and the processed 3506 * strip count to zero (upper 8 bits) 3507 */ 3508 bi->bi_phys_segments = 1; /* biased count of active stripes */ 3509 } 3510 3511 return bi; 3512 } 3513 3514 3515 /* 3516 * The "raid5_align_endio" should check if the read succeeded and if it 3517 * did, call bio_endio on the original bio (having bio_put the new bio 3518 * first). 3519 * If the read failed.. 3520 */ 3521 static void raid5_align_endio(struct bio *bi, int error) 3522 { 3523 struct bio* raid_bi = bi->bi_private; 3524 struct mddev *mddev; 3525 struct r5conf *conf; 3526 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 3527 struct md_rdev *rdev; 3528 3529 bio_put(bi); 3530 3531 rdev = (void*)raid_bi->bi_next; 3532 raid_bi->bi_next = NULL; 3533 mddev = rdev->mddev; 3534 conf = mddev->private; 3535 3536 rdev_dec_pending(rdev, conf->mddev); 3537 3538 if (!error && uptodate) { 3539 bio_endio(raid_bi, 0); 3540 if (atomic_dec_and_test(&conf->active_aligned_reads)) 3541 wake_up(&conf->wait_for_stripe); 3542 return; 3543 } 3544 3545 3546 pr_debug("raid5_align_endio : io error...handing IO for a retry\n"); 3547 3548 add_bio_to_retry(raid_bi, conf); 3549 } 3550 3551 static int bio_fits_rdev(struct bio *bi) 3552 { 3553 struct request_queue *q = bdev_get_queue(bi->bi_bdev); 3554 3555 if ((bi->bi_size>>9) > queue_max_sectors(q)) 3556 return 0; 3557 blk_recount_segments(q, bi); 3558 if (bi->bi_phys_segments > queue_max_segments(q)) 3559 return 0; 3560 3561 if (q->merge_bvec_fn) 3562 /* it's too hard to apply the merge_bvec_fn at this stage, 3563 * just just give up 3564 */ 3565 return 0; 3566 3567 return 1; 3568 } 3569 3570 3571 static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) 3572 { 3573 struct r5conf *conf = mddev->private; 3574 int dd_idx; 3575 struct bio* align_bi; 3576 struct md_rdev *rdev; 3577 3578 if (!in_chunk_boundary(mddev, raid_bio)) { 3579 pr_debug("chunk_aligned_read : non aligned\n"); 3580 return 0; 3581 } 3582 /* 3583 * use bio_clone_mddev to make a copy of the bio 3584 */ 3585 align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev); 3586 if (!align_bi) 3587 return 0; 3588 /* 3589 * set bi_end_io to a new function, and set bi_private to the 3590 * original bio. 
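* raid5_align_endio() recovers the rdev from the original bio's
* bi_next (stashed just below) and then either completes the
* original bio or hands it back for a retry.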
3591 */ 3592 align_bi->bi_end_io = raid5_align_endio; 3593 align_bi->bi_private = raid_bio; 3594 /* 3595 * compute position 3596 */ 3597 align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector, 3598 0, 3599 &dd_idx, NULL); 3600 3601 rcu_read_lock(); 3602 rdev = rcu_dereference(conf->disks[dd_idx].rdev); 3603 if (rdev && test_bit(In_sync, &rdev->flags)) { 3604 sector_t first_bad; 3605 int bad_sectors; 3606 3607 atomic_inc(&rdev->nr_pending); 3608 rcu_read_unlock(); 3609 raid_bio->bi_next = (void*)rdev; 3610 align_bi->bi_bdev = rdev->bdev; 3611 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID); 3612 align_bi->bi_sector += rdev->data_offset; 3613 3614 if (!bio_fits_rdev(align_bi) || 3615 is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9, 3616 &first_bad, &bad_sectors)) { 3617 /* too big in some way, or has a known bad block */ 3618 bio_put(align_bi); 3619 rdev_dec_pending(rdev, mddev); 3620 return 0; 3621 } 3622 3623 spin_lock_irq(&conf->device_lock); 3624 wait_event_lock_irq(conf->wait_for_stripe, 3625 conf->quiesce == 0, 3626 conf->device_lock, /* nothing */); 3627 atomic_inc(&conf->active_aligned_reads); 3628 spin_unlock_irq(&conf->device_lock); 3629 3630 generic_make_request(align_bi); 3631 return 1; 3632 } else { 3633 rcu_read_unlock(); 3634 bio_put(align_bi); 3635 return 0; 3636 } 3637 } 3638 3639 /* __get_priority_stripe - get the next stripe to process 3640 * 3641 * Full stripe writes are allowed to pass preread active stripes up until 3642 * the bypass_threshold is exceeded. In general the bypass_count 3643 * increments when the handle_list is handled before the hold_list; however, it 3644 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a 3645 * stripe with in flight i/o. The bypass_count will be reset when the 3646 * head of the hold_list has changed, i.e. the head was promoted to the 3647 * handle_list. 3648 */ 3649 static struct stripe_head *__get_priority_stripe(struct r5conf *conf) 3650 { 3651 struct stripe_head *sh; 3652 3653 pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n", 3654 __func__, 3655 list_empty(&conf->handle_list) ? "empty" : "busy", 3656 list_empty(&conf->hold_list) ? 
"empty" : "busy", 3657 atomic_read(&conf->pending_full_writes), conf->bypass_count); 3658 3659 if (!list_empty(&conf->handle_list)) { 3660 sh = list_entry(conf->handle_list.next, typeof(*sh), lru); 3661 3662 if (list_empty(&conf->hold_list)) 3663 conf->bypass_count = 0; 3664 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { 3665 if (conf->hold_list.next == conf->last_hold) 3666 conf->bypass_count++; 3667 else { 3668 conf->last_hold = conf->hold_list.next; 3669 conf->bypass_count -= conf->bypass_threshold; 3670 if (conf->bypass_count < 0) 3671 conf->bypass_count = 0; 3672 } 3673 } 3674 } else if (!list_empty(&conf->hold_list) && 3675 ((conf->bypass_threshold && 3676 conf->bypass_count > conf->bypass_threshold) || 3677 atomic_read(&conf->pending_full_writes) == 0)) { 3678 sh = list_entry(conf->hold_list.next, 3679 typeof(*sh), lru); 3680 conf->bypass_count -= conf->bypass_threshold; 3681 if (conf->bypass_count < 0) 3682 conf->bypass_count = 0; 3683 } else 3684 return NULL; 3685 3686 list_del_init(&sh->lru); 3687 atomic_inc(&sh->count); 3688 BUG_ON(atomic_read(&sh->count) != 1); 3689 return sh; 3690 } 3691 3692 static void make_request(struct mddev *mddev, struct bio * bi) 3693 { 3694 struct r5conf *conf = mddev->private; 3695 int dd_idx; 3696 sector_t new_sector; 3697 sector_t logical_sector, last_sector; 3698 struct stripe_head *sh; 3699 const int rw = bio_data_dir(bi); 3700 int remaining; 3701 int plugged; 3702 3703 if (unlikely(bi->bi_rw & REQ_FLUSH)) { 3704 md_flush_request(mddev, bi); 3705 return; 3706 } 3707 3708 md_write_start(mddev, bi); 3709 3710 if (rw == READ && 3711 mddev->reshape_position == MaxSector && 3712 chunk_aligned_read(mddev,bi)) 3713 return; 3714 3715 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 3716 last_sector = bi->bi_sector + (bi->bi_size>>9); 3717 bi->bi_next = NULL; 3718 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ 3719 3720 plugged = mddev_check_plugged(mddev); 3721 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { 3722 DEFINE_WAIT(w); 3723 int disks, data_disks; 3724 int previous; 3725 3726 retry: 3727 previous = 0; 3728 disks = conf->raid_disks; 3729 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); 3730 if (unlikely(conf->reshape_progress != MaxSector)) { 3731 /* spinlock is needed as reshape_progress may be 3732 * 64bit on a 32bit platform, and so it might be 3733 * possible to see a half-updated value 3734 * Of course reshape_progress could change after 3735 * the lock is dropped, so once we get a reference 3736 * to the stripe that we think it is, we will have 3737 * to check again. 3738 */ 3739 spin_lock_irq(&conf->device_lock); 3740 if (mddev->delta_disks < 0 3741 ? logical_sector < conf->reshape_progress 3742 : logical_sector >= conf->reshape_progress) { 3743 disks = conf->previous_raid_disks; 3744 previous = 1; 3745 } else { 3746 if (mddev->delta_disks < 0 3747 ? 
logical_sector < conf->reshape_safe 3748 : logical_sector >= conf->reshape_safe) { 3749 spin_unlock_irq(&conf->device_lock); 3750 schedule(); 3751 goto retry; 3752 } 3753 } 3754 spin_unlock_irq(&conf->device_lock); 3755 } 3756 data_disks = disks - conf->max_degraded; 3757 3758 new_sector = raid5_compute_sector(conf, logical_sector, 3759 previous, 3760 &dd_idx, NULL); 3761 pr_debug("raid456: make_request, sector %llu logical %llu\n", 3762 (unsigned long long)new_sector, 3763 (unsigned long long)logical_sector); 3764 3765 sh = get_active_stripe(conf, new_sector, previous, 3766 (bi->bi_rw&RWA_MASK), 0); 3767 if (sh) { 3768 if (unlikely(previous)) { 3769 /* expansion might have moved on while waiting for a 3770 * stripe, so we must do the range check again. 3771 * Expansion could still move past after this 3772 * test, but as we are holding a reference to 3773 * 'sh', we know that if that happens, 3774 * STRIPE_EXPANDING will get set and the expansion 3775 * won't proceed until we finish with the stripe. 3776 */ 3777 int must_retry = 0; 3778 spin_lock_irq(&conf->device_lock); 3779 if (mddev->delta_disks < 0 3780 ? logical_sector >= conf->reshape_progress 3781 : logical_sector < conf->reshape_progress) 3782 /* mismatch, need to try again */ 3783 must_retry = 1; 3784 spin_unlock_irq(&conf->device_lock); 3785 if (must_retry) { 3786 release_stripe(sh); 3787 schedule(); 3788 goto retry; 3789 } 3790 } 3791 3792 if (rw == WRITE && 3793 logical_sector >= mddev->suspend_lo && 3794 logical_sector < mddev->suspend_hi) { 3795 release_stripe(sh); 3796 /* As the suspend_* range is controlled by 3797 * userspace, we want an interruptible 3798 * wait. 3799 */ 3800 flush_signals(current); 3801 prepare_to_wait(&conf->wait_for_overlap, 3802 &w, TASK_INTERRUPTIBLE); 3803 if (logical_sector >= mddev->suspend_lo && 3804 logical_sector < mddev->suspend_hi) 3805 schedule(); 3806 goto retry; 3807 } 3808 3809 if (test_bit(STRIPE_EXPANDING, &sh->state) || 3810 !add_stripe_bio(sh, bi, dd_idx, rw)) { 3811 /* Stripe is busy expanding or 3812 * add failed due to overlap. Flush everything 3813 * and wait a while 3814 */ 3815 md_wakeup_thread(mddev->thread); 3816 release_stripe(sh); 3817 schedule(); 3818 goto retry; 3819 } 3820 finish_wait(&conf->wait_for_overlap, &w); 3821 set_bit(STRIPE_HANDLE, &sh->state); 3822 clear_bit(STRIPE_DELAYED, &sh->state); 3823 if ((bi->bi_rw & REQ_SYNC) && 3824 !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3825 atomic_inc(&conf->preread_active_stripes); 3826 release_stripe(sh); 3827 } else { 3828 /* cannot get stripe for read-ahead, just give-up */ 3829 clear_bit(BIO_UPTODATE, &bi->bi_flags); 3830 finish_wait(&conf->wait_for_overlap, &w); 3831 break; 3832 } 3833 3834 } 3835 if (!plugged) 3836 md_wakeup_thread(mddev->thread); 3837 3838 spin_lock_irq(&conf->device_lock); 3839 remaining = raid5_dec_bi_phys_segments(bi); 3840 spin_unlock_irq(&conf->device_lock); 3841 if (remaining == 0) { 3842 3843 if ( rw == WRITE ) 3844 md_write_end(mddev); 3845 3846 bio_endio(bi, 0); 3847 } 3848 } 3849 3850 static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks); 3851 3852 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped) 3853 { 3854 /* reshaping is quite different to recovery/resync so it is 3855 * handled quite separately ... here. 3856 * 3857 * On each call to sync_request, we gather one chunk worth of 3858 * destination stripes and flag them as expanding. 3859 * Then we find all the source stripes and request reads. 
3860 * As the reads complete, handle_stripe will copy the data 3861 * into the destination stripe and release that stripe. 3862 */ 3863 struct r5conf *conf = mddev->private; 3864 struct stripe_head *sh; 3865 sector_t first_sector, last_sector; 3866 int raid_disks = conf->previous_raid_disks; 3867 int data_disks = raid_disks - conf->max_degraded; 3868 int new_data_disks = conf->raid_disks - conf->max_degraded; 3869 int i; 3870 int dd_idx; 3871 sector_t writepos, readpos, safepos; 3872 sector_t stripe_addr; 3873 int reshape_sectors; 3874 struct list_head stripes; 3875 3876 if (sector_nr == 0) { 3877 /* If restarting in the middle, skip the initial sectors */ 3878 if (mddev->delta_disks < 0 && 3879 conf->reshape_progress < raid5_size(mddev, 0, 0)) { 3880 sector_nr = raid5_size(mddev, 0, 0) 3881 - conf->reshape_progress; 3882 } else if (mddev->delta_disks >= 0 && 3883 conf->reshape_progress > 0) 3884 sector_nr = conf->reshape_progress; 3885 sector_div(sector_nr, new_data_disks); 3886 if (sector_nr) { 3887 mddev->curr_resync_completed = sector_nr; 3888 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 3889 *skipped = 1; 3890 return sector_nr; 3891 } 3892 } 3893 3894 /* We need to process a full chunk at a time. 3895 * If old and new chunk sizes differ, we need to process the 3896 * largest of these 3897 */ 3898 if (mddev->new_chunk_sectors > mddev->chunk_sectors) 3899 reshape_sectors = mddev->new_chunk_sectors; 3900 else 3901 reshape_sectors = mddev->chunk_sectors; 3902 3903 /* we update the metadata when there is more than 3Meg 3904 * in the block range (that is rather arbitrary, should 3905 * probably be time based) or when the data about to be 3906 * copied would over-write the source of the data at 3907 * the front of the range. 3908 * i.e. one new_stripe along from reshape_progress new_maps 3909 * to after where reshape_safe old_maps to 3910 */ 3911 writepos = conf->reshape_progress; 3912 sector_div(writepos, new_data_disks); 3913 readpos = conf->reshape_progress; 3914 sector_div(readpos, data_disks); 3915 safepos = conf->reshape_safe; 3916 sector_div(safepos, data_disks); 3917 if (mddev->delta_disks < 0) { 3918 writepos -= min_t(sector_t, reshape_sectors, writepos); 3919 readpos += reshape_sectors; 3920 safepos += reshape_sectors; 3921 } else { 3922 writepos += reshape_sectors; 3923 readpos -= min_t(sector_t, reshape_sectors, readpos); 3924 safepos -= min_t(sector_t, reshape_sectors, safepos); 3925 } 3926 3927 /* 'writepos' is the most advanced device address we might write. 3928 * 'readpos' is the least advanced device address we might read. 3929 * 'safepos' is the least address recorded in the metadata as having 3930 * been reshaped. 3931 * If 'readpos' is behind 'writepos', then there is no way that we can 3932 * ensure safety in the face of a crash - that must be done by userspace 3933 * making a backup of the data. So in that case there is no particular 3934 * rush to update metadata. 3935 * Otherwise if 'safepos' is behind 'writepos', then we really need to 3936 * update the metadata to advance 'safepos' to match 'readpos' so that 3937 * we can be safe in the event of a crash. 3938 * So we insist on updating metadata if safepos is behind writepos and 3939 * readpos is beyond writepos. 3940 * In any case, update the metadata every 10 seconds. 3941 * Maybe that number should be configurable, but I'm not sure it is 3942 * worth it.... maybe it could be a multiple of safemode_delay??? 3943 */ 3944 if ((mddev->delta_disks < 0 3945 ? 
(safepos > writepos && readpos < writepos) 3946 : (safepos < writepos && readpos > writepos)) || 3947 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { 3948 /* Cannot proceed until we've updated the superblock... */ 3949 wait_event(conf->wait_for_overlap, 3950 atomic_read(&conf->reshape_stripes)==0); 3951 mddev->reshape_position = conf->reshape_progress; 3952 mddev->curr_resync_completed = sector_nr; 3953 conf->reshape_checkpoint = jiffies; 3954 set_bit(MD_CHANGE_DEVS, &mddev->flags); 3955 md_wakeup_thread(mddev->thread); 3956 wait_event(mddev->sb_wait, mddev->flags == 0 || 3957 kthread_should_stop()); 3958 spin_lock_irq(&conf->device_lock); 3959 conf->reshape_safe = mddev->reshape_position; 3960 spin_unlock_irq(&conf->device_lock); 3961 wake_up(&conf->wait_for_overlap); 3962 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 3963 } 3964 3965 if (mddev->delta_disks < 0) { 3966 BUG_ON(conf->reshape_progress == 0); 3967 stripe_addr = writepos; 3968 BUG_ON((mddev->dev_sectors & 3969 ~((sector_t)reshape_sectors - 1)) 3970 - reshape_sectors - stripe_addr 3971 != sector_nr); 3972 } else { 3973 BUG_ON(writepos != sector_nr + reshape_sectors); 3974 stripe_addr = sector_nr; 3975 } 3976 INIT_LIST_HEAD(&stripes); 3977 for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) { 3978 int j; 3979 int skipped_disk = 0; 3980 sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1); 3981 set_bit(STRIPE_EXPANDING, &sh->state); 3982 atomic_inc(&conf->reshape_stripes); 3983 /* If any of this stripe is beyond the end of the old 3984 * array, then we need to zero those blocks 3985 */ 3986 for (j=sh->disks; j--;) { 3987 sector_t s; 3988 if (j == sh->pd_idx) 3989 continue; 3990 if (conf->level == 6 && 3991 j == sh->qd_idx) 3992 continue; 3993 s = compute_blocknr(sh, j, 0); 3994 if (s < raid5_size(mddev, 0, 0)) { 3995 skipped_disk = 1; 3996 continue; 3997 } 3998 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); 3999 set_bit(R5_Expanded, &sh->dev[j].flags); 4000 set_bit(R5_UPTODATE, &sh->dev[j].flags); 4001 } 4002 if (!skipped_disk) { 4003 set_bit(STRIPE_EXPAND_READY, &sh->state); 4004 set_bit(STRIPE_HANDLE, &sh->state); 4005 } 4006 list_add(&sh->lru, &stripes); 4007 } 4008 spin_lock_irq(&conf->device_lock); 4009 if (mddev->delta_disks < 0) 4010 conf->reshape_progress -= reshape_sectors * new_data_disks; 4011 else 4012 conf->reshape_progress += reshape_sectors * new_data_disks; 4013 spin_unlock_irq(&conf->device_lock); 4014 /* Ok, those stripes are ready. We can start scheduling 4015 * reads on the source stripes. 4016 * The source stripes are determined by mapping the first and last 4017 * block on the destination stripes.
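 *
 * A worked example, with illustrative numbers only: growing a RAID5
 * from 4 to 5 devices with 64K chunks gives new_data_disks == 4 and
 * reshape_sectors == 128, so for stripe_addr == 0 the destination
 * range covers logical sectors
 *   first = stripe_addr * new_data_disks                      = 0
 *   last  = (stripe_addr + reshape_sectors) * new_data_disks - 1 = 511
 * and both are then passed through raid5_compute_sector() with
 * 'previous' set, so the reads below land on the old 4-device layout.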
4018 */ 4019 first_sector = 4020 raid5_compute_sector(conf, stripe_addr*(new_data_disks), 4021 1, &dd_idx, NULL); 4022 last_sector = 4023 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors) 4024 * new_data_disks - 1), 4025 1, &dd_idx, NULL); 4026 if (last_sector >= mddev->dev_sectors) 4027 last_sector = mddev->dev_sectors - 1; 4028 while (first_sector <= last_sector) { 4029 sh = get_active_stripe(conf, first_sector, 1, 0, 1); 4030 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); 4031 set_bit(STRIPE_HANDLE, &sh->state); 4032 release_stripe(sh); 4033 first_sector += STRIPE_SECTORS; 4034 } 4035 /* Now that the sources are clearly marked, we can release 4036 * the destination stripes 4037 */ 4038 while (!list_empty(&stripes)) { 4039 sh = list_entry(stripes.next, struct stripe_head, lru); 4040 list_del_init(&sh->lru); 4041 release_stripe(sh); 4042 } 4043 /* If this takes us to the resync_max point where we have to pause, 4044 * then we need to write out the superblock. 4045 */ 4046 sector_nr += reshape_sectors; 4047 if ((sector_nr - mddev->curr_resync_completed) * 2 4048 >= mddev->resync_max - mddev->curr_resync_completed) { 4049 /* Cannot proceed until we've updated the superblock... */ 4050 wait_event(conf->wait_for_overlap, 4051 atomic_read(&conf->reshape_stripes) == 0); 4052 mddev->reshape_position = conf->reshape_progress; 4053 mddev->curr_resync_completed = sector_nr; 4054 conf->reshape_checkpoint = jiffies; 4055 set_bit(MD_CHANGE_DEVS, &mddev->flags); 4056 md_wakeup_thread(mddev->thread); 4057 wait_event(mddev->sb_wait, 4058 !test_bit(MD_CHANGE_DEVS, &mddev->flags) 4059 || kthread_should_stop()); 4060 spin_lock_irq(&conf->device_lock); 4061 conf->reshape_safe = mddev->reshape_position; 4062 spin_unlock_irq(&conf->device_lock); 4063 wake_up(&conf->wait_for_overlap); 4064 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 4065 } 4066 return reshape_sectors; 4067 } 4068 4069 /* FIXME go_faster isn't used */ 4070 static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster) 4071 { 4072 struct r5conf *conf = mddev->private; 4073 struct stripe_head *sh; 4074 sector_t max_sector = mddev->dev_sectors; 4075 sector_t sync_blocks; 4076 int still_degraded = 0; 4077 int i; 4078 4079 if (sector_nr >= max_sector) { 4080 /* just being told to finish up .. nothing much to do */ 4081 4082 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 4083 end_reshape(conf); 4084 return 0; 4085 } 4086 4087 if (mddev->curr_resync < max_sector) /* aborted */ 4088 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 4089 &sync_blocks, 1); 4090 else /* completed sync */ 4091 conf->fullsync = 0; 4092 bitmap_close_sync(mddev->bitmap); 4093 4094 return 0; 4095 } 4096 4097 /* Allow raid5_quiesce to complete */ 4098 wait_event(conf->wait_for_overlap, conf->quiesce != 2); 4099 4100 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 4101 return reshape_request(mddev, sector_nr, skipped); 4102 4103 /* No need to check resync_max as we never do more than one 4104 * stripe, and as resync_max will always be on a chunk boundary, 4105 * if the check in md_do_sync didn't fire, there is no chance 4106 * of overstepping resync_max here 4107 */ 4108 4109 /* if there is too many failed drives and we are trying 4110 * to resync, then assert that we are finished, because there is 4111 * nothing we can do. 
4112 */ 4113 if (mddev->degraded >= conf->max_degraded && 4114 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 4115 sector_t rv = mddev->dev_sectors - sector_nr; 4116 *skipped = 1; 4117 return rv; 4118 } 4119 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && 4120 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 4121 !conf->fullsync && sync_blocks >= STRIPE_SECTORS) { 4122 /* we can skip this block, and probably more */ 4123 sync_blocks /= STRIPE_SECTORS; 4124 *skipped = 1; 4125 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */ 4126 } 4127 4128 4129 bitmap_cond_end_sync(mddev->bitmap, sector_nr); 4130 4131 sh = get_active_stripe(conf, sector_nr, 0, 1, 0); 4132 if (sh == NULL) { 4133 sh = get_active_stripe(conf, sector_nr, 0, 0, 0); 4134 /* make sure we don't swamp the stripe cache if someone else 4135 * is trying to get access 4136 */ 4137 schedule_timeout_uninterruptible(1); 4138 } 4139 /* Need to check if array will still be degraded after recovery/resync 4140 * We don't need to check the 'failed' flag as when that gets set, 4141 * recovery aborts. 4142 */ 4143 for (i = 0; i < conf->raid_disks; i++) 4144 if (conf->disks[i].rdev == NULL) 4145 still_degraded = 1; 4146 4147 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); 4148 4149 set_bit(STRIPE_SYNC_REQUESTED, &sh->state); 4150 4151 handle_stripe(sh); 4152 release_stripe(sh); 4153 4154 return STRIPE_SECTORS; 4155 } 4156 4157 static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) 4158 { 4159 /* We may not be able to submit a whole bio at once as there 4160 * may not be enough stripe_heads available. 4161 * We cannot pre-allocate enough stripe_heads as we may need 4162 * more than exist in the cache (if we allow ever large chunks). 4163 * So we do one stripe head at a time and record in 4164 * ->bi_hw_segments how many have been done. 4165 * 4166 * We *know* that this entire raid_bio is in one chunk, so 4167 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector. 
4168 */ 4169 struct stripe_head *sh; 4170 int dd_idx; 4171 sector_t sector, logical_sector, last_sector; 4172 int scnt = 0; 4173 int remaining; 4174 int handled = 0; 4175 4176 logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 4177 sector = raid5_compute_sector(conf, logical_sector, 4178 0, &dd_idx, NULL); 4179 last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9); 4180 4181 for (; logical_sector < last_sector; 4182 logical_sector += STRIPE_SECTORS, 4183 sector += STRIPE_SECTORS, 4184 scnt++) { 4185 4186 if (scnt < raid5_bi_hw_segments(raid_bio)) 4187 /* already done this stripe */ 4188 continue; 4189 4190 sh = get_active_stripe(conf, sector, 0, 1, 0); 4191 4192 if (!sh) { 4193 /* failed to get a stripe - must wait */ 4194 raid5_set_bi_hw_segments(raid_bio, scnt); 4195 conf->retry_read_aligned = raid_bio; 4196 return handled; 4197 } 4198 4199 set_bit(R5_ReadError, &sh->dev[dd_idx].flags); 4200 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) { 4201 release_stripe(sh); 4202 raid5_set_bi_hw_segments(raid_bio, scnt); 4203 conf->retry_read_aligned = raid_bio; 4204 return handled; 4205 } 4206 4207 handle_stripe(sh); 4208 release_stripe(sh); 4209 handled++; 4210 } 4211 spin_lock_irq(&conf->device_lock); 4212 remaining = raid5_dec_bi_phys_segments(raid_bio); 4213 spin_unlock_irq(&conf->device_lock); 4214 if (remaining == 0) 4215 bio_endio(raid_bio, 0); 4216 if (atomic_dec_and_test(&conf->active_aligned_reads)) 4217 wake_up(&conf->wait_for_stripe); 4218 return handled; 4219 } 4220 4221 4222 /* 4223 * This is our raid5 kernel thread. 4224 * 4225 * We scan the hash table for stripes which can be handled now. 4226 * During the scan, completed stripes are saved for us by the interrupt 4227 * handler, so that they will not have to wait for our next wakeup. 
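 *
 * The main loop below runs under conf->device_lock, dropping it around
 * the slow parts: it flushes any pending bitmap batch once the array is
 * unplugged, activates delayed stripes, retries queued aligned reads,
 * and then handles stripes from __get_priority_stripe() one at a time.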
4228 */ 4229 static void raid5d(struct mddev *mddev) 4230 { 4231 struct stripe_head *sh; 4232 struct r5conf *conf = mddev->private; 4233 int handled; 4234 struct blk_plug plug; 4235 4236 pr_debug("+++ raid5d active\n"); 4237 4238 md_check_recovery(mddev); 4239 4240 blk_start_plug(&plug); 4241 handled = 0; 4242 spin_lock_irq(&conf->device_lock); 4243 while (1) { 4244 struct bio *bio; 4245 4246 if (atomic_read(&mddev->plug_cnt) == 0 && 4247 !list_empty(&conf->bitmap_list)) { 4248 /* Now is a good time to flush some bitmap updates */ 4249 conf->seq_flush++; 4250 spin_unlock_irq(&conf->device_lock); 4251 bitmap_unplug(mddev->bitmap); 4252 spin_lock_irq(&conf->device_lock); 4253 conf->seq_write = conf->seq_flush; 4254 activate_bit_delay(conf); 4255 } 4256 if (atomic_read(&mddev->plug_cnt) == 0) 4257 raid5_activate_delayed(conf); 4258 4259 while ((bio = remove_bio_from_retry(conf))) { 4260 int ok; 4261 spin_unlock_irq(&conf->device_lock); 4262 ok = retry_aligned_read(conf, bio); 4263 spin_lock_irq(&conf->device_lock); 4264 if (!ok) 4265 break; 4266 handled++; 4267 } 4268 4269 sh = __get_priority_stripe(conf); 4270 4271 if (!sh) 4272 break; 4273 spin_unlock_irq(&conf->device_lock); 4274 4275 handled++; 4276 handle_stripe(sh); 4277 release_stripe(sh); 4278 cond_resched(); 4279 4280 if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) 4281 md_check_recovery(mddev); 4282 4283 spin_lock_irq(&conf->device_lock); 4284 } 4285 pr_debug("%d stripes handled\n", handled); 4286 4287 spin_unlock_irq(&conf->device_lock); 4288 4289 async_tx_issue_pending_all(); 4290 blk_finish_plug(&plug); 4291 4292 pr_debug("--- raid5d inactive\n"); 4293 } 4294 4295 static ssize_t 4296 raid5_show_stripe_cache_size(struct mddev *mddev, char *page) 4297 { 4298 struct r5conf *conf = mddev->private; 4299 if (conf) 4300 return sprintf(page, "%d\n", conf->max_nr_stripes); 4301 else 4302 return 0; 4303 } 4304 4305 int 4306 raid5_set_cache_size(struct mddev *mddev, int size) 4307 { 4308 struct r5conf *conf = mddev->private; 4309 int err; 4310 4311 if (size <= 16 || size > 32768) 4312 return -EINVAL; 4313 while (size < conf->max_nr_stripes) { 4314 if (drop_one_stripe(conf)) 4315 conf->max_nr_stripes--; 4316 else 4317 break; 4318 } 4319 err = md_allow_write(mddev); 4320 if (err) 4321 return err; 4322 while (size > conf->max_nr_stripes) { 4323 if (grow_one_stripe(conf)) 4324 conf->max_nr_stripes++; 4325 else break; 4326 } 4327 return 0; 4328 } 4329 EXPORT_SYMBOL(raid5_set_cache_size); 4330 4331 static ssize_t 4332 raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len) 4333 { 4334 struct r5conf *conf = mddev->private; 4335 unsigned long new; 4336 int err; 4337 4338 if (len >= PAGE_SIZE) 4339 return -EINVAL; 4340 if (!conf) 4341 return -ENODEV; 4342 4343 if (strict_strtoul(page, 10, &new)) 4344 return -EINVAL; 4345 err = raid5_set_cache_size(mddev, new); 4346 if (err) 4347 return err; 4348 return len; 4349 } 4350 4351 static struct md_sysfs_entry 4352 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, 4353 raid5_show_stripe_cache_size, 4354 raid5_store_stripe_cache_size); 4355 4356 static ssize_t 4357 raid5_show_preread_threshold(struct mddev *mddev, char *page) 4358 { 4359 struct r5conf *conf = mddev->private; 4360 if (conf) 4361 return sprintf(page, "%d\n", conf->bypass_threshold); 4362 else 4363 return 0; 4364 } 4365 4366 static ssize_t 4367 raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len) 4368 { 4369 struct r5conf *conf = mddev->private; 4370 unsigned long new; 4371 if 
(len >= PAGE_SIZE) 4372 return -EINVAL; 4373 if (!conf) 4374 return -ENODEV; 4375 4376 if (strict_strtoul(page, 10, &new)) 4377 return -EINVAL; 4378 if (new > conf->max_nr_stripes) 4379 return -EINVAL; 4380 conf->bypass_threshold = new; 4381 return len; 4382 } 4383 4384 static struct md_sysfs_entry 4385 raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold, 4386 S_IRUGO | S_IWUSR, 4387 raid5_show_preread_threshold, 4388 raid5_store_preread_threshold); 4389 4390 static ssize_t 4391 stripe_cache_active_show(struct mddev *mddev, char *page) 4392 { 4393 struct r5conf *conf = mddev->private; 4394 if (conf) 4395 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); 4396 else 4397 return 0; 4398 } 4399 4400 static struct md_sysfs_entry 4401 raid5_stripecache_active = __ATTR_RO(stripe_cache_active); 4402 4403 static struct attribute *raid5_attrs[] = { 4404 &raid5_stripecache_size.attr, 4405 &raid5_stripecache_active.attr, 4406 &raid5_preread_bypass_threshold.attr, 4407 NULL, 4408 }; 4409 static struct attribute_group raid5_attrs_group = { 4410 .name = NULL, 4411 .attrs = raid5_attrs, 4412 }; 4413 4414 static sector_t 4415 raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks) 4416 { 4417 struct r5conf *conf = mddev->private; 4418 4419 if (!sectors) 4420 sectors = mddev->dev_sectors; 4421 if (!raid_disks) 4422 /* size is defined by the smallest of previous and new size */ 4423 raid_disks = min(conf->raid_disks, conf->previous_raid_disks); 4424 4425 sectors &= ~((sector_t)mddev->chunk_sectors - 1); 4426 sectors &= ~((sector_t)mddev->new_chunk_sectors - 1); 4427 return sectors * (raid_disks - conf->max_degraded); 4428 } 4429 4430 static void raid5_free_percpu(struct r5conf *conf) 4431 { 4432 struct raid5_percpu *percpu; 4433 unsigned long cpu; 4434 4435 if (!conf->percpu) 4436 return; 4437 4438 get_online_cpus(); 4439 for_each_possible_cpu(cpu) { 4440 percpu = per_cpu_ptr(conf->percpu, cpu); 4441 safe_put_page(percpu->spare_page); 4442 kfree(percpu->scribble); 4443 } 4444 #ifdef CONFIG_HOTPLUG_CPU 4445 unregister_cpu_notifier(&conf->cpu_notify); 4446 #endif 4447 put_online_cpus(); 4448 4449 free_percpu(conf->percpu); 4450 } 4451 4452 static void free_conf(struct r5conf *conf) 4453 { 4454 shrink_stripes(conf); 4455 raid5_free_percpu(conf); 4456 kfree(conf->disks); 4457 kfree(conf->stripe_hashtbl); 4458 kfree(conf); 4459 } 4460 4461 #ifdef CONFIG_HOTPLUG_CPU 4462 static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action, 4463 void *hcpu) 4464 { 4465 struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify); 4466 long cpu = (long)hcpu; 4467 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); 4468 4469 switch (action) { 4470 case CPU_UP_PREPARE: 4471 case CPU_UP_PREPARE_FROZEN: 4472 if (conf->level == 6 && !percpu->spare_page) 4473 percpu->spare_page = alloc_page(GFP_KERNEL); 4474 if (!percpu->scribble) 4475 percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL); 4476 4477 if (!percpu->scribble || 4478 (conf->level == 6 && !percpu->spare_page)) { 4479 safe_put_page(percpu->spare_page); 4480 kfree(percpu->scribble); 4481 pr_err("%s: failed memory allocation for cpu%ld\n", 4482 __func__, cpu); 4483 return notifier_from_errno(-ENOMEM); 4484 } 4485 break; 4486 case CPU_DEAD: 4487 case CPU_DEAD_FROZEN: 4488 safe_put_page(percpu->spare_page); 4489 kfree(percpu->scribble); 4490 percpu->spare_page = NULL; 4491 percpu->scribble = NULL; 4492 break; 4493 default: 4494 break; 4495 } 4496 return NOTIFY_OK; 4497 } 4498 #endif 4499 4500 
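/* Allocate the per-CPU state used while handling stripes: every present
 * CPU gets a scribble buffer of conf->scribble_len bytes and, for RAID6
 * arrays only, a spare page reserved for the RAID6 paths.  The hotplug
 * notifier above keeps these allocations in step as CPUs come and go.
 */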
static int raid5_alloc_percpu(struct r5conf *conf) 4501 { 4502 unsigned long cpu; 4503 struct page *spare_page; 4504 struct raid5_percpu __percpu *allcpus; 4505 void *scribble; 4506 int err; 4507 4508 allcpus = alloc_percpu(struct raid5_percpu); 4509 if (!allcpus) 4510 return -ENOMEM; 4511 conf->percpu = allcpus; 4512 4513 get_online_cpus(); 4514 err = 0; 4515 for_each_present_cpu(cpu) { 4516 if (conf->level == 6) { 4517 spare_page = alloc_page(GFP_KERNEL); 4518 if (!spare_page) { 4519 err = -ENOMEM; 4520 break; 4521 } 4522 per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page; 4523 } 4524 scribble = kmalloc(conf->scribble_len, GFP_KERNEL); 4525 if (!scribble) { 4526 err = -ENOMEM; 4527 break; 4528 } 4529 per_cpu_ptr(conf->percpu, cpu)->scribble = scribble; 4530 } 4531 #ifdef CONFIG_HOTPLUG_CPU 4532 conf->cpu_notify.notifier_call = raid456_cpu_notify; 4533 conf->cpu_notify.priority = 0; 4534 if (err == 0) 4535 err = register_cpu_notifier(&conf->cpu_notify); 4536 #endif 4537 put_online_cpus(); 4538 4539 return err; 4540 } 4541 4542 static struct r5conf *setup_conf(struct mddev *mddev) 4543 { 4544 struct r5conf *conf; 4545 int raid_disk, memory, max_disks; 4546 struct md_rdev *rdev; 4547 struct disk_info *disk; 4548 4549 if (mddev->new_level != 5 4550 && mddev->new_level != 4 4551 && mddev->new_level != 6) { 4552 printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n", 4553 mdname(mddev), mddev->new_level); 4554 return ERR_PTR(-EIO); 4555 } 4556 if ((mddev->new_level == 5 4557 && !algorithm_valid_raid5(mddev->new_layout)) || 4558 (mddev->new_level == 6 4559 && !algorithm_valid_raid6(mddev->new_layout))) { 4560 printk(KERN_ERR "md/raid:%s: layout %d not supported\n", 4561 mdname(mddev), mddev->new_layout); 4562 return ERR_PTR(-EIO); 4563 } 4564 if (mddev->new_level == 6 && mddev->raid_disks < 4) { 4565 printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n", 4566 mdname(mddev), mddev->raid_disks); 4567 return ERR_PTR(-EINVAL); 4568 } 4569 4570 if (!mddev->new_chunk_sectors || 4571 (mddev->new_chunk_sectors << 9) % PAGE_SIZE || 4572 !is_power_of_2(mddev->new_chunk_sectors)) { 4573 printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n", 4574 mdname(mddev), mddev->new_chunk_sectors << 9); 4575 return ERR_PTR(-EINVAL); 4576 } 4577 4578 conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL); 4579 if (conf == NULL) 4580 goto abort; 4581 spin_lock_init(&conf->device_lock); 4582 init_waitqueue_head(&conf->wait_for_stripe); 4583 init_waitqueue_head(&conf->wait_for_overlap); 4584 INIT_LIST_HEAD(&conf->handle_list); 4585 INIT_LIST_HEAD(&conf->hold_list); 4586 INIT_LIST_HEAD(&conf->delayed_list); 4587 INIT_LIST_HEAD(&conf->bitmap_list); 4588 INIT_LIST_HEAD(&conf->inactive_list); 4589 atomic_set(&conf->active_stripes, 0); 4590 atomic_set(&conf->preread_active_stripes, 0); 4591 atomic_set(&conf->active_aligned_reads, 0); 4592 conf->bypass_threshold = BYPASS_THRESHOLD; 4593 conf->recovery_disabled = mddev->recovery_disabled - 1; 4594 4595 conf->raid_disks = mddev->raid_disks; 4596 if (mddev->reshape_position == MaxSector) 4597 conf->previous_raid_disks = mddev->raid_disks; 4598 else 4599 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; 4600 max_disks = max(conf->raid_disks, conf->previous_raid_disks); 4601 conf->scribble_len = scribble_len(max_disks); 4602 4603 conf->disks = kzalloc(max_disks * sizeof(struct disk_info), 4604 GFP_KERNEL); 4605 if (!conf->disks) 4606 goto abort; 4607 4608 conf->mddev = mddev; 4609 4610 if ((conf->stripe_hashtbl = 
kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) 4611 goto abort; 4612 4613 conf->level = mddev->new_level; 4614 if (raid5_alloc_percpu(conf) != 0) 4615 goto abort; 4616 4617 pr_debug("raid456: run(%s) called.\n", mdname(mddev)); 4618 4619 list_for_each_entry(rdev, &mddev->disks, same_set) { 4620 raid_disk = rdev->raid_disk; 4621 if (raid_disk >= max_disks 4622 || raid_disk < 0) 4623 continue; 4624 disk = conf->disks + raid_disk; 4625 4626 disk->rdev = rdev; 4627 4628 if (test_bit(In_sync, &rdev->flags)) { 4629 char b[BDEVNAME_SIZE]; 4630 printk(KERN_INFO "md/raid:%s: device %s operational as raid" 4631 " disk %d\n", 4632 mdname(mddev), bdevname(rdev->bdev, b), raid_disk); 4633 } else if (rdev->saved_raid_disk != raid_disk) 4634 /* Cannot rely on bitmap to complete recovery */ 4635 conf->fullsync = 1; 4636 } 4637 4638 conf->chunk_sectors = mddev->new_chunk_sectors; 4639 conf->level = mddev->new_level; 4640 if (conf->level == 6) 4641 conf->max_degraded = 2; 4642 else 4643 conf->max_degraded = 1; 4644 conf->algorithm = mddev->new_layout; 4645 conf->max_nr_stripes = NR_STRIPES; 4646 conf->reshape_progress = mddev->reshape_position; 4647 if (conf->reshape_progress != MaxSector) { 4648 conf->prev_chunk_sectors = mddev->chunk_sectors; 4649 conf->prev_algo = mddev->layout; 4650 } 4651 4652 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + 4653 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; 4654 if (grow_stripes(conf, conf->max_nr_stripes)) { 4655 printk(KERN_ERR 4656 "md/raid:%s: couldn't allocate %dkB for buffers\n", 4657 mdname(mddev), memory); 4658 goto abort; 4659 } else 4660 printk(KERN_INFO "md/raid:%s: allocated %dkB\n", 4661 mdname(mddev), memory); 4662 4663 conf->thread = md_register_thread(raid5d, mddev, NULL); 4664 if (!conf->thread) { 4665 printk(KERN_ERR 4666 "md/raid:%s: couldn't allocate thread.\n", 4667 mdname(mddev)); 4668 goto abort; 4669 } 4670 4671 return conf; 4672 4673 abort: 4674 if (conf) { 4675 free_conf(conf); 4676 return ERR_PTR(-EIO); 4677 } else 4678 return ERR_PTR(-ENOMEM); 4679 } 4680 4681 4682 static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded) 4683 { 4684 switch (algo) { 4685 case ALGORITHM_PARITY_0: 4686 if (raid_disk < max_degraded) 4687 return 1; 4688 break; 4689 case ALGORITHM_PARITY_N: 4690 if (raid_disk >= raid_disks - max_degraded) 4691 return 1; 4692 break; 4693 case ALGORITHM_PARITY_0_6: 4694 if (raid_disk == 0 || 4695 raid_disk == raid_disks - 1) 4696 return 1; 4697 break; 4698 case ALGORITHM_LEFT_ASYMMETRIC_6: 4699 case ALGORITHM_RIGHT_ASYMMETRIC_6: 4700 case ALGORITHM_LEFT_SYMMETRIC_6: 4701 case ALGORITHM_RIGHT_SYMMETRIC_6: 4702 if (raid_disk == raid_disks - 1) 4703 return 1; 4704 } 4705 return 0; 4706 } 4707 4708 static int run(struct mddev *mddev) 4709 { 4710 struct r5conf *conf; 4711 int working_disks = 0; 4712 int dirty_parity_disks = 0; 4713 struct md_rdev *rdev; 4714 sector_t reshape_offset = 0; 4715 4716 if (mddev->recovery_cp != MaxSector) 4717 printk(KERN_NOTICE "md/raid:%s: not clean" 4718 " -- starting background reconstruction\n", 4719 mdname(mddev)); 4720 if (mddev->reshape_position != MaxSector) { 4721 /* Check that we can continue the reshape. 4722 * Currently only disks can change, it must 4723 * increase, and we must be past the point where 4724 * a stripe over-writes itself 4725 */ 4726 sector_t here_new, here_old; 4727 int old_disks; 4728 int max_degraded = (mddev->level == 6 ? 
2 : 1); 4729 4730 if (mddev->new_level != mddev->level) { 4731 printk(KERN_ERR "md/raid:%s: unsupported reshape " 4732 "required - aborting.\n", 4733 mdname(mddev)); 4734 return -EINVAL; 4735 } 4736 old_disks = mddev->raid_disks - mddev->delta_disks; 4737 /* reshape_position must be on a new-stripe boundary, and one 4738 * further up in new geometry must map after here in old 4739 * geometry. 4740 */ 4741 here_new = mddev->reshape_position; 4742 if (sector_div(here_new, mddev->new_chunk_sectors * 4743 (mddev->raid_disks - max_degraded))) { 4744 printk(KERN_ERR "md/raid:%s: reshape_position not " 4745 "on a stripe boundary\n", mdname(mddev)); 4746 return -EINVAL; 4747 } 4748 reshape_offset = here_new * mddev->new_chunk_sectors; 4749 /* here_new is the stripe we will write to */ 4750 here_old = mddev->reshape_position; 4751 sector_div(here_old, mddev->chunk_sectors * 4752 (old_disks-max_degraded)); 4753 /* here_old is the first stripe that we might need to read 4754 * from */ 4755 if (mddev->delta_disks == 0) { 4756 /* We cannot be sure it is safe to start an in-place 4757 * reshape. It is only safe if user-space is monitoring 4758 * and taking constant backups. 4759 * mdadm always starts a situation like this in 4760 * readonly mode so it can take control before 4761 * allowing any writes. So just check for that. 4762 */ 4763 if ((here_new * mddev->new_chunk_sectors != 4764 here_old * mddev->chunk_sectors) || 4765 mddev->ro == 0) { 4766 printk(KERN_ERR "md/raid:%s: in-place reshape must be started" 4767 " in read-only mode - aborting\n", 4768 mdname(mddev)); 4769 return -EINVAL; 4770 } 4771 } else if (mddev->delta_disks < 0 4772 ? (here_new * mddev->new_chunk_sectors <= 4773 here_old * mddev->chunk_sectors) 4774 : (here_new * mddev->new_chunk_sectors >= 4775 here_old * mddev->chunk_sectors)) { 4776 /* Reading from the same stripe as writing to - bad */ 4777 printk(KERN_ERR "md/raid:%s: reshape_position too early for " 4778 "auto-recovery - aborting.\n", 4779 mdname(mddev)); 4780 return -EINVAL; 4781 } 4782 printk(KERN_INFO "md/raid:%s: reshape will continue\n", 4783 mdname(mddev)); 4784 /* OK, we should be able to continue; */ 4785 } else { 4786 BUG_ON(mddev->level != mddev->new_level); 4787 BUG_ON(mddev->layout != mddev->new_layout); 4788 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors); 4789 BUG_ON(mddev->delta_disks != 0); 4790 } 4791 4792 if (mddev->private == NULL) 4793 conf = setup_conf(mddev); 4794 else 4795 conf = mddev->private; 4796 4797 if (IS_ERR(conf)) 4798 return PTR_ERR(conf); 4799 4800 mddev->thread = conf->thread; 4801 conf->thread = NULL; 4802 mddev->private = conf; 4803 4804 /* 4805 * 0 for a fully functional array, 1 or 2 for a degraded array. 4806 */ 4807 list_for_each_entry(rdev, &mddev->disks, same_set) { 4808 if (rdev->raid_disk < 0) 4809 continue; 4810 if (test_bit(In_sync, &rdev->flags)) { 4811 working_disks++; 4812 continue; 4813 } 4814 /* This disc is not fully in-sync. However if it 4815 * just stored parity (beyond the recovery_offset), 4816 * then we don't need to be concerned about the 4817 * array being dirty. 4818 * When reshape goes 'backwards', we never have 4819 * partially completed devices, so we only need 4820 * to worry about reshape going forwards. 4821 */ 4822 /* Hack because v0.91 doesn't store recovery_offset properly. 
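 * For those (0.91) superblocks we simply assume the device has been
 * recovered up to the current reshape position and set
 * rdev->recovery_offset to reshape_offset below.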
*/ 4823 if (mddev->major_version == 0 && 4824 mddev->minor_version > 90) 4825 rdev->recovery_offset = reshape_offset; 4826 4827 if (rdev->recovery_offset < reshape_offset) { 4828 /* We need to check old and new layout */ 4829 if (!only_parity(rdev->raid_disk, 4830 conf->algorithm, 4831 conf->raid_disks, 4832 conf->max_degraded)) 4833 continue; 4834 } 4835 if (!only_parity(rdev->raid_disk, 4836 conf->prev_algo, 4837 conf->previous_raid_disks, 4838 conf->max_degraded)) 4839 continue; 4840 dirty_parity_disks++; 4841 } 4842 4843 mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks) 4844 - working_disks); 4845 4846 if (has_failed(conf)) { 4847 printk(KERN_ERR "md/raid:%s: not enough operational devices" 4848 " (%d/%d failed)\n", 4849 mdname(mddev), mddev->degraded, conf->raid_disks); 4850 goto abort; 4851 } 4852 4853 /* device size must be a multiple of chunk size */ 4854 mddev->dev_sectors &= ~(mddev->chunk_sectors - 1); 4855 mddev->resync_max_sectors = mddev->dev_sectors; 4856 4857 if (mddev->degraded > dirty_parity_disks && 4858 mddev->recovery_cp != MaxSector) { 4859 if (mddev->ok_start_degraded) 4860 printk(KERN_WARNING 4861 "md/raid:%s: starting dirty degraded array" 4862 " - data corruption possible.\n", 4863 mdname(mddev)); 4864 else { 4865 printk(KERN_ERR 4866 "md/raid:%s: cannot start dirty degraded array.\n", 4867 mdname(mddev)); 4868 goto abort; 4869 } 4870 } 4871 4872 if (mddev->degraded == 0) 4873 printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d" 4874 " devices, algorithm %d\n", mdname(mddev), conf->level, 4875 mddev->raid_disks-mddev->degraded, mddev->raid_disks, 4876 mddev->new_layout); 4877 else 4878 printk(KERN_ALERT "md/raid:%s: raid level %d active with %d" 4879 " out of %d devices, algorithm %d\n", 4880 mdname(mddev), conf->level, 4881 mddev->raid_disks - mddev->degraded, 4882 mddev->raid_disks, mddev->new_layout); 4883 4884 print_raid5_conf(conf); 4885 4886 if (conf->reshape_progress != MaxSector) { 4887 conf->reshape_safe = conf->reshape_progress; 4888 atomic_set(&conf->reshape_stripes, 0); 4889 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4890 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4891 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 4892 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 4893 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 4894 "reshape"); 4895 } 4896 4897 4898 /* Ok, everything is just fine now */ 4899 if (mddev->to_remove == &raid5_attrs_group) 4900 mddev->to_remove = NULL; 4901 else if (mddev->kobj.sd && 4902 sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) 4903 printk(KERN_WARNING 4904 "raid5: failed to create sysfs attributes for %s\n", 4905 mdname(mddev)); 4906 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 4907 4908 if (mddev->queue) { 4909 int chunk_size; 4910 /* read-ahead size must cover two whole stripes, which 4911 * is 2 * (datadisks) * chunksize where 'n' is the 4912 * number of raid devices 4913 */ 4914 int data_disks = conf->previous_raid_disks - conf->max_degraded; 4915 int stripe = data_disks * 4916 ((mddev->chunk_sectors << 9) / PAGE_SIZE); 4917 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 4918 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 4919 4920 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec); 4921 4922 mddev->queue->backing_dev_info.congested_data = mddev; 4923 mddev->queue->backing_dev_info.congested_fn = raid5_congested; 4924 4925 chunk_size = mddev->chunk_sectors << 9; 4926 blk_queue_io_min(mddev->queue, chunk_size); 4927 
blk_queue_io_opt(mddev->queue, chunk_size * 4928 (conf->raid_disks - conf->max_degraded)); 4929 4930 list_for_each_entry(rdev, &mddev->disks, same_set) 4931 disk_stack_limits(mddev->gendisk, rdev->bdev, 4932 rdev->data_offset << 9); 4933 } 4934 4935 return 0; 4936 abort: 4937 md_unregister_thread(&mddev->thread); 4938 print_raid5_conf(conf); 4939 free_conf(conf); 4940 mddev->private = NULL; 4941 printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev)); 4942 return -EIO; 4943 } 4944 4945 static int stop(struct mddev *mddev) 4946 { 4947 struct r5conf *conf = mddev->private; 4948 4949 md_unregister_thread(&mddev->thread); 4950 if (mddev->queue) 4951 mddev->queue->backing_dev_info.congested_fn = NULL; 4952 free_conf(conf); 4953 mddev->private = NULL; 4954 mddev->to_remove = &raid5_attrs_group; 4955 return 0; 4956 } 4957 4958 static void status(struct seq_file *seq, struct mddev *mddev) 4959 { 4960 struct r5conf *conf = mddev->private; 4961 int i; 4962 4963 seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level, 4964 mddev->chunk_sectors / 2, mddev->layout); 4965 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); 4966 for (i = 0; i < conf->raid_disks; i++) 4967 seq_printf (seq, "%s", 4968 conf->disks[i].rdev && 4969 test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_"); 4970 seq_printf (seq, "]"); 4971 } 4972 4973 static void print_raid5_conf (struct r5conf *conf) 4974 { 4975 int i; 4976 struct disk_info *tmp; 4977 4978 printk(KERN_DEBUG "RAID conf printout:\n"); 4979 if (!conf) { 4980 printk("(conf==NULL)\n"); 4981 return; 4982 } 4983 printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level, 4984 conf->raid_disks, 4985 conf->raid_disks - conf->mddev->degraded); 4986 4987 for (i = 0; i < conf->raid_disks; i++) { 4988 char b[BDEVNAME_SIZE]; 4989 tmp = conf->disks + i; 4990 if (tmp->rdev) 4991 printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n", 4992 i, !test_bit(Faulty, &tmp->rdev->flags), 4993 bdevname(tmp->rdev->bdev, b)); 4994 } 4995 } 4996 4997 static int raid5_spare_active(struct mddev *mddev) 4998 { 4999 int i; 5000 struct r5conf *conf = mddev->private; 5001 struct disk_info *tmp; 5002 int count = 0; 5003 unsigned long flags; 5004 5005 for (i = 0; i < conf->raid_disks; i++) { 5006 tmp = conf->disks + i; 5007 if (tmp->rdev 5008 && tmp->rdev->recovery_offset == MaxSector 5009 && !test_bit(Faulty, &tmp->rdev->flags) 5010 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 5011 count++; 5012 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state); 5013 } 5014 } 5015 spin_lock_irqsave(&conf->device_lock, flags); 5016 mddev->degraded -= count; 5017 spin_unlock_irqrestore(&conf->device_lock, flags); 5018 print_raid5_conf(conf); 5019 return count; 5020 } 5021 5022 static int raid5_remove_disk(struct mddev *mddev, int number) 5023 { 5024 struct r5conf *conf = mddev->private; 5025 int err = 0; 5026 struct md_rdev *rdev; 5027 struct disk_info *p = conf->disks + number; 5028 5029 print_raid5_conf(conf); 5030 rdev = p->rdev; 5031 if (rdev) { 5032 if (number >= conf->raid_disks && 5033 conf->reshape_progress == MaxSector) 5034 clear_bit(In_sync, &rdev->flags); 5035 5036 if (test_bit(In_sync, &rdev->flags) || 5037 atomic_read(&rdev->nr_pending)) { 5038 err = -EBUSY; 5039 goto abort; 5040 } 5041 /* Only remove non-faulty devices if recovery 5042 * isn't possible. 
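 * That is: a non-faulty device still occupying an active slot is only
 * released once recovery has been disabled for this array or the array
 * as a whole has failed; otherwise we return -EBUSY below.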
5043 */ 5044 if (!test_bit(Faulty, &rdev->flags) && 5045 mddev->recovery_disabled != conf->recovery_disabled && 5046 !has_failed(conf) && 5047 number < conf->raid_disks) { 5048 err = -EBUSY; 5049 goto abort; 5050 } 5051 p->rdev = NULL; 5052 synchronize_rcu(); 5053 if (atomic_read(&rdev->nr_pending)) { 5054 /* lost the race, try later */ 5055 err = -EBUSY; 5056 p->rdev = rdev; 5057 } 5058 } 5059 abort: 5060 5061 print_raid5_conf(conf); 5062 return err; 5063 } 5064 5065 static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) 5066 { 5067 struct r5conf *conf = mddev->private; 5068 int err = -EEXIST; 5069 int disk; 5070 struct disk_info *p; 5071 int first = 0; 5072 int last = conf->raid_disks - 1; 5073 5074 if (mddev->recovery_disabled == conf->recovery_disabled) 5075 return -EBUSY; 5076 5077 if (has_failed(conf)) 5078 /* no point adding a device */ 5079 return -EINVAL; 5080 5081 if (rdev->raid_disk >= 0) 5082 first = last = rdev->raid_disk; 5083 5084 /* 5085 * find the disk ... but prefer rdev->saved_raid_disk 5086 * if possible. 5087 */ 5088 if (rdev->saved_raid_disk >= 0 && 5089 rdev->saved_raid_disk >= first && 5090 conf->disks[rdev->saved_raid_disk].rdev == NULL) 5091 disk = rdev->saved_raid_disk; 5092 else 5093 disk = first; 5094 for ( ; disk <= last ; disk++) 5095 if ((p=conf->disks + disk)->rdev == NULL) { 5096 clear_bit(In_sync, &rdev->flags); 5097 rdev->raid_disk = disk; 5098 err = 0; 5099 if (rdev->saved_raid_disk != disk) 5100 conf->fullsync = 1; 5101 rcu_assign_pointer(p->rdev, rdev); 5102 break; 5103 } 5104 print_raid5_conf(conf); 5105 return err; 5106 } 5107 5108 static int raid5_resize(struct mddev *mddev, sector_t sectors) 5109 { 5110 /* no resync is happening, and there is enough space 5111 * on all devices, so we can resize. 5112 * We need to make sure resync covers any new space. 5113 * If the array is shrinking we should possibly wait until 5114 * any io in the removed space completes, but it hardly seems 5115 * worth it. 5116 */ 5117 sectors &= ~((sector_t)mddev->chunk_sectors - 1); 5118 md_set_array_sectors(mddev, raid5_size(mddev, sectors, 5119 mddev->raid_disks)); 5120 if (mddev->array_sectors > 5121 raid5_size(mddev, sectors, mddev->raid_disks)) 5122 return -EINVAL; 5123 set_capacity(mddev->gendisk, mddev->array_sectors); 5124 revalidate_disk(mddev->gendisk); 5125 if (sectors > mddev->dev_sectors && 5126 mddev->recovery_cp > mddev->dev_sectors) { 5127 mddev->recovery_cp = mddev->dev_sectors; 5128 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5129 } 5130 mddev->dev_sectors = sectors; 5131 mddev->resync_max_sectors = sectors; 5132 return 0; 5133 } 5134 5135 static int check_stripe_cache(struct mddev *mddev) 5136 { 5137 /* Can only proceed if there are plenty of stripe_heads. 5138 * We need a minimum of one full stripe,, and for sensible progress 5139 * it is best to have about 4 times that. 5140 * If we require 4 times, then the default 256 4K stripe_heads will 5141 * allow for chunk sizes up to 256K, which is probably OK. 5142 * If the chunk size is greater, user-space should request more 5143 * stripe_heads first. 5144 */ 5145 struct r5conf *conf = mddev->private; 5146 if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4 5147 > conf->max_nr_stripes || 5148 ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4 5149 > conf->max_nr_stripes) { 5150 printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. 
Needed %lu\n", 5151 mdname(mddev), 5152 ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9) 5153 / STRIPE_SIZE)*4); 5154 return 0; 5155 } 5156 return 1; 5157 } 5158 5159 static int check_reshape(struct mddev *mddev) 5160 { 5161 struct r5conf *conf = mddev->private; 5162 5163 if (mddev->delta_disks == 0 && 5164 mddev->new_layout == mddev->layout && 5165 mddev->new_chunk_sectors == mddev->chunk_sectors) 5166 return 0; /* nothing to do */ 5167 if (mddev->bitmap) 5168 /* Cannot grow a bitmap yet */ 5169 return -EBUSY; 5170 if (has_failed(conf)) 5171 return -EINVAL; 5172 if (mddev->delta_disks < 0) { 5173 /* We might be able to shrink, but the devices must 5174 * be made bigger first. 5175 * For raid6, 4 is the minimum size. 5176 * Otherwise 2 is the minimum 5177 */ 5178 int min = 2; 5179 if (mddev->level == 6) 5180 min = 4; 5181 if (mddev->raid_disks + mddev->delta_disks < min) 5182 return -EINVAL; 5183 } 5184 5185 if (!check_stripe_cache(mddev)) 5186 return -ENOSPC; 5187 5188 return resize_stripes(conf, conf->raid_disks + mddev->delta_disks); 5189 } 5190 5191 static int raid5_start_reshape(struct mddev *mddev) 5192 { 5193 struct r5conf *conf = mddev->private; 5194 struct md_rdev *rdev; 5195 int spares = 0; 5196 unsigned long flags; 5197 5198 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 5199 return -EBUSY; 5200 5201 if (!check_stripe_cache(mddev)) 5202 return -ENOSPC; 5203 5204 list_for_each_entry(rdev, &mddev->disks, same_set) 5205 if (!test_bit(In_sync, &rdev->flags) 5206 && !test_bit(Faulty, &rdev->flags)) 5207 spares++; 5208 5209 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) 5210 /* Not enough devices even to make a degraded array 5211 * of that size 5212 */ 5213 return -EINVAL; 5214 5215 /* Refuse to reduce size of the array. Any reductions in 5216 * array size must be through explicit setting of array_size 5217 * attribute. 5218 */ 5219 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) 5220 < mddev->array_sectors) { 5221 printk(KERN_ERR "md/raid:%s: array size must be reduced " 5222 "before number of disks\n", mdname(mddev)); 5223 return -EINVAL; 5224 } 5225 5226 atomic_set(&conf->reshape_stripes, 0); 5227 spin_lock_irq(&conf->device_lock); 5228 conf->previous_raid_disks = conf->raid_disks; 5229 conf->raid_disks += mddev->delta_disks; 5230 conf->prev_chunk_sectors = conf->chunk_sectors; 5231 conf->chunk_sectors = mddev->new_chunk_sectors; 5232 conf->prev_algo = conf->algorithm; 5233 conf->algorithm = mddev->new_layout; 5234 if (mddev->delta_disks < 0) 5235 conf->reshape_progress = raid5_size(mddev, 0, 0); 5236 else 5237 conf->reshape_progress = 0; 5238 conf->reshape_safe = conf->reshape_progress; 5239 conf->generation++; 5240 spin_unlock_irq(&conf->device_lock); 5241 5242 /* Add some new drives, as many as will fit. 5243 * We know there are enough to make the newly sized array work. 5244 * Don't add devices if we are reducing the number of 5245 * devices in the array. This is because it is not possible 5246 * to correctly record the "partially reconstructed" state of 5247 * such devices during the reshape and confusion could result. 
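 *
 * A hedged example with invented numbers: growing from 4 to 6 devices
 * when only one of the two new slots can be filled gives
 *   (raid_disks - previous_raid_disks) - added_devices = (6 - 4) - 1 = 1,
 * so ->degraded is bumped by one and the reshaped array starts out one
 * device short.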
5248 */ 5249 if (mddev->delta_disks >= 0) { 5250 int added_devices = 0; 5251 list_for_each_entry(rdev, &mddev->disks, same_set) 5252 if (rdev->raid_disk < 0 && 5253 !test_bit(Faulty, &rdev->flags)) { 5254 if (raid5_add_disk(mddev, rdev) == 0) { 5255 if (rdev->raid_disk 5256 >= conf->previous_raid_disks) { 5257 set_bit(In_sync, &rdev->flags); 5258 added_devices++; 5259 } else 5260 rdev->recovery_offset = 0; 5261 5262 if (sysfs_link_rdev(mddev, rdev)) 5263 /* Failure here is OK */; 5264 } 5265 } else if (rdev->raid_disk >= conf->previous_raid_disks 5266 && !test_bit(Faulty, &rdev->flags)) { 5267 /* This is a spare that was manually added */ 5268 set_bit(In_sync, &rdev->flags); 5269 added_devices++; 5270 } 5271 5272 /* When a reshape changes the number of devices, 5273 * ->degraded is measured against the larger of the 5274 * pre and post number of devices. 5275 */ 5276 spin_lock_irqsave(&conf->device_lock, flags); 5277 mddev->degraded += (conf->raid_disks - conf->previous_raid_disks) 5278 - added_devices; 5279 spin_unlock_irqrestore(&conf->device_lock, flags); 5280 } 5281 mddev->raid_disks = conf->raid_disks; 5282 mddev->reshape_position = conf->reshape_progress; 5283 set_bit(MD_CHANGE_DEVS, &mddev->flags); 5284 5285 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 5286 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 5287 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 5288 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 5289 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 5290 "reshape"); 5291 if (!mddev->sync_thread) { 5292 mddev->recovery = 0; 5293 spin_lock_irq(&conf->device_lock); 5294 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; 5295 conf->reshape_progress = MaxSector; 5296 spin_unlock_irq(&conf->device_lock); 5297 return -EAGAIN; 5298 } 5299 conf->reshape_checkpoint = jiffies; 5300 md_wakeup_thread(mddev->sync_thread); 5301 md_new_event(mddev); 5302 return 0; 5303 } 5304 5305 /* This is called from the reshape thread and should make any 5306 * changes needed in 'conf' 5307 */ 5308 static void end_reshape(struct r5conf *conf) 5309 { 5310 5311 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { 5312 5313 spin_lock_irq(&conf->device_lock); 5314 conf->previous_raid_disks = conf->raid_disks; 5315 conf->reshape_progress = MaxSector; 5316 spin_unlock_irq(&conf->device_lock); 5317 wake_up(&conf->wait_for_overlap); 5318 5319 /* read-ahead size must cover two whole stripes, which is 5320 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices 5321 */ 5322 if (conf->mddev->queue) { 5323 int data_disks = conf->raid_disks - conf->max_degraded; 5324 int stripe = data_disks * ((conf->chunk_sectors << 9) 5325 / PAGE_SIZE); 5326 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 5327 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 5328 } 5329 } 5330 } 5331 5332 /* This is called from the raid5d thread with mddev_lock held. 5333 * It makes config changes to the device. 
5334 */ 5335 static void raid5_finish_reshape(struct mddev *mddev) 5336 { 5337 struct r5conf *conf = mddev->private; 5338 5339 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 5340 5341 if (mddev->delta_disks > 0) { 5342 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 5343 set_capacity(mddev->gendisk, mddev->array_sectors); 5344 revalidate_disk(mddev->gendisk); 5345 } else { 5346 int d; 5347 mddev->degraded = conf->raid_disks; 5348 for (d = 0; d < conf->raid_disks ; d++) 5349 if (conf->disks[d].rdev && 5350 test_bit(In_sync, 5351 &conf->disks[d].rdev->flags)) 5352 mddev->degraded--; 5353 for (d = conf->raid_disks ; 5354 d < conf->raid_disks - mddev->delta_disks; 5355 d++) { 5356 struct md_rdev *rdev = conf->disks[d].rdev; 5357 if (rdev && raid5_remove_disk(mddev, d) == 0) { 5358 sysfs_unlink_rdev(mddev, rdev); 5359 rdev->raid_disk = -1; 5360 } 5361 } 5362 } 5363 mddev->layout = conf->algorithm; 5364 mddev->chunk_sectors = conf->chunk_sectors; 5365 mddev->reshape_position = MaxSector; 5366 mddev->delta_disks = 0; 5367 } 5368 } 5369 5370 static void raid5_quiesce(struct mddev *mddev, int state) 5371 { 5372 struct r5conf *conf = mddev->private; 5373 5374 switch(state) { 5375 case 2: /* resume for a suspend */ 5376 wake_up(&conf->wait_for_overlap); 5377 break; 5378 5379 case 1: /* stop all writes */ 5380 spin_lock_irq(&conf->device_lock); 5381 /* '2' tells resync/reshape to pause so that all 5382 * active stripes can drain 5383 */ 5384 conf->quiesce = 2; 5385 wait_event_lock_irq(conf->wait_for_stripe, 5386 atomic_read(&conf->active_stripes) == 0 && 5387 atomic_read(&conf->active_aligned_reads) == 0, 5388 conf->device_lock, /* nothing */); 5389 conf->quiesce = 1; 5390 spin_unlock_irq(&conf->device_lock); 5391 /* allow reshape to continue */ 5392 wake_up(&conf->wait_for_overlap); 5393 break; 5394 5395 case 0: /* re-enable writes */ 5396 spin_lock_irq(&conf->device_lock); 5397 conf->quiesce = 0; 5398 wake_up(&conf->wait_for_stripe); 5399 wake_up(&conf->wait_for_overlap); 5400 spin_unlock_irq(&conf->device_lock); 5401 break; 5402 } 5403 } 5404 5405 5406 static void *raid45_takeover_raid0(struct mddev *mddev, int level) 5407 { 5408 struct r0conf *raid0_conf = mddev->private; 5409 sector_t sectors; 5410 5411 /* for raid0 takeover only one zone is supported */ 5412 if (raid0_conf->nr_strip_zones > 1) { 5413 printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n", 5414 mdname(mddev)); 5415 return ERR_PTR(-EINVAL); 5416 } 5417 5418 sectors = raid0_conf->strip_zone[0].zone_end; 5419 sector_div(sectors, raid0_conf->strip_zone[0].nb_dev); 5420 mddev->dev_sectors = sectors; 5421 mddev->new_level = level; 5422 mddev->new_layout = ALGORITHM_PARITY_N; 5423 mddev->new_chunk_sectors = mddev->chunk_sectors; 5424 mddev->raid_disks += 1; 5425 mddev->delta_disks = 1; 5426 /* make sure it will be not marked as dirty */ 5427 mddev->recovery_cp = MaxSector; 5428 5429 return setup_conf(mddev); 5430 } 5431 5432 5433 static void *raid5_takeover_raid1(struct mddev *mddev) 5434 { 5435 int chunksect; 5436 5437 if (mddev->raid_disks != 2 || 5438 mddev->degraded > 1) 5439 return ERR_PTR(-EINVAL); 5440 5441 /* Should check if there are write-behind devices? 
*/ 5442 5443 chunksect = 64*2; /* 64K by default */ 5444 5445 /* The array must be an exact multiple of chunksize */ 5446 while (chunksect && (mddev->array_sectors & (chunksect-1))) 5447 chunksect >>= 1; 5448 5449 if ((chunksect<<9) < STRIPE_SIZE) 5450 /* array size does not allow a suitable chunk size */ 5451 return ERR_PTR(-EINVAL); 5452 5453 mddev->new_level = 5; 5454 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC; 5455 mddev->new_chunk_sectors = chunksect; 5456 5457 return setup_conf(mddev); 5458 } 5459 5460 static void *raid5_takeover_raid6(struct mddev *mddev) 5461 { 5462 int new_layout; 5463 5464 switch (mddev->layout) { 5465 case ALGORITHM_LEFT_ASYMMETRIC_6: 5466 new_layout = ALGORITHM_LEFT_ASYMMETRIC; 5467 break; 5468 case ALGORITHM_RIGHT_ASYMMETRIC_6: 5469 new_layout = ALGORITHM_RIGHT_ASYMMETRIC; 5470 break; 5471 case ALGORITHM_LEFT_SYMMETRIC_6: 5472 new_layout = ALGORITHM_LEFT_SYMMETRIC; 5473 break; 5474 case ALGORITHM_RIGHT_SYMMETRIC_6: 5475 new_layout = ALGORITHM_RIGHT_SYMMETRIC; 5476 break; 5477 case ALGORITHM_PARITY_0_6: 5478 new_layout = ALGORITHM_PARITY_0; 5479 break; 5480 case ALGORITHM_PARITY_N: 5481 new_layout = ALGORITHM_PARITY_N; 5482 break; 5483 default: 5484 return ERR_PTR(-EINVAL); 5485 } 5486 mddev->new_level = 5; 5487 mddev->new_layout = new_layout; 5488 mddev->delta_disks = -1; 5489 mddev->raid_disks -= 1; 5490 return setup_conf(mddev); 5491 } 5492 5493 5494 static int raid5_check_reshape(struct mddev *mddev) 5495 { 5496 /* For a 2-drive array, the layout and chunk size can be changed 5497 * immediately as not restriping is needed. 5498 * For larger arrays we record the new value - after validation 5499 * to be used by a reshape pass. 5500 */ 5501 struct r5conf *conf = mddev->private; 5502 int new_chunk = mddev->new_chunk_sectors; 5503 5504 if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout)) 5505 return -EINVAL; 5506 if (new_chunk > 0) { 5507 if (!is_power_of_2(new_chunk)) 5508 return -EINVAL; 5509 if (new_chunk < (PAGE_SIZE>>9)) 5510 return -EINVAL; 5511 if (mddev->array_sectors & (new_chunk-1)) 5512 /* not factor of array size */ 5513 return -EINVAL; 5514 } 5515 5516 /* They look valid */ 5517 5518 if (mddev->raid_disks == 2) { 5519 /* can make the change immediately */ 5520 if (mddev->new_layout >= 0) { 5521 conf->algorithm = mddev->new_layout; 5522 mddev->layout = mddev->new_layout; 5523 } 5524 if (new_chunk > 0) { 5525 conf->chunk_sectors = new_chunk ; 5526 mddev->chunk_sectors = new_chunk; 5527 } 5528 set_bit(MD_CHANGE_DEVS, &mddev->flags); 5529 md_wakeup_thread(mddev->thread); 5530 } 5531 return check_reshape(mddev); 5532 } 5533 5534 static int raid6_check_reshape(struct mddev *mddev) 5535 { 5536 int new_chunk = mddev->new_chunk_sectors; 5537 5538 if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout)) 5539 return -EINVAL; 5540 if (new_chunk > 0) { 5541 if (!is_power_of_2(new_chunk)) 5542 return -EINVAL; 5543 if (new_chunk < (PAGE_SIZE >> 9)) 5544 return -EINVAL; 5545 if (mddev->array_sectors & (new_chunk-1)) 5546 /* not factor of array size */ 5547 return -EINVAL; 5548 } 5549 5550 /* They look valid */ 5551 return check_reshape(mddev); 5552 } 5553 5554 static void *raid5_takeover(struct mddev *mddev) 5555 { 5556 /* raid5 can take over: 5557 * raid0 - if there is only one strip zone - make it a raid4 layout 5558 * raid1 - if there are two drives. We need to know the chunk size 5559 * raid4 - trivial - just use a raid4 layout. 
5560 * raid6 - Providing it is a *_6 layout 5561 */ 5562 if (mddev->level == 0) 5563 return raid45_takeover_raid0(mddev, 5); 5564 if (mddev->level == 1) 5565 return raid5_takeover_raid1(mddev); 5566 if (mddev->level == 4) { 5567 mddev->new_layout = ALGORITHM_PARITY_N; 5568 mddev->new_level = 5; 5569 return setup_conf(mddev); 5570 } 5571 if (mddev->level == 6) 5572 return raid5_takeover_raid6(mddev); 5573 5574 return ERR_PTR(-EINVAL); 5575 } 5576 5577 static void *raid4_takeover(struct mddev *mddev) 5578 { 5579 /* raid4 can take over: 5580 * raid0 - if there is only one strip zone 5581 * raid5 - if layout is right 5582 */ 5583 if (mddev->level == 0) 5584 return raid45_takeover_raid0(mddev, 4); 5585 if (mddev->level == 5 && 5586 mddev->layout == ALGORITHM_PARITY_N) { 5587 mddev->new_layout = 0; 5588 mddev->new_level = 4; 5589 return setup_conf(mddev); 5590 } 5591 return ERR_PTR(-EINVAL); 5592 } 5593 5594 static struct md_personality raid5_personality; 5595 5596 static void *raid6_takeover(struct mddev *mddev) 5597 { 5598 /* Currently can only take over a raid5. We map the 5599 * personality to an equivalent raid6 personality 5600 * with the Q block at the end. 5601 */ 5602 int new_layout; 5603 5604 if (mddev->pers != &raid5_personality) 5605 return ERR_PTR(-EINVAL); 5606 if (mddev->degraded > 1) 5607 return ERR_PTR(-EINVAL); 5608 if (mddev->raid_disks > 253) 5609 return ERR_PTR(-EINVAL); 5610 if (mddev->raid_disks < 3) 5611 return ERR_PTR(-EINVAL); 5612 5613 switch (mddev->layout) { 5614 case ALGORITHM_LEFT_ASYMMETRIC: 5615 new_layout = ALGORITHM_LEFT_ASYMMETRIC_6; 5616 break; 5617 case ALGORITHM_RIGHT_ASYMMETRIC: 5618 new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6; 5619 break; 5620 case ALGORITHM_LEFT_SYMMETRIC: 5621 new_layout = ALGORITHM_LEFT_SYMMETRIC_6; 5622 break; 5623 case ALGORITHM_RIGHT_SYMMETRIC: 5624 new_layout = ALGORITHM_RIGHT_SYMMETRIC_6; 5625 break; 5626 case ALGORITHM_PARITY_0: 5627 new_layout = ALGORITHM_PARITY_0_6; 5628 break; 5629 case ALGORITHM_PARITY_N: 5630 new_layout = ALGORITHM_PARITY_N; 5631 break; 5632 default: 5633 return ERR_PTR(-EINVAL); 5634 } 5635 mddev->new_level = 6; 5636 mddev->new_layout = new_layout; 5637 mddev->delta_disks = 1; 5638 mddev->raid_disks += 1; 5639 return setup_conf(mddev); 5640 } 5641 5642 5643 static struct md_personality raid6_personality = 5644 { 5645 .name = "raid6", 5646 .level = 6, 5647 .owner = THIS_MODULE, 5648 .make_request = make_request, 5649 .run = run, 5650 .stop = stop, 5651 .status = status, 5652 .error_handler = error, 5653 .hot_add_disk = raid5_add_disk, 5654 .hot_remove_disk= raid5_remove_disk, 5655 .spare_active = raid5_spare_active, 5656 .sync_request = sync_request, 5657 .resize = raid5_resize, 5658 .size = raid5_size, 5659 .check_reshape = raid6_check_reshape, 5660 .start_reshape = raid5_start_reshape, 5661 .finish_reshape = raid5_finish_reshape, 5662 .quiesce = raid5_quiesce, 5663 .takeover = raid6_takeover, 5664 }; 5665 static struct md_personality raid5_personality = 5666 { 5667 .name = "raid5", 5668 .level = 5, 5669 .owner = THIS_MODULE, 5670 .make_request = make_request, 5671 .run = run, 5672 .stop = stop, 5673 .status = status, 5674 .error_handler = error, 5675 .hot_add_disk = raid5_add_disk, 5676 .hot_remove_disk= raid5_remove_disk, 5677 .spare_active = raid5_spare_active, 5678 .sync_request = sync_request, 5679 .resize = raid5_resize, 5680 .size = raid5_size, 5681 .check_reshape = raid5_check_reshape, 5682 .start_reshape = raid5_start_reshape, 5683 .finish_reshape = raid5_finish_reshape, 5684 .quiesce = 
raid5_quiesce, 5685 .takeover = raid5_takeover, 5686 }; 5687 5688 static struct md_personality raid4_personality = 5689 { 5690 .name = "raid4", 5691 .level = 4, 5692 .owner = THIS_MODULE, 5693 .make_request = make_request, 5694 .run = run, 5695 .stop = stop, 5696 .status = status, 5697 .error_handler = error, 5698 .hot_add_disk = raid5_add_disk, 5699 .hot_remove_disk= raid5_remove_disk, 5700 .spare_active = raid5_spare_active, 5701 .sync_request = sync_request, 5702 .resize = raid5_resize, 5703 .size = raid5_size, 5704 .check_reshape = raid5_check_reshape, 5705 .start_reshape = raid5_start_reshape, 5706 .finish_reshape = raid5_finish_reshape, 5707 .quiesce = raid5_quiesce, 5708 .takeover = raid4_takeover, 5709 }; 5710 5711 static int __init raid5_init(void) 5712 { 5713 register_md_personality(&raid6_personality); 5714 register_md_personality(&raid5_personality); 5715 register_md_personality(&raid4_personality); 5716 return 0; 5717 } 5718 5719 static void raid5_exit(void) 5720 { 5721 unregister_md_personality(&raid6_personality); 5722 unregister_md_personality(&raid5_personality); 5723 unregister_md_personality(&raid4_personality); 5724 } 5725 5726 module_init(raid5_init); 5727 module_exit(raid5_exit); 5728 MODULE_LICENSE("GPL"); 5729 MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD"); 5730 MODULE_ALIAS("md-personality-4"); /* RAID5 */ 5731 MODULE_ALIAS("md-raid5"); 5732 MODULE_ALIAS("md-raid4"); 5733 MODULE_ALIAS("md-level-5"); 5734 MODULE_ALIAS("md-level-4"); 5735 MODULE_ALIAS("md-personality-8"); /* RAID6 */ 5736 MODULE_ALIAS("md-raid6"); 5737 MODULE_ALIAS("md-level-6"); 5738 5739 /* This used to be two separate modules, they were: */ 5740 MODULE_ALIAS("raid5"); 5741 MODULE_ALIAS("raid6"); 5742
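/*
 * Tuning sketch (not authoritative documentation): the sysfs attributes
 * defined above normally appear under /sys/block/mdX/md/.  The cache
 * size must lie between 17 and 32768 stripes, and the bypass threshold
 * may not exceed the current cache size, so for example:
 *
 *   echo 4096 > /sys/block/md0/md/stripe_cache_size
 *   echo 2    > /sys/block/md0/md/preread_bypass_threshold
 *   cat /sys/block/md0/md/stripe_cache_active
 */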