1 /* 2 * raid5.c : Multiple Devices driver for Linux 3 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman 4 * Copyright (C) 1999, 2000 Ingo Molnar 5 * Copyright (C) 2002, 2003 H. Peter Anvin 6 * 7 * RAID-4/5/6 management functions. 8 * Thanks to Penguin Computing for making the RAID-6 development possible 9 * by donating a test server! 10 * 11 * This program is free software; you can redistribute it and/or modify 12 * it under the terms of the GNU General Public License as published by 13 * the Free Software Foundation; either version 2, or (at your option) 14 * any later version. 15 * 16 * You should have received a copy of the GNU General Public License 17 * (for example /usr/src/linux/COPYING); if not, write to the Free 18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 */ 20 21 /* 22 * BITMAP UNPLUGGING: 23 * 24 * The sequencing for updating the bitmap reliably is a little 25 * subtle (and I got it wrong the first time) so it deserves some 26 * explanation. 27 * 28 * We group bitmap updates into batches. Each batch has a number. 29 * We may write out several batches at once, but that isn't very important. 30 * conf->seq_write is the number of the last batch successfully written. 31 * conf->seq_flush is the number of the last batch that was closed to 32 * new additions. 33 * When we discover that we will need to write to any block in a stripe 34 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq 35 * the number of the batch it will be in. This is seq_flush+1. 36 * When we are ready to do a write, if that batch hasn't been written yet, 37 * we plug the array and queue the stripe for later. 38 * When an unplug happens, we increment bm_flush, thus closing the current 39 * batch. 40 * When we notice that bm_flush > bm_write, we write out all pending updates 41 * to the bitmap, and advance bm_write to where bm_flush was. 42 * This may occasionally write a bit out twice, but is sure never to 43 * miss any bits. 44 */ 45 46 #include <linux/blkdev.h> 47 #include <linux/kthread.h> 48 #include <linux/raid/pq.h> 49 #include <linux/async_tx.h> 50 #include <linux/module.h> 51 #include <linux/async.h> 52 #include <linux/seq_file.h> 53 #include <linux/cpu.h> 54 #include <linux/slab.h> 55 #include <linux/ratelimit.h> 56 #include "md.h" 57 #include "raid5.h" 58 #include "raid0.h" 59 #include "bitmap.h" 60 61 /* 62 * Stripe cache 63 */ 64 65 #define NR_STRIPES 256 66 #define STRIPE_SIZE PAGE_SIZE 67 #define STRIPE_SHIFT (PAGE_SHIFT - 9) 68 #define STRIPE_SECTORS (STRIPE_SIZE>>9) 69 #define IO_THRESHOLD 1 70 #define BYPASS_THRESHOLD 1 71 #define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head)) 72 #define HASH_MASK (NR_HASH - 1) 73 74 static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect) 75 { 76 int hash = (sect >> STRIPE_SHIFT) & HASH_MASK; 77 return &conf->stripe_hashtbl[hash]; 78 } 79 80 /* bio's attached to a stripe+device for I/O are linked together in bi_sector 81 * order without overlap. There may be several bio's per stripe+device, and 82 * a bio could span several devices. 83 * When walking this list for a particular stripe+device, we must never proceed 84 * beyond a bio that extends past this device, as the next bio might no longer 85 * be valid. 86 * This function is used to determine the 'next' bio in the list, given the sector 87 * of the current stripe+device 88 */ 89 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector) 90 { 91 int sectors = bio->bi_size >> 9; 92 if (bio->bi_sector + sectors < sector + STRIPE_SECTORS) 93 return bio->bi_next; 94 else 95 return NULL; 96 } 97 98 /* 99 * We maintain a biased count of active stripes in the bottom 16 bits of 100 * bi_phys_segments, and a count of processed stripes in the upper 16 bits 101 */ 102 static inline int raid5_bi_phys_segments(struct bio *bio) 103 { 104 return bio->bi_phys_segments & 0xffff; 105 } 106 107 static inline int raid5_bi_hw_segments(struct bio *bio) 108 { 109 return (bio->bi_phys_segments >> 16) & 0xffff; 110 } 111 112 static inline int raid5_dec_bi_phys_segments(struct bio *bio) 113 { 114 --bio->bi_phys_segments; 115 return raid5_bi_phys_segments(bio); 116 } 117 118 static inline int raid5_dec_bi_hw_segments(struct bio *bio) 119 { 120 unsigned short val = raid5_bi_hw_segments(bio); 121 122 --val; 123 bio->bi_phys_segments = (val << 16) | raid5_bi_phys_segments(bio); 124 return val; 125 } 126 127 static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt) 128 { 129 bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 16); 130 } 131 132 /* Find first data disk in a raid6 stripe */ 133 static inline int raid6_d0(struct stripe_head *sh) 134 { 135 if (sh->ddf_layout) 136 /* ddf always start from first device */ 137 return 0; 138 /* md starts just after Q block */ 139 if (sh->qd_idx == sh->disks - 1) 140 return 0; 141 else 142 return sh->qd_idx + 1; 143 } 144 static inline int raid6_next_disk(int disk, int raid_disks) 145 { 146 disk++; 147 return (disk < raid_disks) ? disk : 0; 148 } 149 150 /* When walking through the disks in a raid5, starting at raid6_d0, 151 * We need to map each disk to a 'slot', where the data disks are slot 152 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk 153 * is raid_disks-1. This help does that mapping. 154 */ 155 static int raid6_idx_to_slot(int idx, struct stripe_head *sh, 156 int *count, int syndrome_disks) 157 { 158 int slot = *count; 159 160 if (sh->ddf_layout) 161 (*count)++; 162 if (idx == sh->pd_idx) 163 return syndrome_disks; 164 if (idx == sh->qd_idx) 165 return syndrome_disks + 1; 166 if (!sh->ddf_layout) 167 (*count)++; 168 return slot; 169 } 170 171 static void return_io(struct bio *return_bi) 172 { 173 struct bio *bi = return_bi; 174 while (bi) { 175 176 return_bi = bi->bi_next; 177 bi->bi_next = NULL; 178 bi->bi_size = 0; 179 bio_endio(bi, 0); 180 bi = return_bi; 181 } 182 } 183 184 static void print_raid5_conf (struct r5conf *conf); 185 186 static int stripe_operations_active(struct stripe_head *sh) 187 { 188 return sh->check_state || sh->reconstruct_state || 189 test_bit(STRIPE_BIOFILL_RUN, &sh->state) || 190 test_bit(STRIPE_COMPUTE_RUN, &sh->state); 191 } 192 193 static void __release_stripe(struct r5conf *conf, struct stripe_head *sh) 194 { 195 if (atomic_dec_and_test(&sh->count)) { 196 BUG_ON(!list_empty(&sh->lru)); 197 BUG_ON(atomic_read(&conf->active_stripes)==0); 198 if (test_bit(STRIPE_HANDLE, &sh->state)) { 199 if (test_bit(STRIPE_DELAYED, &sh->state)) 200 list_add_tail(&sh->lru, &conf->delayed_list); 201 else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && 202 sh->bm_seq - conf->seq_write > 0) 203 list_add_tail(&sh->lru, &conf->bitmap_list); 204 else { 205 clear_bit(STRIPE_BIT_DELAY, &sh->state); 206 list_add_tail(&sh->lru, &conf->handle_list); 207 } 208 md_wakeup_thread(conf->mddev->thread); 209 } else { 210 BUG_ON(stripe_operations_active(sh)); 211 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 212 atomic_dec(&conf->preread_active_stripes); 213 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) 214 md_wakeup_thread(conf->mddev->thread); 215 } 216 atomic_dec(&conf->active_stripes); 217 if (!test_bit(STRIPE_EXPANDING, &sh->state)) { 218 list_add_tail(&sh->lru, &conf->inactive_list); 219 wake_up(&conf->wait_for_stripe); 220 if (conf->retry_read_aligned) 221 md_wakeup_thread(conf->mddev->thread); 222 } 223 } 224 } 225 } 226 227 static void release_stripe(struct stripe_head *sh) 228 { 229 struct r5conf *conf = sh->raid_conf; 230 unsigned long flags; 231 232 spin_lock_irqsave(&conf->device_lock, flags); 233 __release_stripe(conf, sh); 234 spin_unlock_irqrestore(&conf->device_lock, flags); 235 } 236 237 static inline void remove_hash(struct stripe_head *sh) 238 { 239 pr_debug("remove_hash(), stripe %llu\n", 240 (unsigned long long)sh->sector); 241 242 hlist_del_init(&sh->hash); 243 } 244 245 static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) 246 { 247 struct hlist_head *hp = stripe_hash(conf, sh->sector); 248 249 pr_debug("insert_hash(), stripe %llu\n", 250 (unsigned long long)sh->sector); 251 252 hlist_add_head(&sh->hash, hp); 253 } 254 255 256 /* find an idle stripe, make sure it is unhashed, and return it. */ 257 static struct stripe_head *get_free_stripe(struct r5conf *conf) 258 { 259 struct stripe_head *sh = NULL; 260 struct list_head *first; 261 262 if (list_empty(&conf->inactive_list)) 263 goto out; 264 first = conf->inactive_list.next; 265 sh = list_entry(first, struct stripe_head, lru); 266 list_del_init(first); 267 remove_hash(sh); 268 atomic_inc(&conf->active_stripes); 269 out: 270 return sh; 271 } 272 273 static void shrink_buffers(struct stripe_head *sh) 274 { 275 struct page *p; 276 int i; 277 int num = sh->raid_conf->pool_size; 278 279 for (i = 0; i < num ; i++) { 280 p = sh->dev[i].page; 281 if (!p) 282 continue; 283 sh->dev[i].page = NULL; 284 put_page(p); 285 } 286 } 287 288 static int grow_buffers(struct stripe_head *sh) 289 { 290 int i; 291 int num = sh->raid_conf->pool_size; 292 293 for (i = 0; i < num; i++) { 294 struct page *page; 295 296 if (!(page = alloc_page(GFP_KERNEL))) { 297 return 1; 298 } 299 sh->dev[i].page = page; 300 } 301 return 0; 302 } 303 304 static void raid5_build_block(struct stripe_head *sh, int i, int previous); 305 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, 306 struct stripe_head *sh); 307 308 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) 309 { 310 struct r5conf *conf = sh->raid_conf; 311 int i; 312 313 BUG_ON(atomic_read(&sh->count) != 0); 314 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); 315 BUG_ON(stripe_operations_active(sh)); 316 317 pr_debug("init_stripe called, stripe %llu\n", 318 (unsigned long long)sh->sector); 319 320 remove_hash(sh); 321 322 sh->generation = conf->generation - previous; 323 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; 324 sh->sector = sector; 325 stripe_set_idx(sector, conf, previous, sh); 326 sh->state = 0; 327 328 329 for (i = sh->disks; i--; ) { 330 struct r5dev *dev = &sh->dev[i]; 331 332 if (dev->toread || dev->read || dev->towrite || dev->written || 333 test_bit(R5_LOCKED, &dev->flags)) { 334 printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n", 335 (unsigned long long)sh->sector, i, dev->toread, 336 dev->read, dev->towrite, dev->written, 337 test_bit(R5_LOCKED, &dev->flags)); 338 WARN_ON(1); 339 } 340 dev->flags = 0; 341 raid5_build_block(sh, i, previous); 342 } 343 insert_hash(conf, sh); 344 } 345 346 static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector, 347 short generation) 348 { 349 struct stripe_head *sh; 350 struct hlist_node *hn; 351 352 pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector); 353 hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash) 354 if (sh->sector == sector && sh->generation == generation) 355 return sh; 356 pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector); 357 return NULL; 358 } 359 360 /* 361 * Need to check if array has failed when deciding whether to: 362 * - start an array 363 * - remove non-faulty devices 364 * - add a spare 365 * - allow a reshape 366 * This determination is simple when no reshape is happening. 367 * However if there is a reshape, we need to carefully check 368 * both the before and after sections. 369 * This is because some failed devices may only affect one 370 * of the two sections, and some non-in_sync devices may 371 * be insync in the section most affected by failed devices. 372 */ 373 static int has_failed(struct r5conf *conf) 374 { 375 int degraded; 376 int i; 377 if (conf->mddev->reshape_position == MaxSector) 378 return conf->mddev->degraded > conf->max_degraded; 379 380 rcu_read_lock(); 381 degraded = 0; 382 for (i = 0; i < conf->previous_raid_disks; i++) { 383 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); 384 if (!rdev || test_bit(Faulty, &rdev->flags)) 385 degraded++; 386 else if (test_bit(In_sync, &rdev->flags)) 387 ; 388 else 389 /* not in-sync or faulty. 390 * If the reshape increases the number of devices, 391 * this is being recovered by the reshape, so 392 * this 'previous' section is not in_sync. 393 * If the number of devices is being reduced however, 394 * the device can only be part of the array if 395 * we are reverting a reshape, so this section will 396 * be in-sync. 397 */ 398 if (conf->raid_disks >= conf->previous_raid_disks) 399 degraded++; 400 } 401 rcu_read_unlock(); 402 if (degraded > conf->max_degraded) 403 return 1; 404 rcu_read_lock(); 405 degraded = 0; 406 for (i = 0; i < conf->raid_disks; i++) { 407 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); 408 if (!rdev || test_bit(Faulty, &rdev->flags)) 409 degraded++; 410 else if (test_bit(In_sync, &rdev->flags)) 411 ; 412 else 413 /* not in-sync or faulty. 414 * If reshape increases the number of devices, this 415 * section has already been recovered, else it 416 * almost certainly hasn't. 417 */ 418 if (conf->raid_disks <= conf->previous_raid_disks) 419 degraded++; 420 } 421 rcu_read_unlock(); 422 if (degraded > conf->max_degraded) 423 return 1; 424 return 0; 425 } 426 427 static struct stripe_head * 428 get_active_stripe(struct r5conf *conf, sector_t sector, 429 int previous, int noblock, int noquiesce) 430 { 431 struct stripe_head *sh; 432 433 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); 434 435 spin_lock_irq(&conf->device_lock); 436 437 do { 438 wait_event_lock_irq(conf->wait_for_stripe, 439 conf->quiesce == 0 || noquiesce, 440 conf->device_lock, /* nothing */); 441 sh = __find_stripe(conf, sector, conf->generation - previous); 442 if (!sh) { 443 if (!conf->inactive_blocked) 444 sh = get_free_stripe(conf); 445 if (noblock && sh == NULL) 446 break; 447 if (!sh) { 448 conf->inactive_blocked = 1; 449 wait_event_lock_irq(conf->wait_for_stripe, 450 !list_empty(&conf->inactive_list) && 451 (atomic_read(&conf->active_stripes) 452 < (conf->max_nr_stripes *3/4) 453 || !conf->inactive_blocked), 454 conf->device_lock, 455 ); 456 conf->inactive_blocked = 0; 457 } else 458 init_stripe(sh, sector, previous); 459 } else { 460 if (atomic_read(&sh->count)) { 461 BUG_ON(!list_empty(&sh->lru) 462 && !test_bit(STRIPE_EXPANDING, &sh->state)); 463 } else { 464 if (!test_bit(STRIPE_HANDLE, &sh->state)) 465 atomic_inc(&conf->active_stripes); 466 if (list_empty(&sh->lru) && 467 !test_bit(STRIPE_EXPANDING, &sh->state)) 468 BUG(); 469 list_del_init(&sh->lru); 470 } 471 } 472 } while (sh == NULL); 473 474 if (sh) 475 atomic_inc(&sh->count); 476 477 spin_unlock_irq(&conf->device_lock); 478 return sh; 479 } 480 481 static void 482 raid5_end_read_request(struct bio *bi, int error); 483 static void 484 raid5_end_write_request(struct bio *bi, int error); 485 486 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) 487 { 488 struct r5conf *conf = sh->raid_conf; 489 int i, disks = sh->disks; 490 491 might_sleep(); 492 493 for (i = disks; i--; ) { 494 int rw; 495 struct bio *bi; 496 struct md_rdev *rdev; 497 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { 498 if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) 499 rw = WRITE_FUA; 500 else 501 rw = WRITE; 502 } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 503 rw = READ; 504 else 505 continue; 506 507 bi = &sh->dev[i].req; 508 509 bi->bi_rw = rw; 510 if (rw & WRITE) 511 bi->bi_end_io = raid5_end_write_request; 512 else 513 bi->bi_end_io = raid5_end_read_request; 514 515 rcu_read_lock(); 516 rdev = rcu_dereference(conf->disks[i].rdev); 517 if (rdev && test_bit(Faulty, &rdev->flags)) 518 rdev = NULL; 519 if (rdev) 520 atomic_inc(&rdev->nr_pending); 521 rcu_read_unlock(); 522 523 /* We have already checked bad blocks for reads. Now 524 * need to check for writes. 525 */ 526 while ((rw & WRITE) && rdev && 527 test_bit(WriteErrorSeen, &rdev->flags)) { 528 sector_t first_bad; 529 int bad_sectors; 530 int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, 531 &first_bad, &bad_sectors); 532 if (!bad) 533 break; 534 535 if (bad < 0) { 536 set_bit(BlockedBadBlocks, &rdev->flags); 537 if (!conf->mddev->external && 538 conf->mddev->flags) { 539 /* It is very unlikely, but we might 540 * still need to write out the 541 * bad block log - better give it 542 * a chance*/ 543 md_check_recovery(conf->mddev); 544 } 545 md_wait_for_blocked_rdev(rdev, conf->mddev); 546 } else { 547 /* Acknowledged bad block - skip the write */ 548 rdev_dec_pending(rdev, conf->mddev); 549 rdev = NULL; 550 } 551 } 552 553 if (rdev) { 554 if (s->syncing || s->expanding || s->expanded) 555 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 556 557 set_bit(STRIPE_IO_STARTED, &sh->state); 558 559 bi->bi_bdev = rdev->bdev; 560 pr_debug("%s: for %llu schedule op %ld on disc %d\n", 561 __func__, (unsigned long long)sh->sector, 562 bi->bi_rw, i); 563 atomic_inc(&sh->count); 564 bi->bi_sector = sh->sector + rdev->data_offset; 565 bi->bi_flags = 1 << BIO_UPTODATE; 566 bi->bi_vcnt = 1; 567 bi->bi_max_vecs = 1; 568 bi->bi_idx = 0; 569 bi->bi_io_vec = &sh->dev[i].vec; 570 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 571 bi->bi_io_vec[0].bv_offset = 0; 572 bi->bi_size = STRIPE_SIZE; 573 bi->bi_next = NULL; 574 generic_make_request(bi); 575 } else { 576 if (rw & WRITE) 577 set_bit(STRIPE_DEGRADED, &sh->state); 578 pr_debug("skip op %ld on disc %d for sector %llu\n", 579 bi->bi_rw, i, (unsigned long long)sh->sector); 580 clear_bit(R5_LOCKED, &sh->dev[i].flags); 581 set_bit(STRIPE_HANDLE, &sh->state); 582 } 583 } 584 } 585 586 static struct dma_async_tx_descriptor * 587 async_copy_data(int frombio, struct bio *bio, struct page *page, 588 sector_t sector, struct dma_async_tx_descriptor *tx) 589 { 590 struct bio_vec *bvl; 591 struct page *bio_page; 592 int i; 593 int page_offset; 594 struct async_submit_ctl submit; 595 enum async_tx_flags flags = 0; 596 597 if (bio->bi_sector >= sector) 598 page_offset = (signed)(bio->bi_sector - sector) * 512; 599 else 600 page_offset = (signed)(sector - bio->bi_sector) * -512; 601 602 if (frombio) 603 flags |= ASYNC_TX_FENCE; 604 init_async_submit(&submit, flags, tx, NULL, NULL, NULL); 605 606 bio_for_each_segment(bvl, bio, i) { 607 int len = bvl->bv_len; 608 int clen; 609 int b_offset = 0; 610 611 if (page_offset < 0) { 612 b_offset = -page_offset; 613 page_offset += b_offset; 614 len -= b_offset; 615 } 616 617 if (len > 0 && page_offset + len > STRIPE_SIZE) 618 clen = STRIPE_SIZE - page_offset; 619 else 620 clen = len; 621 622 if (clen > 0) { 623 b_offset += bvl->bv_offset; 624 bio_page = bvl->bv_page; 625 if (frombio) 626 tx = async_memcpy(page, bio_page, page_offset, 627 b_offset, clen, &submit); 628 else 629 tx = async_memcpy(bio_page, page, b_offset, 630 page_offset, clen, &submit); 631 } 632 /* chain the operations */ 633 submit.depend_tx = tx; 634 635 if (clen < len) /* hit end of page */ 636 break; 637 page_offset += len; 638 } 639 640 return tx; 641 } 642 643 static void ops_complete_biofill(void *stripe_head_ref) 644 { 645 struct stripe_head *sh = stripe_head_ref; 646 struct bio *return_bi = NULL; 647 struct r5conf *conf = sh->raid_conf; 648 int i; 649 650 pr_debug("%s: stripe %llu\n", __func__, 651 (unsigned long long)sh->sector); 652 653 /* clear completed biofills */ 654 spin_lock_irq(&conf->device_lock); 655 for (i = sh->disks; i--; ) { 656 struct r5dev *dev = &sh->dev[i]; 657 658 /* acknowledge completion of a biofill operation */ 659 /* and check if we need to reply to a read request, 660 * new R5_Wantfill requests are held off until 661 * !STRIPE_BIOFILL_RUN 662 */ 663 if (test_and_clear_bit(R5_Wantfill, &dev->flags)) { 664 struct bio *rbi, *rbi2; 665 666 BUG_ON(!dev->read); 667 rbi = dev->read; 668 dev->read = NULL; 669 while (rbi && rbi->bi_sector < 670 dev->sector + STRIPE_SECTORS) { 671 rbi2 = r5_next_bio(rbi, dev->sector); 672 if (!raid5_dec_bi_phys_segments(rbi)) { 673 rbi->bi_next = return_bi; 674 return_bi = rbi; 675 } 676 rbi = rbi2; 677 } 678 } 679 } 680 spin_unlock_irq(&conf->device_lock); 681 clear_bit(STRIPE_BIOFILL_RUN, &sh->state); 682 683 return_io(return_bi); 684 685 set_bit(STRIPE_HANDLE, &sh->state); 686 release_stripe(sh); 687 } 688 689 static void ops_run_biofill(struct stripe_head *sh) 690 { 691 struct dma_async_tx_descriptor *tx = NULL; 692 struct r5conf *conf = sh->raid_conf; 693 struct async_submit_ctl submit; 694 int i; 695 696 pr_debug("%s: stripe %llu\n", __func__, 697 (unsigned long long)sh->sector); 698 699 for (i = sh->disks; i--; ) { 700 struct r5dev *dev = &sh->dev[i]; 701 if (test_bit(R5_Wantfill, &dev->flags)) { 702 struct bio *rbi; 703 spin_lock_irq(&conf->device_lock); 704 dev->read = rbi = dev->toread; 705 dev->toread = NULL; 706 spin_unlock_irq(&conf->device_lock); 707 while (rbi && rbi->bi_sector < 708 dev->sector + STRIPE_SECTORS) { 709 tx = async_copy_data(0, rbi, dev->page, 710 dev->sector, tx); 711 rbi = r5_next_bio(rbi, dev->sector); 712 } 713 } 714 } 715 716 atomic_inc(&sh->count); 717 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL); 718 async_trigger_callback(&submit); 719 } 720 721 static void mark_target_uptodate(struct stripe_head *sh, int target) 722 { 723 struct r5dev *tgt; 724 725 if (target < 0) 726 return; 727 728 tgt = &sh->dev[target]; 729 set_bit(R5_UPTODATE, &tgt->flags); 730 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 731 clear_bit(R5_Wantcompute, &tgt->flags); 732 } 733 734 static void ops_complete_compute(void *stripe_head_ref) 735 { 736 struct stripe_head *sh = stripe_head_ref; 737 738 pr_debug("%s: stripe %llu\n", __func__, 739 (unsigned long long)sh->sector); 740 741 /* mark the computed target(s) as uptodate */ 742 mark_target_uptodate(sh, sh->ops.target); 743 mark_target_uptodate(sh, sh->ops.target2); 744 745 clear_bit(STRIPE_COMPUTE_RUN, &sh->state); 746 if (sh->check_state == check_state_compute_run) 747 sh->check_state = check_state_compute_result; 748 set_bit(STRIPE_HANDLE, &sh->state); 749 release_stripe(sh); 750 } 751 752 /* return a pointer to the address conversion region of the scribble buffer */ 753 static addr_conv_t *to_addr_conv(struct stripe_head *sh, 754 struct raid5_percpu *percpu) 755 { 756 return percpu->scribble + sizeof(struct page *) * (sh->disks + 2); 757 } 758 759 static struct dma_async_tx_descriptor * 760 ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) 761 { 762 int disks = sh->disks; 763 struct page **xor_srcs = percpu->scribble; 764 int target = sh->ops.target; 765 struct r5dev *tgt = &sh->dev[target]; 766 struct page *xor_dest = tgt->page; 767 int count = 0; 768 struct dma_async_tx_descriptor *tx; 769 struct async_submit_ctl submit; 770 int i; 771 772 pr_debug("%s: stripe %llu block: %d\n", 773 __func__, (unsigned long long)sh->sector, target); 774 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 775 776 for (i = disks; i--; ) 777 if (i != target) 778 xor_srcs[count++] = sh->dev[i].page; 779 780 atomic_inc(&sh->count); 781 782 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL, 783 ops_complete_compute, sh, to_addr_conv(sh, percpu)); 784 if (unlikely(count == 1)) 785 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit); 786 else 787 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); 788 789 return tx; 790 } 791 792 /* set_syndrome_sources - populate source buffers for gen_syndrome 793 * @srcs - (struct page *) array of size sh->disks 794 * @sh - stripe_head to parse 795 * 796 * Populates srcs in proper layout order for the stripe and returns the 797 * 'count' of sources to be used in a call to async_gen_syndrome. The P 798 * destination buffer is recorded in srcs[count] and the Q destination 799 * is recorded in srcs[count+1]]. 800 */ 801 static int set_syndrome_sources(struct page **srcs, struct stripe_head *sh) 802 { 803 int disks = sh->disks; 804 int syndrome_disks = sh->ddf_layout ? disks : (disks - 2); 805 int d0_idx = raid6_d0(sh); 806 int count; 807 int i; 808 809 for (i = 0; i < disks; i++) 810 srcs[i] = NULL; 811 812 count = 0; 813 i = d0_idx; 814 do { 815 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); 816 817 srcs[slot] = sh->dev[i].page; 818 i = raid6_next_disk(i, disks); 819 } while (i != d0_idx); 820 821 return syndrome_disks; 822 } 823 824 static struct dma_async_tx_descriptor * 825 ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) 826 { 827 int disks = sh->disks; 828 struct page **blocks = percpu->scribble; 829 int target; 830 int qd_idx = sh->qd_idx; 831 struct dma_async_tx_descriptor *tx; 832 struct async_submit_ctl submit; 833 struct r5dev *tgt; 834 struct page *dest; 835 int i; 836 int count; 837 838 if (sh->ops.target < 0) 839 target = sh->ops.target2; 840 else if (sh->ops.target2 < 0) 841 target = sh->ops.target; 842 else 843 /* we should only have one valid target */ 844 BUG(); 845 BUG_ON(target < 0); 846 pr_debug("%s: stripe %llu block: %d\n", 847 __func__, (unsigned long long)sh->sector, target); 848 849 tgt = &sh->dev[target]; 850 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 851 dest = tgt->page; 852 853 atomic_inc(&sh->count); 854 855 if (target == qd_idx) { 856 count = set_syndrome_sources(blocks, sh); 857 blocks[count] = NULL; /* regenerating p is not necessary */ 858 BUG_ON(blocks[count+1] != dest); /* q should already be set */ 859 init_async_submit(&submit, ASYNC_TX_FENCE, NULL, 860 ops_complete_compute, sh, 861 to_addr_conv(sh, percpu)); 862 tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); 863 } else { 864 /* Compute any data- or p-drive using XOR */ 865 count = 0; 866 for (i = disks; i-- ; ) { 867 if (i == target || i == qd_idx) 868 continue; 869 blocks[count++] = sh->dev[i].page; 870 } 871 872 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, 873 NULL, ops_complete_compute, sh, 874 to_addr_conv(sh, percpu)); 875 tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit); 876 } 877 878 return tx; 879 } 880 881 static struct dma_async_tx_descriptor * 882 ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) 883 { 884 int i, count, disks = sh->disks; 885 int syndrome_disks = sh->ddf_layout ? disks : disks-2; 886 int d0_idx = raid6_d0(sh); 887 int faila = -1, failb = -1; 888 int target = sh->ops.target; 889 int target2 = sh->ops.target2; 890 struct r5dev *tgt = &sh->dev[target]; 891 struct r5dev *tgt2 = &sh->dev[target2]; 892 struct dma_async_tx_descriptor *tx; 893 struct page **blocks = percpu->scribble; 894 struct async_submit_ctl submit; 895 896 pr_debug("%s: stripe %llu block1: %d block2: %d\n", 897 __func__, (unsigned long long)sh->sector, target, target2); 898 BUG_ON(target < 0 || target2 < 0); 899 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 900 BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags)); 901 902 /* we need to open-code set_syndrome_sources to handle the 903 * slot number conversion for 'faila' and 'failb' 904 */ 905 for (i = 0; i < disks ; i++) 906 blocks[i] = NULL; 907 count = 0; 908 i = d0_idx; 909 do { 910 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); 911 912 blocks[slot] = sh->dev[i].page; 913 914 if (i == target) 915 faila = slot; 916 if (i == target2) 917 failb = slot; 918 i = raid6_next_disk(i, disks); 919 } while (i != d0_idx); 920 921 BUG_ON(faila == failb); 922 if (failb < faila) 923 swap(faila, failb); 924 pr_debug("%s: stripe: %llu faila: %d failb: %d\n", 925 __func__, (unsigned long long)sh->sector, faila, failb); 926 927 atomic_inc(&sh->count); 928 929 if (failb == syndrome_disks+1) { 930 /* Q disk is one of the missing disks */ 931 if (faila == syndrome_disks) { 932 /* Missing P+Q, just recompute */ 933 init_async_submit(&submit, ASYNC_TX_FENCE, NULL, 934 ops_complete_compute, sh, 935 to_addr_conv(sh, percpu)); 936 return async_gen_syndrome(blocks, 0, syndrome_disks+2, 937 STRIPE_SIZE, &submit); 938 } else { 939 struct page *dest; 940 int data_target; 941 int qd_idx = sh->qd_idx; 942 943 /* Missing D+Q: recompute D from P, then recompute Q */ 944 if (target == qd_idx) 945 data_target = target2; 946 else 947 data_target = target; 948 949 count = 0; 950 for (i = disks; i-- ; ) { 951 if (i == data_target || i == qd_idx) 952 continue; 953 blocks[count++] = sh->dev[i].page; 954 } 955 dest = sh->dev[data_target].page; 956 init_async_submit(&submit, 957 ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, 958 NULL, NULL, NULL, 959 to_addr_conv(sh, percpu)); 960 tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, 961 &submit); 962 963 count = set_syndrome_sources(blocks, sh); 964 init_async_submit(&submit, ASYNC_TX_FENCE, tx, 965 ops_complete_compute, sh, 966 to_addr_conv(sh, percpu)); 967 return async_gen_syndrome(blocks, 0, count+2, 968 STRIPE_SIZE, &submit); 969 } 970 } else { 971 init_async_submit(&submit, ASYNC_TX_FENCE, NULL, 972 ops_complete_compute, sh, 973 to_addr_conv(sh, percpu)); 974 if (failb == syndrome_disks) { 975 /* We're missing D+P. */ 976 return async_raid6_datap_recov(syndrome_disks+2, 977 STRIPE_SIZE, faila, 978 blocks, &submit); 979 } else { 980 /* We're missing D+D. */ 981 return async_raid6_2data_recov(syndrome_disks+2, 982 STRIPE_SIZE, faila, failb, 983 blocks, &submit); 984 } 985 } 986 } 987 988 989 static void ops_complete_prexor(void *stripe_head_ref) 990 { 991 struct stripe_head *sh = stripe_head_ref; 992 993 pr_debug("%s: stripe %llu\n", __func__, 994 (unsigned long long)sh->sector); 995 } 996 997 static struct dma_async_tx_descriptor * 998 ops_run_prexor(struct stripe_head *sh, struct raid5_percpu *percpu, 999 struct dma_async_tx_descriptor *tx) 1000 { 1001 int disks = sh->disks; 1002 struct page **xor_srcs = percpu->scribble; 1003 int count = 0, pd_idx = sh->pd_idx, i; 1004 struct async_submit_ctl submit; 1005 1006 /* existing parity data subtracted */ 1007 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 1008 1009 pr_debug("%s: stripe %llu\n", __func__, 1010 (unsigned long long)sh->sector); 1011 1012 for (i = disks; i--; ) { 1013 struct r5dev *dev = &sh->dev[i]; 1014 /* Only process blocks that are known to be uptodate */ 1015 if (test_bit(R5_Wantdrain, &dev->flags)) 1016 xor_srcs[count++] = dev->page; 1017 } 1018 1019 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, 1020 ops_complete_prexor, sh, to_addr_conv(sh, percpu)); 1021 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); 1022 1023 return tx; 1024 } 1025 1026 static struct dma_async_tx_descriptor * 1027 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) 1028 { 1029 int disks = sh->disks; 1030 int i; 1031 1032 pr_debug("%s: stripe %llu\n", __func__, 1033 (unsigned long long)sh->sector); 1034 1035 for (i = disks; i--; ) { 1036 struct r5dev *dev = &sh->dev[i]; 1037 struct bio *chosen; 1038 1039 if (test_and_clear_bit(R5_Wantdrain, &dev->flags)) { 1040 struct bio *wbi; 1041 1042 spin_lock_irq(&sh->raid_conf->device_lock); 1043 chosen = dev->towrite; 1044 dev->towrite = NULL; 1045 BUG_ON(dev->written); 1046 wbi = dev->written = chosen; 1047 spin_unlock_irq(&sh->raid_conf->device_lock); 1048 1049 while (wbi && wbi->bi_sector < 1050 dev->sector + STRIPE_SECTORS) { 1051 if (wbi->bi_rw & REQ_FUA) 1052 set_bit(R5_WantFUA, &dev->flags); 1053 tx = async_copy_data(1, wbi, dev->page, 1054 dev->sector, tx); 1055 wbi = r5_next_bio(wbi, dev->sector); 1056 } 1057 } 1058 } 1059 1060 return tx; 1061 } 1062 1063 static void ops_complete_reconstruct(void *stripe_head_ref) 1064 { 1065 struct stripe_head *sh = stripe_head_ref; 1066 int disks = sh->disks; 1067 int pd_idx = sh->pd_idx; 1068 int qd_idx = sh->qd_idx; 1069 int i; 1070 bool fua = false; 1071 1072 pr_debug("%s: stripe %llu\n", __func__, 1073 (unsigned long long)sh->sector); 1074 1075 for (i = disks; i--; ) 1076 fua |= test_bit(R5_WantFUA, &sh->dev[i].flags); 1077 1078 for (i = disks; i--; ) { 1079 struct r5dev *dev = &sh->dev[i]; 1080 1081 if (dev->written || i == pd_idx || i == qd_idx) { 1082 set_bit(R5_UPTODATE, &dev->flags); 1083 if (fua) 1084 set_bit(R5_WantFUA, &dev->flags); 1085 } 1086 } 1087 1088 if (sh->reconstruct_state == reconstruct_state_drain_run) 1089 sh->reconstruct_state = reconstruct_state_drain_result; 1090 else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) 1091 sh->reconstruct_state = reconstruct_state_prexor_drain_result; 1092 else { 1093 BUG_ON(sh->reconstruct_state != reconstruct_state_run); 1094 sh->reconstruct_state = reconstruct_state_result; 1095 } 1096 1097 set_bit(STRIPE_HANDLE, &sh->state); 1098 release_stripe(sh); 1099 } 1100 1101 static void 1102 ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu, 1103 struct dma_async_tx_descriptor *tx) 1104 { 1105 int disks = sh->disks; 1106 struct page **xor_srcs = percpu->scribble; 1107 struct async_submit_ctl submit; 1108 int count = 0, pd_idx = sh->pd_idx, i; 1109 struct page *xor_dest; 1110 int prexor = 0; 1111 unsigned long flags; 1112 1113 pr_debug("%s: stripe %llu\n", __func__, 1114 (unsigned long long)sh->sector); 1115 1116 /* check if prexor is active which means only process blocks 1117 * that are part of a read-modify-write (written) 1118 */ 1119 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { 1120 prexor = 1; 1121 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 1122 for (i = disks; i--; ) { 1123 struct r5dev *dev = &sh->dev[i]; 1124 if (dev->written) 1125 xor_srcs[count++] = dev->page; 1126 } 1127 } else { 1128 xor_dest = sh->dev[pd_idx].page; 1129 for (i = disks; i--; ) { 1130 struct r5dev *dev = &sh->dev[i]; 1131 if (i != pd_idx) 1132 xor_srcs[count++] = dev->page; 1133 } 1134 } 1135 1136 /* 1/ if we prexor'd then the dest is reused as a source 1137 * 2/ if we did not prexor then we are redoing the parity 1138 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST 1139 * for the synchronous xor case 1140 */ 1141 flags = ASYNC_TX_ACK | 1142 (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST); 1143 1144 atomic_inc(&sh->count); 1145 1146 init_async_submit(&submit, flags, tx, ops_complete_reconstruct, sh, 1147 to_addr_conv(sh, percpu)); 1148 if (unlikely(count == 1)) 1149 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit); 1150 else 1151 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); 1152 } 1153 1154 static void 1155 ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu, 1156 struct dma_async_tx_descriptor *tx) 1157 { 1158 struct async_submit_ctl submit; 1159 struct page **blocks = percpu->scribble; 1160 int count; 1161 1162 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); 1163 1164 count = set_syndrome_sources(blocks, sh); 1165 1166 atomic_inc(&sh->count); 1167 1168 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_reconstruct, 1169 sh, to_addr_conv(sh, percpu)); 1170 async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); 1171 } 1172 1173 static void ops_complete_check(void *stripe_head_ref) 1174 { 1175 struct stripe_head *sh = stripe_head_ref; 1176 1177 pr_debug("%s: stripe %llu\n", __func__, 1178 (unsigned long long)sh->sector); 1179 1180 sh->check_state = check_state_check_result; 1181 set_bit(STRIPE_HANDLE, &sh->state); 1182 release_stripe(sh); 1183 } 1184 1185 static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu) 1186 { 1187 int disks = sh->disks; 1188 int pd_idx = sh->pd_idx; 1189 int qd_idx = sh->qd_idx; 1190 struct page *xor_dest; 1191 struct page **xor_srcs = percpu->scribble; 1192 struct dma_async_tx_descriptor *tx; 1193 struct async_submit_ctl submit; 1194 int count; 1195 int i; 1196 1197 pr_debug("%s: stripe %llu\n", __func__, 1198 (unsigned long long)sh->sector); 1199 1200 count = 0; 1201 xor_dest = sh->dev[pd_idx].page; 1202 xor_srcs[count++] = xor_dest; 1203 for (i = disks; i--; ) { 1204 if (i == pd_idx || i == qd_idx) 1205 continue; 1206 xor_srcs[count++] = sh->dev[i].page; 1207 } 1208 1209 init_async_submit(&submit, 0, NULL, NULL, NULL, 1210 to_addr_conv(sh, percpu)); 1211 tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 1212 &sh->ops.zero_sum_result, &submit); 1213 1214 atomic_inc(&sh->count); 1215 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL); 1216 tx = async_trigger_callback(&submit); 1217 } 1218 1219 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp) 1220 { 1221 struct page **srcs = percpu->scribble; 1222 struct async_submit_ctl submit; 1223 int count; 1224 1225 pr_debug("%s: stripe %llu checkp: %d\n", __func__, 1226 (unsigned long long)sh->sector, checkp); 1227 1228 count = set_syndrome_sources(srcs, sh); 1229 if (!checkp) 1230 srcs[count] = NULL; 1231 1232 atomic_inc(&sh->count); 1233 init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check, 1234 sh, to_addr_conv(sh, percpu)); 1235 async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE, 1236 &sh->ops.zero_sum_result, percpu->spare_page, &submit); 1237 } 1238 1239 static void __raid_run_ops(struct stripe_head *sh, unsigned long ops_request) 1240 { 1241 int overlap_clear = 0, i, disks = sh->disks; 1242 struct dma_async_tx_descriptor *tx = NULL; 1243 struct r5conf *conf = sh->raid_conf; 1244 int level = conf->level; 1245 struct raid5_percpu *percpu; 1246 unsigned long cpu; 1247 1248 cpu = get_cpu(); 1249 percpu = per_cpu_ptr(conf->percpu, cpu); 1250 if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { 1251 ops_run_biofill(sh); 1252 overlap_clear++; 1253 } 1254 1255 if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) { 1256 if (level < 6) 1257 tx = ops_run_compute5(sh, percpu); 1258 else { 1259 if (sh->ops.target2 < 0 || sh->ops.target < 0) 1260 tx = ops_run_compute6_1(sh, percpu); 1261 else 1262 tx = ops_run_compute6_2(sh, percpu); 1263 } 1264 /* terminate the chain if reconstruct is not set to be run */ 1265 if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) 1266 async_tx_ack(tx); 1267 } 1268 1269 if (test_bit(STRIPE_OP_PREXOR, &ops_request)) 1270 tx = ops_run_prexor(sh, percpu, tx); 1271 1272 if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) { 1273 tx = ops_run_biodrain(sh, tx); 1274 overlap_clear++; 1275 } 1276 1277 if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) { 1278 if (level < 6) 1279 ops_run_reconstruct5(sh, percpu, tx); 1280 else 1281 ops_run_reconstruct6(sh, percpu, tx); 1282 } 1283 1284 if (test_bit(STRIPE_OP_CHECK, &ops_request)) { 1285 if (sh->check_state == check_state_run) 1286 ops_run_check_p(sh, percpu); 1287 else if (sh->check_state == check_state_run_q) 1288 ops_run_check_pq(sh, percpu, 0); 1289 else if (sh->check_state == check_state_run_pq) 1290 ops_run_check_pq(sh, percpu, 1); 1291 else 1292 BUG(); 1293 } 1294 1295 if (overlap_clear) 1296 for (i = disks; i--; ) { 1297 struct r5dev *dev = &sh->dev[i]; 1298 if (test_and_clear_bit(R5_Overlap, &dev->flags)) 1299 wake_up(&sh->raid_conf->wait_for_overlap); 1300 } 1301 put_cpu(); 1302 } 1303 1304 #ifdef CONFIG_MULTICORE_RAID456 1305 static void async_run_ops(void *param, async_cookie_t cookie) 1306 { 1307 struct stripe_head *sh = param; 1308 unsigned long ops_request = sh->ops.request; 1309 1310 clear_bit_unlock(STRIPE_OPS_REQ_PENDING, &sh->state); 1311 wake_up(&sh->ops.wait_for_ops); 1312 1313 __raid_run_ops(sh, ops_request); 1314 release_stripe(sh); 1315 } 1316 1317 static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) 1318 { 1319 /* since handle_stripe can be called outside of raid5d context 1320 * we need to ensure sh->ops.request is de-staged before another 1321 * request arrives 1322 */ 1323 wait_event(sh->ops.wait_for_ops, 1324 !test_and_set_bit_lock(STRIPE_OPS_REQ_PENDING, &sh->state)); 1325 sh->ops.request = ops_request; 1326 1327 atomic_inc(&sh->count); 1328 async_schedule(async_run_ops, sh); 1329 } 1330 #else 1331 #define raid_run_ops __raid_run_ops 1332 #endif 1333 1334 static int grow_one_stripe(struct r5conf *conf) 1335 { 1336 struct stripe_head *sh; 1337 sh = kmem_cache_zalloc(conf->slab_cache, GFP_KERNEL); 1338 if (!sh) 1339 return 0; 1340 1341 sh->raid_conf = conf; 1342 #ifdef CONFIG_MULTICORE_RAID456 1343 init_waitqueue_head(&sh->ops.wait_for_ops); 1344 #endif 1345 1346 if (grow_buffers(sh)) { 1347 shrink_buffers(sh); 1348 kmem_cache_free(conf->slab_cache, sh); 1349 return 0; 1350 } 1351 /* we just created an active stripe so... */ 1352 atomic_set(&sh->count, 1); 1353 atomic_inc(&conf->active_stripes); 1354 INIT_LIST_HEAD(&sh->lru); 1355 release_stripe(sh); 1356 return 1; 1357 } 1358 1359 static int grow_stripes(struct r5conf *conf, int num) 1360 { 1361 struct kmem_cache *sc; 1362 int devs = max(conf->raid_disks, conf->previous_raid_disks); 1363 1364 if (conf->mddev->gendisk) 1365 sprintf(conf->cache_name[0], 1366 "raid%d-%s", conf->level, mdname(conf->mddev)); 1367 else 1368 sprintf(conf->cache_name[0], 1369 "raid%d-%p", conf->level, conf->mddev); 1370 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]); 1371 1372 conf->active_name = 0; 1373 sc = kmem_cache_create(conf->cache_name[conf->active_name], 1374 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev), 1375 0, 0, NULL); 1376 if (!sc) 1377 return 1; 1378 conf->slab_cache = sc; 1379 conf->pool_size = devs; 1380 while (num--) 1381 if (!grow_one_stripe(conf)) 1382 return 1; 1383 return 0; 1384 } 1385 1386 /** 1387 * scribble_len - return the required size of the scribble region 1388 * @num - total number of disks in the array 1389 * 1390 * The size must be enough to contain: 1391 * 1/ a struct page pointer for each device in the array +2 1392 * 2/ room to convert each entry in (1) to its corresponding dma 1393 * (dma_map_page()) or page (page_address()) address. 1394 * 1395 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we 1396 * calculate over all devices (not just the data blocks), using zeros in place 1397 * of the P and Q blocks. 1398 */ 1399 static size_t scribble_len(int num) 1400 { 1401 size_t len; 1402 1403 len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2); 1404 1405 return len; 1406 } 1407 1408 static int resize_stripes(struct r5conf *conf, int newsize) 1409 { 1410 /* Make all the stripes able to hold 'newsize' devices. 1411 * New slots in each stripe get 'page' set to a new page. 1412 * 1413 * This happens in stages: 1414 * 1/ create a new kmem_cache and allocate the required number of 1415 * stripe_heads. 1416 * 2/ gather all the old stripe_heads and tranfer the pages across 1417 * to the new stripe_heads. This will have the side effect of 1418 * freezing the array as once all stripe_heads have been collected, 1419 * no IO will be possible. Old stripe heads are freed once their 1420 * pages have been transferred over, and the old kmem_cache is 1421 * freed when all stripes are done. 1422 * 3/ reallocate conf->disks to be suitable bigger. If this fails, 1423 * we simple return a failre status - no need to clean anything up. 1424 * 4/ allocate new pages for the new slots in the new stripe_heads. 1425 * If this fails, we don't bother trying the shrink the 1426 * stripe_heads down again, we just leave them as they are. 1427 * As each stripe_head is processed the new one is released into 1428 * active service. 1429 * 1430 * Once step2 is started, we cannot afford to wait for a write, 1431 * so we use GFP_NOIO allocations. 1432 */ 1433 struct stripe_head *osh, *nsh; 1434 LIST_HEAD(newstripes); 1435 struct disk_info *ndisks; 1436 unsigned long cpu; 1437 int err; 1438 struct kmem_cache *sc; 1439 int i; 1440 1441 if (newsize <= conf->pool_size) 1442 return 0; /* never bother to shrink */ 1443 1444 err = md_allow_write(conf->mddev); 1445 if (err) 1446 return err; 1447 1448 /* Step 1 */ 1449 sc = kmem_cache_create(conf->cache_name[1-conf->active_name], 1450 sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev), 1451 0, 0, NULL); 1452 if (!sc) 1453 return -ENOMEM; 1454 1455 for (i = conf->max_nr_stripes; i; i--) { 1456 nsh = kmem_cache_zalloc(sc, GFP_KERNEL); 1457 if (!nsh) 1458 break; 1459 1460 nsh->raid_conf = conf; 1461 #ifdef CONFIG_MULTICORE_RAID456 1462 init_waitqueue_head(&nsh->ops.wait_for_ops); 1463 #endif 1464 1465 list_add(&nsh->lru, &newstripes); 1466 } 1467 if (i) { 1468 /* didn't get enough, give up */ 1469 while (!list_empty(&newstripes)) { 1470 nsh = list_entry(newstripes.next, struct stripe_head, lru); 1471 list_del(&nsh->lru); 1472 kmem_cache_free(sc, nsh); 1473 } 1474 kmem_cache_destroy(sc); 1475 return -ENOMEM; 1476 } 1477 /* Step 2 - Must use GFP_NOIO now. 1478 * OK, we have enough stripes, start collecting inactive 1479 * stripes and copying them over 1480 */ 1481 list_for_each_entry(nsh, &newstripes, lru) { 1482 spin_lock_irq(&conf->device_lock); 1483 wait_event_lock_irq(conf->wait_for_stripe, 1484 !list_empty(&conf->inactive_list), 1485 conf->device_lock, 1486 ); 1487 osh = get_free_stripe(conf); 1488 spin_unlock_irq(&conf->device_lock); 1489 atomic_set(&nsh->count, 1); 1490 for(i=0; i<conf->pool_size; i++) 1491 nsh->dev[i].page = osh->dev[i].page; 1492 for( ; i<newsize; i++) 1493 nsh->dev[i].page = NULL; 1494 kmem_cache_free(conf->slab_cache, osh); 1495 } 1496 kmem_cache_destroy(conf->slab_cache); 1497 1498 /* Step 3. 1499 * At this point, we are holding all the stripes so the array 1500 * is completely stalled, so now is a good time to resize 1501 * conf->disks and the scribble region 1502 */ 1503 ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO); 1504 if (ndisks) { 1505 for (i=0; i<conf->raid_disks; i++) 1506 ndisks[i] = conf->disks[i]; 1507 kfree(conf->disks); 1508 conf->disks = ndisks; 1509 } else 1510 err = -ENOMEM; 1511 1512 get_online_cpus(); 1513 conf->scribble_len = scribble_len(newsize); 1514 for_each_present_cpu(cpu) { 1515 struct raid5_percpu *percpu; 1516 void *scribble; 1517 1518 percpu = per_cpu_ptr(conf->percpu, cpu); 1519 scribble = kmalloc(conf->scribble_len, GFP_NOIO); 1520 1521 if (scribble) { 1522 kfree(percpu->scribble); 1523 percpu->scribble = scribble; 1524 } else { 1525 err = -ENOMEM; 1526 break; 1527 } 1528 } 1529 put_online_cpus(); 1530 1531 /* Step 4, return new stripes to service */ 1532 while(!list_empty(&newstripes)) { 1533 nsh = list_entry(newstripes.next, struct stripe_head, lru); 1534 list_del_init(&nsh->lru); 1535 1536 for (i=conf->raid_disks; i < newsize; i++) 1537 if (nsh->dev[i].page == NULL) { 1538 struct page *p = alloc_page(GFP_NOIO); 1539 nsh->dev[i].page = p; 1540 if (!p) 1541 err = -ENOMEM; 1542 } 1543 release_stripe(nsh); 1544 } 1545 /* critical section pass, GFP_NOIO no longer needed */ 1546 1547 conf->slab_cache = sc; 1548 conf->active_name = 1-conf->active_name; 1549 conf->pool_size = newsize; 1550 return err; 1551 } 1552 1553 static int drop_one_stripe(struct r5conf *conf) 1554 { 1555 struct stripe_head *sh; 1556 1557 spin_lock_irq(&conf->device_lock); 1558 sh = get_free_stripe(conf); 1559 spin_unlock_irq(&conf->device_lock); 1560 if (!sh) 1561 return 0; 1562 BUG_ON(atomic_read(&sh->count)); 1563 shrink_buffers(sh); 1564 kmem_cache_free(conf->slab_cache, sh); 1565 atomic_dec(&conf->active_stripes); 1566 return 1; 1567 } 1568 1569 static void shrink_stripes(struct r5conf *conf) 1570 { 1571 while (drop_one_stripe(conf)) 1572 ; 1573 1574 if (conf->slab_cache) 1575 kmem_cache_destroy(conf->slab_cache); 1576 conf->slab_cache = NULL; 1577 } 1578 1579 static void raid5_end_read_request(struct bio * bi, int error) 1580 { 1581 struct stripe_head *sh = bi->bi_private; 1582 struct r5conf *conf = sh->raid_conf; 1583 int disks = sh->disks, i; 1584 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 1585 char b[BDEVNAME_SIZE]; 1586 struct md_rdev *rdev; 1587 1588 1589 for (i=0 ; i<disks; i++) 1590 if (bi == &sh->dev[i].req) 1591 break; 1592 1593 pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n", 1594 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 1595 uptodate); 1596 if (i == disks) { 1597 BUG(); 1598 return; 1599 } 1600 1601 if (uptodate) { 1602 set_bit(R5_UPTODATE, &sh->dev[i].flags); 1603 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 1604 rdev = conf->disks[i].rdev; 1605 printk_ratelimited( 1606 KERN_INFO 1607 "md/raid:%s: read error corrected" 1608 " (%lu sectors at %llu on %s)\n", 1609 mdname(conf->mddev), STRIPE_SECTORS, 1610 (unsigned long long)(sh->sector 1611 + rdev->data_offset), 1612 bdevname(rdev->bdev, b)); 1613 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); 1614 clear_bit(R5_ReadError, &sh->dev[i].flags); 1615 clear_bit(R5_ReWrite, &sh->dev[i].flags); 1616 } 1617 if (atomic_read(&conf->disks[i].rdev->read_errors)) 1618 atomic_set(&conf->disks[i].rdev->read_errors, 0); 1619 } else { 1620 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b); 1621 int retry = 0; 1622 rdev = conf->disks[i].rdev; 1623 1624 clear_bit(R5_UPTODATE, &sh->dev[i].flags); 1625 atomic_inc(&rdev->read_errors); 1626 if (conf->mddev->degraded >= conf->max_degraded) 1627 printk_ratelimited( 1628 KERN_WARNING 1629 "md/raid:%s: read error not correctable " 1630 "(sector %llu on %s).\n", 1631 mdname(conf->mddev), 1632 (unsigned long long)(sh->sector 1633 + rdev->data_offset), 1634 bdn); 1635 else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) 1636 /* Oh, no!!! */ 1637 printk_ratelimited( 1638 KERN_WARNING 1639 "md/raid:%s: read error NOT corrected!! " 1640 "(sector %llu on %s).\n", 1641 mdname(conf->mddev), 1642 (unsigned long long)(sh->sector 1643 + rdev->data_offset), 1644 bdn); 1645 else if (atomic_read(&rdev->read_errors) 1646 > conf->max_nr_stripes) 1647 printk(KERN_WARNING 1648 "md/raid:%s: Too many read errors, failing device %s.\n", 1649 mdname(conf->mddev), bdn); 1650 else 1651 retry = 1; 1652 if (retry) 1653 set_bit(R5_ReadError, &sh->dev[i].flags); 1654 else { 1655 clear_bit(R5_ReadError, &sh->dev[i].flags); 1656 clear_bit(R5_ReWrite, &sh->dev[i].flags); 1657 md_error(conf->mddev, rdev); 1658 } 1659 } 1660 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); 1661 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1662 set_bit(STRIPE_HANDLE, &sh->state); 1663 release_stripe(sh); 1664 } 1665 1666 static void raid5_end_write_request(struct bio *bi, int error) 1667 { 1668 struct stripe_head *sh = bi->bi_private; 1669 struct r5conf *conf = sh->raid_conf; 1670 int disks = sh->disks, i; 1671 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 1672 sector_t first_bad; 1673 int bad_sectors; 1674 1675 for (i=0 ; i<disks; i++) 1676 if (bi == &sh->dev[i].req) 1677 break; 1678 1679 pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n", 1680 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 1681 uptodate); 1682 if (i == disks) { 1683 BUG(); 1684 return; 1685 } 1686 1687 if (!uptodate) { 1688 set_bit(WriteErrorSeen, &conf->disks[i].rdev->flags); 1689 set_bit(R5_WriteError, &sh->dev[i].flags); 1690 } else if (is_badblock(conf->disks[i].rdev, sh->sector, STRIPE_SECTORS, 1691 &first_bad, &bad_sectors)) 1692 set_bit(R5_MadeGood, &sh->dev[i].flags); 1693 1694 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); 1695 1696 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1697 set_bit(STRIPE_HANDLE, &sh->state); 1698 release_stripe(sh); 1699 } 1700 1701 1702 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous); 1703 1704 static void raid5_build_block(struct stripe_head *sh, int i, int previous) 1705 { 1706 struct r5dev *dev = &sh->dev[i]; 1707 1708 bio_init(&dev->req); 1709 dev->req.bi_io_vec = &dev->vec; 1710 dev->req.bi_vcnt++; 1711 dev->req.bi_max_vecs++; 1712 dev->vec.bv_page = dev->page; 1713 dev->vec.bv_len = STRIPE_SIZE; 1714 dev->vec.bv_offset = 0; 1715 1716 dev->req.bi_sector = sh->sector; 1717 dev->req.bi_private = sh; 1718 1719 dev->flags = 0; 1720 dev->sector = compute_blocknr(sh, i, previous); 1721 } 1722 1723 static void error(struct mddev *mddev, struct md_rdev *rdev) 1724 { 1725 char b[BDEVNAME_SIZE]; 1726 struct r5conf *conf = mddev->private; 1727 pr_debug("raid456: error called\n"); 1728 1729 if (test_and_clear_bit(In_sync, &rdev->flags)) { 1730 unsigned long flags; 1731 spin_lock_irqsave(&conf->device_lock, flags); 1732 mddev->degraded++; 1733 spin_unlock_irqrestore(&conf->device_lock, flags); 1734 /* 1735 * if recovery was running, make sure it aborts. 1736 */ 1737 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 1738 } 1739 set_bit(Blocked, &rdev->flags); 1740 set_bit(Faulty, &rdev->flags); 1741 set_bit(MD_CHANGE_DEVS, &mddev->flags); 1742 printk(KERN_ALERT 1743 "md/raid:%s: Disk failure on %s, disabling device.\n" 1744 "md/raid:%s: Operation continuing on %d devices.\n", 1745 mdname(mddev), 1746 bdevname(rdev->bdev, b), 1747 mdname(mddev), 1748 conf->raid_disks - mddev->degraded); 1749 } 1750 1751 /* 1752 * Input: a 'big' sector number, 1753 * Output: index of the data and parity disk, and the sector # in them. 1754 */ 1755 static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, 1756 int previous, int *dd_idx, 1757 struct stripe_head *sh) 1758 { 1759 sector_t stripe, stripe2; 1760 sector_t chunk_number; 1761 unsigned int chunk_offset; 1762 int pd_idx, qd_idx; 1763 int ddf_layout = 0; 1764 sector_t new_sector; 1765 int algorithm = previous ? conf->prev_algo 1766 : conf->algorithm; 1767 int sectors_per_chunk = previous ? conf->prev_chunk_sectors 1768 : conf->chunk_sectors; 1769 int raid_disks = previous ? conf->previous_raid_disks 1770 : conf->raid_disks; 1771 int data_disks = raid_disks - conf->max_degraded; 1772 1773 /* First compute the information on this sector */ 1774 1775 /* 1776 * Compute the chunk number and the sector offset inside the chunk 1777 */ 1778 chunk_offset = sector_div(r_sector, sectors_per_chunk); 1779 chunk_number = r_sector; 1780 1781 /* 1782 * Compute the stripe number 1783 */ 1784 stripe = chunk_number; 1785 *dd_idx = sector_div(stripe, data_disks); 1786 stripe2 = stripe; 1787 /* 1788 * Select the parity disk based on the user selected algorithm. 1789 */ 1790 pd_idx = qd_idx = -1; 1791 switch(conf->level) { 1792 case 4: 1793 pd_idx = data_disks; 1794 break; 1795 case 5: 1796 switch (algorithm) { 1797 case ALGORITHM_LEFT_ASYMMETRIC: 1798 pd_idx = data_disks - sector_div(stripe2, raid_disks); 1799 if (*dd_idx >= pd_idx) 1800 (*dd_idx)++; 1801 break; 1802 case ALGORITHM_RIGHT_ASYMMETRIC: 1803 pd_idx = sector_div(stripe2, raid_disks); 1804 if (*dd_idx >= pd_idx) 1805 (*dd_idx)++; 1806 break; 1807 case ALGORITHM_LEFT_SYMMETRIC: 1808 pd_idx = data_disks - sector_div(stripe2, raid_disks); 1809 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 1810 break; 1811 case ALGORITHM_RIGHT_SYMMETRIC: 1812 pd_idx = sector_div(stripe2, raid_disks); 1813 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 1814 break; 1815 case ALGORITHM_PARITY_0: 1816 pd_idx = 0; 1817 (*dd_idx)++; 1818 break; 1819 case ALGORITHM_PARITY_N: 1820 pd_idx = data_disks; 1821 break; 1822 default: 1823 BUG(); 1824 } 1825 break; 1826 case 6: 1827 1828 switch (algorithm) { 1829 case ALGORITHM_LEFT_ASYMMETRIC: 1830 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 1831 qd_idx = pd_idx + 1; 1832 if (pd_idx == raid_disks-1) { 1833 (*dd_idx)++; /* Q D D D P */ 1834 qd_idx = 0; 1835 } else if (*dd_idx >= pd_idx) 1836 (*dd_idx) += 2; /* D D P Q D */ 1837 break; 1838 case ALGORITHM_RIGHT_ASYMMETRIC: 1839 pd_idx = sector_div(stripe2, raid_disks); 1840 qd_idx = pd_idx + 1; 1841 if (pd_idx == raid_disks-1) { 1842 (*dd_idx)++; /* Q D D D P */ 1843 qd_idx = 0; 1844 } else if (*dd_idx >= pd_idx) 1845 (*dd_idx) += 2; /* D D P Q D */ 1846 break; 1847 case ALGORITHM_LEFT_SYMMETRIC: 1848 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 1849 qd_idx = (pd_idx + 1) % raid_disks; 1850 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 1851 break; 1852 case ALGORITHM_RIGHT_SYMMETRIC: 1853 pd_idx = sector_div(stripe2, raid_disks); 1854 qd_idx = (pd_idx + 1) % raid_disks; 1855 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 1856 break; 1857 1858 case ALGORITHM_PARITY_0: 1859 pd_idx = 0; 1860 qd_idx = 1; 1861 (*dd_idx) += 2; 1862 break; 1863 case ALGORITHM_PARITY_N: 1864 pd_idx = data_disks; 1865 qd_idx = data_disks + 1; 1866 break; 1867 1868 case ALGORITHM_ROTATING_ZERO_RESTART: 1869 /* Exactly the same as RIGHT_ASYMMETRIC, but or 1870 * of blocks for computing Q is different. 1871 */ 1872 pd_idx = sector_div(stripe2, raid_disks); 1873 qd_idx = pd_idx + 1; 1874 if (pd_idx == raid_disks-1) { 1875 (*dd_idx)++; /* Q D D D P */ 1876 qd_idx = 0; 1877 } else if (*dd_idx >= pd_idx) 1878 (*dd_idx) += 2; /* D D P Q D */ 1879 ddf_layout = 1; 1880 break; 1881 1882 case ALGORITHM_ROTATING_N_RESTART: 1883 /* Same a left_asymmetric, by first stripe is 1884 * D D D P Q rather than 1885 * Q D D D P 1886 */ 1887 stripe2 += 1; 1888 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 1889 qd_idx = pd_idx + 1; 1890 if (pd_idx == raid_disks-1) { 1891 (*dd_idx)++; /* Q D D D P */ 1892 qd_idx = 0; 1893 } else if (*dd_idx >= pd_idx) 1894 (*dd_idx) += 2; /* D D P Q D */ 1895 ddf_layout = 1; 1896 break; 1897 1898 case ALGORITHM_ROTATING_N_CONTINUE: 1899 /* Same as left_symmetric but Q is before P */ 1900 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 1901 qd_idx = (pd_idx + raid_disks - 1) % raid_disks; 1902 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 1903 ddf_layout = 1; 1904 break; 1905 1906 case ALGORITHM_LEFT_ASYMMETRIC_6: 1907 /* RAID5 left_asymmetric, with Q on last device */ 1908 pd_idx = data_disks - sector_div(stripe2, raid_disks-1); 1909 if (*dd_idx >= pd_idx) 1910 (*dd_idx)++; 1911 qd_idx = raid_disks - 1; 1912 break; 1913 1914 case ALGORITHM_RIGHT_ASYMMETRIC_6: 1915 pd_idx = sector_div(stripe2, raid_disks-1); 1916 if (*dd_idx >= pd_idx) 1917 (*dd_idx)++; 1918 qd_idx = raid_disks - 1; 1919 break; 1920 1921 case ALGORITHM_LEFT_SYMMETRIC_6: 1922 pd_idx = data_disks - sector_div(stripe2, raid_disks-1); 1923 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 1924 qd_idx = raid_disks - 1; 1925 break; 1926 1927 case ALGORITHM_RIGHT_SYMMETRIC_6: 1928 pd_idx = sector_div(stripe2, raid_disks-1); 1929 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 1930 qd_idx = raid_disks - 1; 1931 break; 1932 1933 case ALGORITHM_PARITY_0_6: 1934 pd_idx = 0; 1935 (*dd_idx)++; 1936 qd_idx = raid_disks - 1; 1937 break; 1938 1939 default: 1940 BUG(); 1941 } 1942 break; 1943 } 1944 1945 if (sh) { 1946 sh->pd_idx = pd_idx; 1947 sh->qd_idx = qd_idx; 1948 sh->ddf_layout = ddf_layout; 1949 } 1950 /* 1951 * Finally, compute the new sector number 1952 */ 1953 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset; 1954 return new_sector; 1955 } 1956 1957 1958 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) 1959 { 1960 struct r5conf *conf = sh->raid_conf; 1961 int raid_disks = sh->disks; 1962 int data_disks = raid_disks - conf->max_degraded; 1963 sector_t new_sector = sh->sector, check; 1964 int sectors_per_chunk = previous ? conf->prev_chunk_sectors 1965 : conf->chunk_sectors; 1966 int algorithm = previous ? conf->prev_algo 1967 : conf->algorithm; 1968 sector_t stripe; 1969 int chunk_offset; 1970 sector_t chunk_number; 1971 int dummy1, dd_idx = i; 1972 sector_t r_sector; 1973 struct stripe_head sh2; 1974 1975 1976 chunk_offset = sector_div(new_sector, sectors_per_chunk); 1977 stripe = new_sector; 1978 1979 if (i == sh->pd_idx) 1980 return 0; 1981 switch(conf->level) { 1982 case 4: break; 1983 case 5: 1984 switch (algorithm) { 1985 case ALGORITHM_LEFT_ASYMMETRIC: 1986 case ALGORITHM_RIGHT_ASYMMETRIC: 1987 if (i > sh->pd_idx) 1988 i--; 1989 break; 1990 case ALGORITHM_LEFT_SYMMETRIC: 1991 case ALGORITHM_RIGHT_SYMMETRIC: 1992 if (i < sh->pd_idx) 1993 i += raid_disks; 1994 i -= (sh->pd_idx + 1); 1995 break; 1996 case ALGORITHM_PARITY_0: 1997 i -= 1; 1998 break; 1999 case ALGORITHM_PARITY_N: 2000 break; 2001 default: 2002 BUG(); 2003 } 2004 break; 2005 case 6: 2006 if (i == sh->qd_idx) 2007 return 0; /* It is the Q disk */ 2008 switch (algorithm) { 2009 case ALGORITHM_LEFT_ASYMMETRIC: 2010 case ALGORITHM_RIGHT_ASYMMETRIC: 2011 case ALGORITHM_ROTATING_ZERO_RESTART: 2012 case ALGORITHM_ROTATING_N_RESTART: 2013 if (sh->pd_idx == raid_disks-1) 2014 i--; /* Q D D D P */ 2015 else if (i > sh->pd_idx) 2016 i -= 2; /* D D P Q D */ 2017 break; 2018 case ALGORITHM_LEFT_SYMMETRIC: 2019 case ALGORITHM_RIGHT_SYMMETRIC: 2020 if (sh->pd_idx == raid_disks-1) 2021 i--; /* Q D D D P */ 2022 else { 2023 /* D D P Q D */ 2024 if (i < sh->pd_idx) 2025 i += raid_disks; 2026 i -= (sh->pd_idx + 2); 2027 } 2028 break; 2029 case ALGORITHM_PARITY_0: 2030 i -= 2; 2031 break; 2032 case ALGORITHM_PARITY_N: 2033 break; 2034 case ALGORITHM_ROTATING_N_CONTINUE: 2035 /* Like left_symmetric, but P is before Q */ 2036 if (sh->pd_idx == 0) 2037 i--; /* P D D D Q */ 2038 else { 2039 /* D D Q P D */ 2040 if (i < sh->pd_idx) 2041 i += raid_disks; 2042 i -= (sh->pd_idx + 1); 2043 } 2044 break; 2045 case ALGORITHM_LEFT_ASYMMETRIC_6: 2046 case ALGORITHM_RIGHT_ASYMMETRIC_6: 2047 if (i > sh->pd_idx) 2048 i--; 2049 break; 2050 case ALGORITHM_LEFT_SYMMETRIC_6: 2051 case ALGORITHM_RIGHT_SYMMETRIC_6: 2052 if (i < sh->pd_idx) 2053 i += data_disks + 1; 2054 i -= (sh->pd_idx + 1); 2055 break; 2056 case ALGORITHM_PARITY_0_6: 2057 i -= 1; 2058 break; 2059 default: 2060 BUG(); 2061 } 2062 break; 2063 } 2064 2065 chunk_number = stripe * data_disks + i; 2066 r_sector = chunk_number * sectors_per_chunk + chunk_offset; 2067 2068 check = raid5_compute_sector(conf, r_sector, 2069 previous, &dummy1, &sh2); 2070 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx 2071 || sh2.qd_idx != sh->qd_idx) { 2072 printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n", 2073 mdname(conf->mddev)); 2074 return 0; 2075 } 2076 return r_sector; 2077 } 2078 2079 2080 static void 2081 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, 2082 int rcw, int expand) 2083 { 2084 int i, pd_idx = sh->pd_idx, disks = sh->disks; 2085 struct r5conf *conf = sh->raid_conf; 2086 int level = conf->level; 2087 2088 if (rcw) { 2089 /* if we are not expanding this is a proper write request, and 2090 * there will be bios with new data to be drained into the 2091 * stripe cache 2092 */ 2093 if (!expand) { 2094 sh->reconstruct_state = reconstruct_state_drain_run; 2095 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 2096 } else 2097 sh->reconstruct_state = reconstruct_state_run; 2098 2099 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); 2100 2101 for (i = disks; i--; ) { 2102 struct r5dev *dev = &sh->dev[i]; 2103 2104 if (dev->towrite) { 2105 set_bit(R5_LOCKED, &dev->flags); 2106 set_bit(R5_Wantdrain, &dev->flags); 2107 if (!expand) 2108 clear_bit(R5_UPTODATE, &dev->flags); 2109 s->locked++; 2110 } 2111 } 2112 if (s->locked + conf->max_degraded == disks) 2113 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) 2114 atomic_inc(&conf->pending_full_writes); 2115 } else { 2116 BUG_ON(level == 6); 2117 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || 2118 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); 2119 2120 sh->reconstruct_state = reconstruct_state_prexor_drain_run; 2121 set_bit(STRIPE_OP_PREXOR, &s->ops_request); 2122 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 2123 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); 2124 2125 for (i = disks; i--; ) { 2126 struct r5dev *dev = &sh->dev[i]; 2127 if (i == pd_idx) 2128 continue; 2129 2130 if (dev->towrite && 2131 (test_bit(R5_UPTODATE, &dev->flags) || 2132 test_bit(R5_Wantcompute, &dev->flags))) { 2133 set_bit(R5_Wantdrain, &dev->flags); 2134 set_bit(R5_LOCKED, &dev->flags); 2135 clear_bit(R5_UPTODATE, &dev->flags); 2136 s->locked++; 2137 } 2138 } 2139 } 2140 2141 /* keep the parity disk(s) locked while asynchronous operations 2142 * are in flight 2143 */ 2144 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); 2145 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 2146 s->locked++; 2147 2148 if (level == 6) { 2149 int qd_idx = sh->qd_idx; 2150 struct r5dev *dev = &sh->dev[qd_idx]; 2151 2152 set_bit(R5_LOCKED, &dev->flags); 2153 clear_bit(R5_UPTODATE, &dev->flags); 2154 s->locked++; 2155 } 2156 2157 pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n", 2158 __func__, (unsigned long long)sh->sector, 2159 s->locked, s->ops_request); 2160 } 2161 2162 /* 2163 * Each stripe/dev can have one or more bion attached. 2164 * toread/towrite point to the first in a chain. 2165 * The bi_next chain must be in order. 2166 */ 2167 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite) 2168 { 2169 struct bio **bip; 2170 struct r5conf *conf = sh->raid_conf; 2171 int firstwrite=0; 2172 2173 pr_debug("adding bi b#%llu to stripe s#%llu\n", 2174 (unsigned long long)bi->bi_sector, 2175 (unsigned long long)sh->sector); 2176 2177 2178 spin_lock_irq(&conf->device_lock); 2179 if (forwrite) { 2180 bip = &sh->dev[dd_idx].towrite; 2181 if (*bip == NULL && sh->dev[dd_idx].written == NULL) 2182 firstwrite = 1; 2183 } else 2184 bip = &sh->dev[dd_idx].toread; 2185 while (*bip && (*bip)->bi_sector < bi->bi_sector) { 2186 if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector) 2187 goto overlap; 2188 bip = & (*bip)->bi_next; 2189 } 2190 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9)) 2191 goto overlap; 2192 2193 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); 2194 if (*bip) 2195 bi->bi_next = *bip; 2196 *bip = bi; 2197 bi->bi_phys_segments++; 2198 2199 if (forwrite) { 2200 /* check if page is covered */ 2201 sector_t sector = sh->dev[dd_idx].sector; 2202 for (bi=sh->dev[dd_idx].towrite; 2203 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && 2204 bi && bi->bi_sector <= sector; 2205 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { 2206 if (bi->bi_sector + (bi->bi_size>>9) >= sector) 2207 sector = bi->bi_sector + (bi->bi_size>>9); 2208 } 2209 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) 2210 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags); 2211 } 2212 spin_unlock_irq(&conf->device_lock); 2213 2214 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", 2215 (unsigned long long)(*bip)->bi_sector, 2216 (unsigned long long)sh->sector, dd_idx); 2217 2218 if (conf->mddev->bitmap && firstwrite) { 2219 bitmap_startwrite(conf->mddev->bitmap, sh->sector, 2220 STRIPE_SECTORS, 0); 2221 sh->bm_seq = conf->seq_flush+1; 2222 set_bit(STRIPE_BIT_DELAY, &sh->state); 2223 } 2224 return 1; 2225 2226 overlap: 2227 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); 2228 spin_unlock_irq(&conf->device_lock); 2229 return 0; 2230 } 2231 2232 static void end_reshape(struct r5conf *conf); 2233 2234 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, 2235 struct stripe_head *sh) 2236 { 2237 int sectors_per_chunk = 2238 previous ? conf->prev_chunk_sectors : conf->chunk_sectors; 2239 int dd_idx; 2240 int chunk_offset = sector_div(stripe, sectors_per_chunk); 2241 int disks = previous ? conf->previous_raid_disks : conf->raid_disks; 2242 2243 raid5_compute_sector(conf, 2244 stripe * (disks - conf->max_degraded) 2245 *sectors_per_chunk + chunk_offset, 2246 previous, 2247 &dd_idx, sh); 2248 } 2249 2250 static void 2251 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, 2252 struct stripe_head_state *s, int disks, 2253 struct bio **return_bi) 2254 { 2255 int i; 2256 for (i = disks; i--; ) { 2257 struct bio *bi; 2258 int bitmap_end = 0; 2259 2260 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 2261 struct md_rdev *rdev; 2262 rcu_read_lock(); 2263 rdev = rcu_dereference(conf->disks[i].rdev); 2264 if (rdev && test_bit(In_sync, &rdev->flags)) 2265 atomic_inc(&rdev->nr_pending); 2266 else 2267 rdev = NULL; 2268 rcu_read_unlock(); 2269 if (rdev) { 2270 if (!rdev_set_badblocks( 2271 rdev, 2272 sh->sector, 2273 STRIPE_SECTORS, 0)) 2274 md_error(conf->mddev, rdev); 2275 rdev_dec_pending(rdev, conf->mddev); 2276 } 2277 } 2278 spin_lock_irq(&conf->device_lock); 2279 /* fail all writes first */ 2280 bi = sh->dev[i].towrite; 2281 sh->dev[i].towrite = NULL; 2282 if (bi) { 2283 s->to_write--; 2284 bitmap_end = 1; 2285 } 2286 2287 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2288 wake_up(&conf->wait_for_overlap); 2289 2290 while (bi && bi->bi_sector < 2291 sh->dev[i].sector + STRIPE_SECTORS) { 2292 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 2293 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2294 if (!raid5_dec_bi_phys_segments(bi)) { 2295 md_write_end(conf->mddev); 2296 bi->bi_next = *return_bi; 2297 *return_bi = bi; 2298 } 2299 bi = nextbi; 2300 } 2301 /* and fail all 'written' */ 2302 bi = sh->dev[i].written; 2303 sh->dev[i].written = NULL; 2304 if (bi) bitmap_end = 1; 2305 while (bi && bi->bi_sector < 2306 sh->dev[i].sector + STRIPE_SECTORS) { 2307 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 2308 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2309 if (!raid5_dec_bi_phys_segments(bi)) { 2310 md_write_end(conf->mddev); 2311 bi->bi_next = *return_bi; 2312 *return_bi = bi; 2313 } 2314 bi = bi2; 2315 } 2316 2317 /* fail any reads if this device is non-operational and 2318 * the data has not reached the cache yet. 2319 */ 2320 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && 2321 (!test_bit(R5_Insync, &sh->dev[i].flags) || 2322 test_bit(R5_ReadError, &sh->dev[i].flags))) { 2323 bi = sh->dev[i].toread; 2324 sh->dev[i].toread = NULL; 2325 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2326 wake_up(&conf->wait_for_overlap); 2327 if (bi) s->to_read--; 2328 while (bi && bi->bi_sector < 2329 sh->dev[i].sector + STRIPE_SECTORS) { 2330 struct bio *nextbi = 2331 r5_next_bio(bi, sh->dev[i].sector); 2332 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2333 if (!raid5_dec_bi_phys_segments(bi)) { 2334 bi->bi_next = *return_bi; 2335 *return_bi = bi; 2336 } 2337 bi = nextbi; 2338 } 2339 } 2340 spin_unlock_irq(&conf->device_lock); 2341 if (bitmap_end) 2342 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 2343 STRIPE_SECTORS, 0, 0); 2344 /* If we were in the middle of a write the parity block might 2345 * still be locked - so just clear all R5_LOCKED flags 2346 */ 2347 clear_bit(R5_LOCKED, &sh->dev[i].flags); 2348 } 2349 2350 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 2351 if (atomic_dec_and_test(&conf->pending_full_writes)) 2352 md_wakeup_thread(conf->mddev->thread); 2353 } 2354 2355 static void 2356 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, 2357 struct stripe_head_state *s) 2358 { 2359 int abort = 0; 2360 int i; 2361 2362 md_done_sync(conf->mddev, STRIPE_SECTORS, 0); 2363 clear_bit(STRIPE_SYNCING, &sh->state); 2364 s->syncing = 0; 2365 /* There is nothing more to do for sync/check/repair. 2366 * For recover we need to record a bad block on all 2367 * non-sync devices, or abort the recovery 2368 */ 2369 if (!test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) 2370 return; 2371 /* During recovery devices cannot be removed, so locking and 2372 * refcounting of rdevs is not needed 2373 */ 2374 for (i = 0; i < conf->raid_disks; i++) { 2375 struct md_rdev *rdev = conf->disks[i].rdev; 2376 if (!rdev 2377 || test_bit(Faulty, &rdev->flags) 2378 || test_bit(In_sync, &rdev->flags)) 2379 continue; 2380 if (!rdev_set_badblocks(rdev, sh->sector, 2381 STRIPE_SECTORS, 0)) 2382 abort = 1; 2383 } 2384 if (abort) { 2385 conf->recovery_disabled = conf->mddev->recovery_disabled; 2386 set_bit(MD_RECOVERY_INTR, &conf->mddev->recovery); 2387 } 2388 } 2389 2390 /* fetch_block - checks the given member device to see if its data needs 2391 * to be read or computed to satisfy a request. 2392 * 2393 * Returns 1 when no more member devices need to be checked, otherwise returns 2394 * 0 to tell the loop in handle_stripe_fill to continue 2395 */ 2396 static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, 2397 int disk_idx, int disks) 2398 { 2399 struct r5dev *dev = &sh->dev[disk_idx]; 2400 struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]], 2401 &sh->dev[s->failed_num[1]] }; 2402 2403 /* is the data in this block needed, and can we get it? */ 2404 if (!test_bit(R5_LOCKED, &dev->flags) && 2405 !test_bit(R5_UPTODATE, &dev->flags) && 2406 (dev->toread || 2407 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || 2408 s->syncing || s->expanding || 2409 (s->failed >= 1 && fdev[0]->toread) || 2410 (s->failed >= 2 && fdev[1]->toread) || 2411 (sh->raid_conf->level <= 5 && s->failed && fdev[0]->towrite && 2412 !test_bit(R5_OVERWRITE, &fdev[0]->flags)) || 2413 (sh->raid_conf->level == 6 && s->failed && s->to_write))) { 2414 /* we would like to get this block, possibly by computing it, 2415 * otherwise read it if the backing disk is insync 2416 */ 2417 BUG_ON(test_bit(R5_Wantcompute, &dev->flags)); 2418 BUG_ON(test_bit(R5_Wantread, &dev->flags)); 2419 if ((s->uptodate == disks - 1) && 2420 (s->failed && (disk_idx == s->failed_num[0] || 2421 disk_idx == s->failed_num[1]))) { 2422 /* have disk failed, and we're requested to fetch it; 2423 * do compute it 2424 */ 2425 pr_debug("Computing stripe %llu block %d\n", 2426 (unsigned long long)sh->sector, disk_idx); 2427 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 2428 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 2429 set_bit(R5_Wantcompute, &dev->flags); 2430 sh->ops.target = disk_idx; 2431 sh->ops.target2 = -1; /* no 2nd target */ 2432 s->req_compute = 1; 2433 /* Careful: from this point on 'uptodate' is in the eye 2434 * of raid_run_ops which services 'compute' operations 2435 * before writes. R5_Wantcompute flags a block that will 2436 * be R5_UPTODATE by the time it is needed for a 2437 * subsequent operation. 2438 */ 2439 s->uptodate++; 2440 return 1; 2441 } else if (s->uptodate == disks-2 && s->failed >= 2) { 2442 /* Computing 2-failure is *very* expensive; only 2443 * do it if failed >= 2 2444 */ 2445 int other; 2446 for (other = disks; other--; ) { 2447 if (other == disk_idx) 2448 continue; 2449 if (!test_bit(R5_UPTODATE, 2450 &sh->dev[other].flags)) 2451 break; 2452 } 2453 BUG_ON(other < 0); 2454 pr_debug("Computing stripe %llu blocks %d,%d\n", 2455 (unsigned long long)sh->sector, 2456 disk_idx, other); 2457 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 2458 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 2459 set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags); 2460 set_bit(R5_Wantcompute, &sh->dev[other].flags); 2461 sh->ops.target = disk_idx; 2462 sh->ops.target2 = other; 2463 s->uptodate += 2; 2464 s->req_compute = 1; 2465 return 1; 2466 } else if (test_bit(R5_Insync, &dev->flags)) { 2467 set_bit(R5_LOCKED, &dev->flags); 2468 set_bit(R5_Wantread, &dev->flags); 2469 s->locked++; 2470 pr_debug("Reading block %d (sync=%d)\n", 2471 disk_idx, s->syncing); 2472 } 2473 } 2474 2475 return 0; 2476 } 2477 2478 /** 2479 * handle_stripe_fill - read or compute data to satisfy pending requests. 2480 */ 2481 static void handle_stripe_fill(struct stripe_head *sh, 2482 struct stripe_head_state *s, 2483 int disks) 2484 { 2485 int i; 2486 2487 /* look for blocks to read/compute, skip this if a compute 2488 * is already in flight, or if the stripe contents are in the 2489 * midst of changing due to a write 2490 */ 2491 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && 2492 !sh->reconstruct_state) 2493 for (i = disks; i--; ) 2494 if (fetch_block(sh, s, i, disks)) 2495 break; 2496 set_bit(STRIPE_HANDLE, &sh->state); 2497 } 2498 2499 2500 /* handle_stripe_clean_event 2501 * any written block on an uptodate or failed drive can be returned. 2502 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but 2503 * never LOCKED, so we don't need to test 'failed' directly. 2504 */ 2505 static void handle_stripe_clean_event(struct r5conf *conf, 2506 struct stripe_head *sh, int disks, struct bio **return_bi) 2507 { 2508 int i; 2509 struct r5dev *dev; 2510 2511 for (i = disks; i--; ) 2512 if (sh->dev[i].written) { 2513 dev = &sh->dev[i]; 2514 if (!test_bit(R5_LOCKED, &dev->flags) && 2515 test_bit(R5_UPTODATE, &dev->flags)) { 2516 /* We can return any write requests */ 2517 struct bio *wbi, *wbi2; 2518 int bitmap_end = 0; 2519 pr_debug("Return write for disc %d\n", i); 2520 spin_lock_irq(&conf->device_lock); 2521 wbi = dev->written; 2522 dev->written = NULL; 2523 while (wbi && wbi->bi_sector < 2524 dev->sector + STRIPE_SECTORS) { 2525 wbi2 = r5_next_bio(wbi, dev->sector); 2526 if (!raid5_dec_bi_phys_segments(wbi)) { 2527 md_write_end(conf->mddev); 2528 wbi->bi_next = *return_bi; 2529 *return_bi = wbi; 2530 } 2531 wbi = wbi2; 2532 } 2533 if (dev->towrite == NULL) 2534 bitmap_end = 1; 2535 spin_unlock_irq(&conf->device_lock); 2536 if (bitmap_end) 2537 bitmap_endwrite(conf->mddev->bitmap, 2538 sh->sector, 2539 STRIPE_SECTORS, 2540 !test_bit(STRIPE_DEGRADED, &sh->state), 2541 0); 2542 } 2543 } 2544 2545 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 2546 if (atomic_dec_and_test(&conf->pending_full_writes)) 2547 md_wakeup_thread(conf->mddev->thread); 2548 } 2549 2550 static void handle_stripe_dirtying(struct r5conf *conf, 2551 struct stripe_head *sh, 2552 struct stripe_head_state *s, 2553 int disks) 2554 { 2555 int rmw = 0, rcw = 0, i; 2556 if (conf->max_degraded == 2) { 2557 /* RAID6 requires 'rcw' in current implementation 2558 * Calculate the real rcw later - for now fake it 2559 * look like rcw is cheaper 2560 */ 2561 rcw = 1; rmw = 2; 2562 } else for (i = disks; i--; ) { 2563 /* would I have to read this buffer for read_modify_write */ 2564 struct r5dev *dev = &sh->dev[i]; 2565 if ((dev->towrite || i == sh->pd_idx) && 2566 !test_bit(R5_LOCKED, &dev->flags) && 2567 !(test_bit(R5_UPTODATE, &dev->flags) || 2568 test_bit(R5_Wantcompute, &dev->flags))) { 2569 if (test_bit(R5_Insync, &dev->flags)) 2570 rmw++; 2571 else 2572 rmw += 2*disks; /* cannot read it */ 2573 } 2574 /* Would I have to read this buffer for reconstruct_write */ 2575 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx && 2576 !test_bit(R5_LOCKED, &dev->flags) && 2577 !(test_bit(R5_UPTODATE, &dev->flags) || 2578 test_bit(R5_Wantcompute, &dev->flags))) { 2579 if (test_bit(R5_Insync, &dev->flags)) rcw++; 2580 else 2581 rcw += 2*disks; 2582 } 2583 } 2584 pr_debug("for sector %llu, rmw=%d rcw=%d\n", 2585 (unsigned long long)sh->sector, rmw, rcw); 2586 set_bit(STRIPE_HANDLE, &sh->state); 2587 if (rmw < rcw && rmw > 0) 2588 /* prefer read-modify-write, but need to get some data */ 2589 for (i = disks; i--; ) { 2590 struct r5dev *dev = &sh->dev[i]; 2591 if ((dev->towrite || i == sh->pd_idx) && 2592 !test_bit(R5_LOCKED, &dev->flags) && 2593 !(test_bit(R5_UPTODATE, &dev->flags) || 2594 test_bit(R5_Wantcompute, &dev->flags)) && 2595 test_bit(R5_Insync, &dev->flags)) { 2596 if ( 2597 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2598 pr_debug("Read_old block " 2599 "%d for r-m-w\n", i); 2600 set_bit(R5_LOCKED, &dev->flags); 2601 set_bit(R5_Wantread, &dev->flags); 2602 s->locked++; 2603 } else { 2604 set_bit(STRIPE_DELAYED, &sh->state); 2605 set_bit(STRIPE_HANDLE, &sh->state); 2606 } 2607 } 2608 } 2609 if (rcw <= rmw && rcw > 0) { 2610 /* want reconstruct write, but need to get some data */ 2611 rcw = 0; 2612 for (i = disks; i--; ) { 2613 struct r5dev *dev = &sh->dev[i]; 2614 if (!test_bit(R5_OVERWRITE, &dev->flags) && 2615 i != sh->pd_idx && i != sh->qd_idx && 2616 !test_bit(R5_LOCKED, &dev->flags) && 2617 !(test_bit(R5_UPTODATE, &dev->flags) || 2618 test_bit(R5_Wantcompute, &dev->flags))) { 2619 rcw++; 2620 if (!test_bit(R5_Insync, &dev->flags)) 2621 continue; /* it's a failed drive */ 2622 if ( 2623 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2624 pr_debug("Read_old block " 2625 "%d for Reconstruct\n", i); 2626 set_bit(R5_LOCKED, &dev->flags); 2627 set_bit(R5_Wantread, &dev->flags); 2628 s->locked++; 2629 } else { 2630 set_bit(STRIPE_DELAYED, &sh->state); 2631 set_bit(STRIPE_HANDLE, &sh->state); 2632 } 2633 } 2634 } 2635 } 2636 /* now if nothing is locked, and if we have enough data, 2637 * we can start a write request 2638 */ 2639 /* since handle_stripe can be called at any time we need to handle the 2640 * case where a compute block operation has been submitted and then a 2641 * subsequent call wants to start a write request. raid_run_ops only 2642 * handles the case where compute block and reconstruct are requested 2643 * simultaneously. If this is not the case then new writes need to be 2644 * held off until the compute completes. 2645 */ 2646 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && 2647 (s->locked == 0 && (rcw == 0 || rmw == 0) && 2648 !test_bit(STRIPE_BIT_DELAY, &sh->state))) 2649 schedule_reconstruction(sh, s, rcw == 0, 0); 2650 } 2651 2652 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, 2653 struct stripe_head_state *s, int disks) 2654 { 2655 struct r5dev *dev = NULL; 2656 2657 set_bit(STRIPE_HANDLE, &sh->state); 2658 2659 switch (sh->check_state) { 2660 case check_state_idle: 2661 /* start a new check operation if there are no failures */ 2662 if (s->failed == 0) { 2663 BUG_ON(s->uptodate != disks); 2664 sh->check_state = check_state_run; 2665 set_bit(STRIPE_OP_CHECK, &s->ops_request); 2666 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); 2667 s->uptodate--; 2668 break; 2669 } 2670 dev = &sh->dev[s->failed_num[0]]; 2671 /* fall through */ 2672 case check_state_compute_result: 2673 sh->check_state = check_state_idle; 2674 if (!dev) 2675 dev = &sh->dev[sh->pd_idx]; 2676 2677 /* check that a write has not made the stripe insync */ 2678 if (test_bit(STRIPE_INSYNC, &sh->state)) 2679 break; 2680 2681 /* either failed parity check, or recovery is happening */ 2682 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); 2683 BUG_ON(s->uptodate != disks); 2684 2685 set_bit(R5_LOCKED, &dev->flags); 2686 s->locked++; 2687 set_bit(R5_Wantwrite, &dev->flags); 2688 2689 clear_bit(STRIPE_DEGRADED, &sh->state); 2690 set_bit(STRIPE_INSYNC, &sh->state); 2691 break; 2692 case check_state_run: 2693 break; /* we will be called again upon completion */ 2694 case check_state_check_result: 2695 sh->check_state = check_state_idle; 2696 2697 /* if a failure occurred during the check operation, leave 2698 * STRIPE_INSYNC not set and let the stripe be handled again 2699 */ 2700 if (s->failed) 2701 break; 2702 2703 /* handle a successful check operation, if parity is correct 2704 * we are done. Otherwise update the mismatch count and repair 2705 * parity if !MD_RECOVERY_CHECK 2706 */ 2707 if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0) 2708 /* parity is correct (on disc, 2709 * not in buffer any more) 2710 */ 2711 set_bit(STRIPE_INSYNC, &sh->state); 2712 else { 2713 conf->mddev->resync_mismatches += STRIPE_SECTORS; 2714 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 2715 /* don't try to repair!! */ 2716 set_bit(STRIPE_INSYNC, &sh->state); 2717 else { 2718 sh->check_state = check_state_compute_run; 2719 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 2720 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 2721 set_bit(R5_Wantcompute, 2722 &sh->dev[sh->pd_idx].flags); 2723 sh->ops.target = sh->pd_idx; 2724 sh->ops.target2 = -1; 2725 s->uptodate++; 2726 } 2727 } 2728 break; 2729 case check_state_compute_run: 2730 break; 2731 default: 2732 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n", 2733 __func__, sh->check_state, 2734 (unsigned long long) sh->sector); 2735 BUG(); 2736 } 2737 } 2738 2739 2740 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, 2741 struct stripe_head_state *s, 2742 int disks) 2743 { 2744 int pd_idx = sh->pd_idx; 2745 int qd_idx = sh->qd_idx; 2746 struct r5dev *dev; 2747 2748 set_bit(STRIPE_HANDLE, &sh->state); 2749 2750 BUG_ON(s->failed > 2); 2751 2752 /* Want to check and possibly repair P and Q. 2753 * However there could be one 'failed' device, in which 2754 * case we can only check one of them, possibly using the 2755 * other to generate missing data 2756 */ 2757 2758 switch (sh->check_state) { 2759 case check_state_idle: 2760 /* start a new check operation if there are < 2 failures */ 2761 if (s->failed == s->q_failed) { 2762 /* The only possible failed device holds Q, so it 2763 * makes sense to check P (If anything else were failed, 2764 * we would have used P to recreate it). 2765 */ 2766 sh->check_state = check_state_run; 2767 } 2768 if (!s->q_failed && s->failed < 2) { 2769 /* Q is not failed, and we didn't use it to generate 2770 * anything, so it makes sense to check it 2771 */ 2772 if (sh->check_state == check_state_run) 2773 sh->check_state = check_state_run_pq; 2774 else 2775 sh->check_state = check_state_run_q; 2776 } 2777 2778 /* discard potentially stale zero_sum_result */ 2779 sh->ops.zero_sum_result = 0; 2780 2781 if (sh->check_state == check_state_run) { 2782 /* async_xor_zero_sum destroys the contents of P */ 2783 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 2784 s->uptodate--; 2785 } 2786 if (sh->check_state >= check_state_run && 2787 sh->check_state <= check_state_run_pq) { 2788 /* async_syndrome_zero_sum preserves P and Q, so 2789 * no need to mark them !uptodate here 2790 */ 2791 set_bit(STRIPE_OP_CHECK, &s->ops_request); 2792 break; 2793 } 2794 2795 /* we have 2-disk failure */ 2796 BUG_ON(s->failed != 2); 2797 /* fall through */ 2798 case check_state_compute_result: 2799 sh->check_state = check_state_idle; 2800 2801 /* check that a write has not made the stripe insync */ 2802 if (test_bit(STRIPE_INSYNC, &sh->state)) 2803 break; 2804 2805 /* now write out any block on a failed drive, 2806 * or P or Q if they were recomputed 2807 */ 2808 BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */ 2809 if (s->failed == 2) { 2810 dev = &sh->dev[s->failed_num[1]]; 2811 s->locked++; 2812 set_bit(R5_LOCKED, &dev->flags); 2813 set_bit(R5_Wantwrite, &dev->flags); 2814 } 2815 if (s->failed >= 1) { 2816 dev = &sh->dev[s->failed_num[0]]; 2817 s->locked++; 2818 set_bit(R5_LOCKED, &dev->flags); 2819 set_bit(R5_Wantwrite, &dev->flags); 2820 } 2821 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { 2822 dev = &sh->dev[pd_idx]; 2823 s->locked++; 2824 set_bit(R5_LOCKED, &dev->flags); 2825 set_bit(R5_Wantwrite, &dev->flags); 2826 } 2827 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { 2828 dev = &sh->dev[qd_idx]; 2829 s->locked++; 2830 set_bit(R5_LOCKED, &dev->flags); 2831 set_bit(R5_Wantwrite, &dev->flags); 2832 } 2833 clear_bit(STRIPE_DEGRADED, &sh->state); 2834 2835 set_bit(STRIPE_INSYNC, &sh->state); 2836 break; 2837 case check_state_run: 2838 case check_state_run_q: 2839 case check_state_run_pq: 2840 break; /* we will be called again upon completion */ 2841 case check_state_check_result: 2842 sh->check_state = check_state_idle; 2843 2844 /* handle a successful check operation, if parity is correct 2845 * we are done. Otherwise update the mismatch count and repair 2846 * parity if !MD_RECOVERY_CHECK 2847 */ 2848 if (sh->ops.zero_sum_result == 0) { 2849 /* both parities are correct */ 2850 if (!s->failed) 2851 set_bit(STRIPE_INSYNC, &sh->state); 2852 else { 2853 /* in contrast to the raid5 case we can validate 2854 * parity, but still have a failure to write 2855 * back 2856 */ 2857 sh->check_state = check_state_compute_result; 2858 /* Returning at this point means that we may go 2859 * off and bring p and/or q uptodate again so 2860 * we make sure to check zero_sum_result again 2861 * to verify if p or q need writeback 2862 */ 2863 } 2864 } else { 2865 conf->mddev->resync_mismatches += STRIPE_SECTORS; 2866 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 2867 /* don't try to repair!! */ 2868 set_bit(STRIPE_INSYNC, &sh->state); 2869 else { 2870 int *target = &sh->ops.target; 2871 2872 sh->ops.target = -1; 2873 sh->ops.target2 = -1; 2874 sh->check_state = check_state_compute_run; 2875 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 2876 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 2877 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { 2878 set_bit(R5_Wantcompute, 2879 &sh->dev[pd_idx].flags); 2880 *target = pd_idx; 2881 target = &sh->ops.target2; 2882 s->uptodate++; 2883 } 2884 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { 2885 set_bit(R5_Wantcompute, 2886 &sh->dev[qd_idx].flags); 2887 *target = qd_idx; 2888 s->uptodate++; 2889 } 2890 } 2891 } 2892 break; 2893 case check_state_compute_run: 2894 break; 2895 default: 2896 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n", 2897 __func__, sh->check_state, 2898 (unsigned long long) sh->sector); 2899 BUG(); 2900 } 2901 } 2902 2903 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) 2904 { 2905 int i; 2906 2907 /* We have read all the blocks in this stripe and now we need to 2908 * copy some of them into a target stripe for expand. 2909 */ 2910 struct dma_async_tx_descriptor *tx = NULL; 2911 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); 2912 for (i = 0; i < sh->disks; i++) 2913 if (i != sh->pd_idx && i != sh->qd_idx) { 2914 int dd_idx, j; 2915 struct stripe_head *sh2; 2916 struct async_submit_ctl submit; 2917 2918 sector_t bn = compute_blocknr(sh, i, 1); 2919 sector_t s = raid5_compute_sector(conf, bn, 0, 2920 &dd_idx, NULL); 2921 sh2 = get_active_stripe(conf, s, 0, 1, 1); 2922 if (sh2 == NULL) 2923 /* so far only the early blocks of this stripe 2924 * have been requested. When later blocks 2925 * get requested, we will try again 2926 */ 2927 continue; 2928 if (!test_bit(STRIPE_EXPANDING, &sh2->state) || 2929 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) { 2930 /* must have already done this block */ 2931 release_stripe(sh2); 2932 continue; 2933 } 2934 2935 /* place all the copies on one channel */ 2936 init_async_submit(&submit, 0, tx, NULL, NULL, NULL); 2937 tx = async_memcpy(sh2->dev[dd_idx].page, 2938 sh->dev[i].page, 0, 0, STRIPE_SIZE, 2939 &submit); 2940 2941 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); 2942 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); 2943 for (j = 0; j < conf->raid_disks; j++) 2944 if (j != sh2->pd_idx && 2945 j != sh2->qd_idx && 2946 !test_bit(R5_Expanded, &sh2->dev[j].flags)) 2947 break; 2948 if (j == conf->raid_disks) { 2949 set_bit(STRIPE_EXPAND_READY, &sh2->state); 2950 set_bit(STRIPE_HANDLE, &sh2->state); 2951 } 2952 release_stripe(sh2); 2953 2954 } 2955 /* done submitting copies, wait for them to complete */ 2956 if (tx) { 2957 async_tx_ack(tx); 2958 dma_wait_for_async_tx(tx); 2959 } 2960 } 2961 2962 2963 /* 2964 * handle_stripe - do things to a stripe. 2965 * 2966 * We lock the stripe and then examine the state of various bits 2967 * to see what needs to be done. 2968 * Possible results: 2969 * return some read request which now have data 2970 * return some write requests which are safely on disc 2971 * schedule a read on some buffers 2972 * schedule a write of some buffers 2973 * return confirmation of parity correctness 2974 * 2975 * buffers are taken off read_list or write_list, and bh_cache buffers 2976 * get BH_Lock set before the stripe lock is released. 2977 * 2978 */ 2979 2980 static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) 2981 { 2982 struct r5conf *conf = sh->raid_conf; 2983 int disks = sh->disks; 2984 struct r5dev *dev; 2985 int i; 2986 2987 memset(s, 0, sizeof(*s)); 2988 2989 s->syncing = test_bit(STRIPE_SYNCING, &sh->state); 2990 s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); 2991 s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); 2992 s->failed_num[0] = -1; 2993 s->failed_num[1] = -1; 2994 2995 /* Now to look around and see what can be done */ 2996 rcu_read_lock(); 2997 spin_lock_irq(&conf->device_lock); 2998 for (i=disks; i--; ) { 2999 struct md_rdev *rdev; 3000 sector_t first_bad; 3001 int bad_sectors; 3002 int is_bad = 0; 3003 3004 dev = &sh->dev[i]; 3005 3006 pr_debug("check %d: state 0x%lx read %p write %p written %p\n", 3007 i, dev->flags, dev->toread, dev->towrite, dev->written); 3008 /* maybe we can reply to a read 3009 * 3010 * new wantfill requests are only permitted while 3011 * ops_complete_biofill is guaranteed to be inactive 3012 */ 3013 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread && 3014 !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) 3015 set_bit(R5_Wantfill, &dev->flags); 3016 3017 /* now count some things */ 3018 if (test_bit(R5_LOCKED, &dev->flags)) 3019 s->locked++; 3020 if (test_bit(R5_UPTODATE, &dev->flags)) 3021 s->uptodate++; 3022 if (test_bit(R5_Wantcompute, &dev->flags)) { 3023 s->compute++; 3024 BUG_ON(s->compute > 2); 3025 } 3026 3027 if (test_bit(R5_Wantfill, &dev->flags)) 3028 s->to_fill++; 3029 else if (dev->toread) 3030 s->to_read++; 3031 if (dev->towrite) { 3032 s->to_write++; 3033 if (!test_bit(R5_OVERWRITE, &dev->flags)) 3034 s->non_overwrite++; 3035 } 3036 if (dev->written) 3037 s->written++; 3038 rdev = rcu_dereference(conf->disks[i].rdev); 3039 if (rdev) { 3040 is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, 3041 &first_bad, &bad_sectors); 3042 if (s->blocked_rdev == NULL 3043 && (test_bit(Blocked, &rdev->flags) 3044 || is_bad < 0)) { 3045 if (is_bad < 0) 3046 set_bit(BlockedBadBlocks, 3047 &rdev->flags); 3048 s->blocked_rdev = rdev; 3049 atomic_inc(&rdev->nr_pending); 3050 } 3051 } 3052 clear_bit(R5_Insync, &dev->flags); 3053 if (!rdev) 3054 /* Not in-sync */; 3055 else if (is_bad) { 3056 /* also not in-sync */ 3057 if (!test_bit(WriteErrorSeen, &rdev->flags)) { 3058 /* treat as in-sync, but with a read error 3059 * which we can now try to correct 3060 */ 3061 set_bit(R5_Insync, &dev->flags); 3062 set_bit(R5_ReadError, &dev->flags); 3063 } 3064 } else if (test_bit(In_sync, &rdev->flags)) 3065 set_bit(R5_Insync, &dev->flags); 3066 else if (!test_bit(Faulty, &rdev->flags)) { 3067 /* in sync if before recovery_offset */ 3068 if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) 3069 set_bit(R5_Insync, &dev->flags); 3070 } 3071 if (test_bit(R5_WriteError, &dev->flags)) { 3072 clear_bit(R5_Insync, &dev->flags); 3073 if (!test_bit(Faulty, &rdev->flags)) { 3074 s->handle_bad_blocks = 1; 3075 atomic_inc(&rdev->nr_pending); 3076 } else 3077 clear_bit(R5_WriteError, &dev->flags); 3078 } 3079 if (test_bit(R5_MadeGood, &dev->flags)) { 3080 if (!test_bit(Faulty, &rdev->flags)) { 3081 s->handle_bad_blocks = 1; 3082 atomic_inc(&rdev->nr_pending); 3083 } else 3084 clear_bit(R5_MadeGood, &dev->flags); 3085 } 3086 if (!test_bit(R5_Insync, &dev->flags)) { 3087 /* The ReadError flag will just be confusing now */ 3088 clear_bit(R5_ReadError, &dev->flags); 3089 clear_bit(R5_ReWrite, &dev->flags); 3090 } 3091 if (test_bit(R5_ReadError, &dev->flags)) 3092 clear_bit(R5_Insync, &dev->flags); 3093 if (!test_bit(R5_Insync, &dev->flags)) { 3094 if (s->failed < 2) 3095 s->failed_num[s->failed] = i; 3096 s->failed++; 3097 } 3098 } 3099 spin_unlock_irq(&conf->device_lock); 3100 rcu_read_unlock(); 3101 } 3102 3103 static void handle_stripe(struct stripe_head *sh) 3104 { 3105 struct stripe_head_state s; 3106 struct r5conf *conf = sh->raid_conf; 3107 int i; 3108 int prexor; 3109 int disks = sh->disks; 3110 struct r5dev *pdev, *qdev; 3111 3112 clear_bit(STRIPE_HANDLE, &sh->state); 3113 if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) { 3114 /* already being handled, ensure it gets handled 3115 * again when current action finishes */ 3116 set_bit(STRIPE_HANDLE, &sh->state); 3117 return; 3118 } 3119 3120 if (test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { 3121 set_bit(STRIPE_SYNCING, &sh->state); 3122 clear_bit(STRIPE_INSYNC, &sh->state); 3123 } 3124 clear_bit(STRIPE_DELAYED, &sh->state); 3125 3126 pr_debug("handling stripe %llu, state=%#lx cnt=%d, " 3127 "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n", 3128 (unsigned long long)sh->sector, sh->state, 3129 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx, 3130 sh->check_state, sh->reconstruct_state); 3131 3132 analyse_stripe(sh, &s); 3133 3134 if (s.handle_bad_blocks) { 3135 set_bit(STRIPE_HANDLE, &sh->state); 3136 goto finish; 3137 } 3138 3139 if (unlikely(s.blocked_rdev)) { 3140 if (s.syncing || s.expanding || s.expanded || 3141 s.to_write || s.written) { 3142 set_bit(STRIPE_HANDLE, &sh->state); 3143 goto finish; 3144 } 3145 /* There is nothing for the blocked_rdev to block */ 3146 rdev_dec_pending(s.blocked_rdev, conf->mddev); 3147 s.blocked_rdev = NULL; 3148 } 3149 3150 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { 3151 set_bit(STRIPE_OP_BIOFILL, &s.ops_request); 3152 set_bit(STRIPE_BIOFILL_RUN, &sh->state); 3153 } 3154 3155 pr_debug("locked=%d uptodate=%d to_read=%d" 3156 " to_write=%d failed=%d failed_num=%d,%d\n", 3157 s.locked, s.uptodate, s.to_read, s.to_write, s.failed, 3158 s.failed_num[0], s.failed_num[1]); 3159 /* check if the array has lost more than max_degraded devices and, 3160 * if so, some requests might need to be failed. 3161 */ 3162 if (s.failed > conf->max_degraded) { 3163 sh->check_state = 0; 3164 sh->reconstruct_state = 0; 3165 if (s.to_read+s.to_write+s.written) 3166 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); 3167 if (s.syncing) 3168 handle_failed_sync(conf, sh, &s); 3169 } 3170 3171 /* 3172 * might be able to return some write requests if the parity blocks 3173 * are safe, or on a failed drive 3174 */ 3175 pdev = &sh->dev[sh->pd_idx]; 3176 s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) 3177 || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); 3178 qdev = &sh->dev[sh->qd_idx]; 3179 s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) 3180 || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) 3181 || conf->level < 6; 3182 3183 if (s.written && 3184 (s.p_failed || ((test_bit(R5_Insync, &pdev->flags) 3185 && !test_bit(R5_LOCKED, &pdev->flags) 3186 && test_bit(R5_UPTODATE, &pdev->flags)))) && 3187 (s.q_failed || ((test_bit(R5_Insync, &qdev->flags) 3188 && !test_bit(R5_LOCKED, &qdev->flags) 3189 && test_bit(R5_UPTODATE, &qdev->flags))))) 3190 handle_stripe_clean_event(conf, sh, disks, &s.return_bi); 3191 3192 /* Now we might consider reading some blocks, either to check/generate 3193 * parity, or to satisfy requests 3194 * or to load a block that is being partially written. 3195 */ 3196 if (s.to_read || s.non_overwrite 3197 || (conf->level == 6 && s.to_write && s.failed) 3198 || (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding) 3199 handle_stripe_fill(sh, &s, disks); 3200 3201 /* Now we check to see if any write operations have recently 3202 * completed 3203 */ 3204 prexor = 0; 3205 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) 3206 prexor = 1; 3207 if (sh->reconstruct_state == reconstruct_state_drain_result || 3208 sh->reconstruct_state == reconstruct_state_prexor_drain_result) { 3209 sh->reconstruct_state = reconstruct_state_idle; 3210 3211 /* All the 'written' buffers and the parity block are ready to 3212 * be written back to disk 3213 */ 3214 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags)); 3215 BUG_ON(sh->qd_idx >= 0 && 3216 !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags)); 3217 for (i = disks; i--; ) { 3218 struct r5dev *dev = &sh->dev[i]; 3219 if (test_bit(R5_LOCKED, &dev->flags) && 3220 (i == sh->pd_idx || i == sh->qd_idx || 3221 dev->written)) { 3222 pr_debug("Writing block %d\n", i); 3223 set_bit(R5_Wantwrite, &dev->flags); 3224 if (prexor) 3225 continue; 3226 if (!test_bit(R5_Insync, &dev->flags) || 3227 ((i == sh->pd_idx || i == sh->qd_idx) && 3228 s.failed == 0)) 3229 set_bit(STRIPE_INSYNC, &sh->state); 3230 } 3231 } 3232 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3233 s.dec_preread_active = 1; 3234 } 3235 3236 /* Now to consider new write requests and what else, if anything 3237 * should be read. We do not handle new writes when: 3238 * 1/ A 'write' operation (copy+xor) is already in flight. 3239 * 2/ A 'check' operation is in flight, as it may clobber the parity 3240 * block. 3241 */ 3242 if (s.to_write && !sh->reconstruct_state && !sh->check_state) 3243 handle_stripe_dirtying(conf, sh, &s, disks); 3244 3245 /* maybe we need to check and possibly fix the parity for this stripe 3246 * Any reads will already have been scheduled, so we just see if enough 3247 * data is available. The parity check is held off while parity 3248 * dependent operations are in flight. 3249 */ 3250 if (sh->check_state || 3251 (s.syncing && s.locked == 0 && 3252 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && 3253 !test_bit(STRIPE_INSYNC, &sh->state))) { 3254 if (conf->level == 6) 3255 handle_parity_checks6(conf, sh, &s, disks); 3256 else 3257 handle_parity_checks5(conf, sh, &s, disks); 3258 } 3259 3260 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { 3261 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 3262 clear_bit(STRIPE_SYNCING, &sh->state); 3263 } 3264 3265 /* If the failed drives are just a ReadError, then we might need 3266 * to progress the repair/check process 3267 */ 3268 if (s.failed <= conf->max_degraded && !conf->mddev->ro) 3269 for (i = 0; i < s.failed; i++) { 3270 struct r5dev *dev = &sh->dev[s.failed_num[i]]; 3271 if (test_bit(R5_ReadError, &dev->flags) 3272 && !test_bit(R5_LOCKED, &dev->flags) 3273 && test_bit(R5_UPTODATE, &dev->flags) 3274 ) { 3275 if (!test_bit(R5_ReWrite, &dev->flags)) { 3276 set_bit(R5_Wantwrite, &dev->flags); 3277 set_bit(R5_ReWrite, &dev->flags); 3278 set_bit(R5_LOCKED, &dev->flags); 3279 s.locked++; 3280 } else { 3281 /* let's read it back */ 3282 set_bit(R5_Wantread, &dev->flags); 3283 set_bit(R5_LOCKED, &dev->flags); 3284 s.locked++; 3285 } 3286 } 3287 } 3288 3289 3290 /* Finish reconstruct operations initiated by the expansion process */ 3291 if (sh->reconstruct_state == reconstruct_state_result) { 3292 struct stripe_head *sh_src 3293 = get_active_stripe(conf, sh->sector, 1, 1, 1); 3294 if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, &sh_src->state)) { 3295 /* sh cannot be written until sh_src has been read. 3296 * so arrange for sh to be delayed a little 3297 */ 3298 set_bit(STRIPE_DELAYED, &sh->state); 3299 set_bit(STRIPE_HANDLE, &sh->state); 3300 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, 3301 &sh_src->state)) 3302 atomic_inc(&conf->preread_active_stripes); 3303 release_stripe(sh_src); 3304 goto finish; 3305 } 3306 if (sh_src) 3307 release_stripe(sh_src); 3308 3309 sh->reconstruct_state = reconstruct_state_idle; 3310 clear_bit(STRIPE_EXPANDING, &sh->state); 3311 for (i = conf->raid_disks; i--; ) { 3312 set_bit(R5_Wantwrite, &sh->dev[i].flags); 3313 set_bit(R5_LOCKED, &sh->dev[i].flags); 3314 s.locked++; 3315 } 3316 } 3317 3318 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && 3319 !sh->reconstruct_state) { 3320 /* Need to write out all blocks after computing parity */ 3321 sh->disks = conf->raid_disks; 3322 stripe_set_idx(sh->sector, conf, 0, sh); 3323 schedule_reconstruction(sh, &s, 1, 1); 3324 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { 3325 clear_bit(STRIPE_EXPAND_READY, &sh->state); 3326 atomic_dec(&conf->reshape_stripes); 3327 wake_up(&conf->wait_for_overlap); 3328 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 3329 } 3330 3331 if (s.expanding && s.locked == 0 && 3332 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) 3333 handle_stripe_expansion(conf, sh); 3334 3335 finish: 3336 /* wait for this device to become unblocked */ 3337 if (conf->mddev->external && unlikely(s.blocked_rdev)) 3338 md_wait_for_blocked_rdev(s.blocked_rdev, conf->mddev); 3339 3340 if (s.handle_bad_blocks) 3341 for (i = disks; i--; ) { 3342 struct md_rdev *rdev; 3343 struct r5dev *dev = &sh->dev[i]; 3344 if (test_and_clear_bit(R5_WriteError, &dev->flags)) { 3345 /* We own a safe reference to the rdev */ 3346 rdev = conf->disks[i].rdev; 3347 if (!rdev_set_badblocks(rdev, sh->sector, 3348 STRIPE_SECTORS, 0)) 3349 md_error(conf->mddev, rdev); 3350 rdev_dec_pending(rdev, conf->mddev); 3351 } 3352 if (test_and_clear_bit(R5_MadeGood, &dev->flags)) { 3353 rdev = conf->disks[i].rdev; 3354 rdev_clear_badblocks(rdev, sh->sector, 3355 STRIPE_SECTORS); 3356 rdev_dec_pending(rdev, conf->mddev); 3357 } 3358 } 3359 3360 if (s.ops_request) 3361 raid_run_ops(sh, s.ops_request); 3362 3363 ops_run_io(sh, &s); 3364 3365 if (s.dec_preread_active) { 3366 /* We delay this until after ops_run_io so that if make_request 3367 * is waiting on a flush, it won't continue until the writes 3368 * have actually been submitted. 3369 */ 3370 atomic_dec(&conf->preread_active_stripes); 3371 if (atomic_read(&conf->preread_active_stripes) < 3372 IO_THRESHOLD) 3373 md_wakeup_thread(conf->mddev->thread); 3374 } 3375 3376 return_io(s.return_bi); 3377 3378 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); 3379 } 3380 3381 static void raid5_activate_delayed(struct r5conf *conf) 3382 { 3383 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { 3384 while (!list_empty(&conf->delayed_list)) { 3385 struct list_head *l = conf->delayed_list.next; 3386 struct stripe_head *sh; 3387 sh = list_entry(l, struct stripe_head, lru); 3388 list_del_init(l); 3389 clear_bit(STRIPE_DELAYED, &sh->state); 3390 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3391 atomic_inc(&conf->preread_active_stripes); 3392 list_add_tail(&sh->lru, &conf->hold_list); 3393 } 3394 } 3395 } 3396 3397 static void activate_bit_delay(struct r5conf *conf) 3398 { 3399 /* device_lock is held */ 3400 struct list_head head; 3401 list_add(&head, &conf->bitmap_list); 3402 list_del_init(&conf->bitmap_list); 3403 while (!list_empty(&head)) { 3404 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); 3405 list_del_init(&sh->lru); 3406 atomic_inc(&sh->count); 3407 __release_stripe(conf, sh); 3408 } 3409 } 3410 3411 int md_raid5_congested(struct mddev *mddev, int bits) 3412 { 3413 struct r5conf *conf = mddev->private; 3414 3415 /* No difference between reads and writes. Just check 3416 * how busy the stripe_cache is 3417 */ 3418 3419 if (conf->inactive_blocked) 3420 return 1; 3421 if (conf->quiesce) 3422 return 1; 3423 if (list_empty_careful(&conf->inactive_list)) 3424 return 1; 3425 3426 return 0; 3427 } 3428 EXPORT_SYMBOL_GPL(md_raid5_congested); 3429 3430 static int raid5_congested(void *data, int bits) 3431 { 3432 struct mddev *mddev = data; 3433 3434 return mddev_congested(mddev, bits) || 3435 md_raid5_congested(mddev, bits); 3436 } 3437 3438 /* We want read requests to align with chunks where possible, 3439 * but write requests don't need to. 3440 */ 3441 static int raid5_mergeable_bvec(struct request_queue *q, 3442 struct bvec_merge_data *bvm, 3443 struct bio_vec *biovec) 3444 { 3445 struct mddev *mddev = q->queuedata; 3446 sector_t sector = bvm->bi_sector + get_start_sect(bvm->bi_bdev); 3447 int max; 3448 unsigned int chunk_sectors = mddev->chunk_sectors; 3449 unsigned int bio_sectors = bvm->bi_size >> 9; 3450 3451 if ((bvm->bi_rw & 1) == WRITE) 3452 return biovec->bv_len; /* always allow writes to be mergeable */ 3453 3454 if (mddev->new_chunk_sectors < mddev->chunk_sectors) 3455 chunk_sectors = mddev->new_chunk_sectors; 3456 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; 3457 if (max < 0) max = 0; 3458 if (max <= biovec->bv_len && bio_sectors == 0) 3459 return biovec->bv_len; 3460 else 3461 return max; 3462 } 3463 3464 3465 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) 3466 { 3467 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); 3468 unsigned int chunk_sectors = mddev->chunk_sectors; 3469 unsigned int bio_sectors = bio->bi_size >> 9; 3470 3471 if (mddev->new_chunk_sectors < mddev->chunk_sectors) 3472 chunk_sectors = mddev->new_chunk_sectors; 3473 return chunk_sectors >= 3474 ((sector & (chunk_sectors - 1)) + bio_sectors); 3475 } 3476 3477 /* 3478 * add bio to the retry LIFO ( in O(1) ... we are in interrupt ) 3479 * later sampled by raid5d. 3480 */ 3481 static void add_bio_to_retry(struct bio *bi,struct r5conf *conf) 3482 { 3483 unsigned long flags; 3484 3485 spin_lock_irqsave(&conf->device_lock, flags); 3486 3487 bi->bi_next = conf->retry_read_aligned_list; 3488 conf->retry_read_aligned_list = bi; 3489 3490 spin_unlock_irqrestore(&conf->device_lock, flags); 3491 md_wakeup_thread(conf->mddev->thread); 3492 } 3493 3494 3495 static struct bio *remove_bio_from_retry(struct r5conf *conf) 3496 { 3497 struct bio *bi; 3498 3499 bi = conf->retry_read_aligned; 3500 if (bi) { 3501 conf->retry_read_aligned = NULL; 3502 return bi; 3503 } 3504 bi = conf->retry_read_aligned_list; 3505 if(bi) { 3506 conf->retry_read_aligned_list = bi->bi_next; 3507 bi->bi_next = NULL; 3508 /* 3509 * this sets the active strip count to 1 and the processed 3510 * strip count to zero (upper 8 bits) 3511 */ 3512 bi->bi_phys_segments = 1; /* biased count of active stripes */ 3513 } 3514 3515 return bi; 3516 } 3517 3518 3519 /* 3520 * The "raid5_align_endio" should check if the read succeeded and if it 3521 * did, call bio_endio on the original bio (having bio_put the new bio 3522 * first). 3523 * If the read failed.. 3524 */ 3525 static void raid5_align_endio(struct bio *bi, int error) 3526 { 3527 struct bio* raid_bi = bi->bi_private; 3528 struct mddev *mddev; 3529 struct r5conf *conf; 3530 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 3531 struct md_rdev *rdev; 3532 3533 bio_put(bi); 3534 3535 rdev = (void*)raid_bi->bi_next; 3536 raid_bi->bi_next = NULL; 3537 mddev = rdev->mddev; 3538 conf = mddev->private; 3539 3540 rdev_dec_pending(rdev, conf->mddev); 3541 3542 if (!error && uptodate) { 3543 bio_endio(raid_bi, 0); 3544 if (atomic_dec_and_test(&conf->active_aligned_reads)) 3545 wake_up(&conf->wait_for_stripe); 3546 return; 3547 } 3548 3549 3550 pr_debug("raid5_align_endio : io error...handing IO for a retry\n"); 3551 3552 add_bio_to_retry(raid_bi, conf); 3553 } 3554 3555 static int bio_fits_rdev(struct bio *bi) 3556 { 3557 struct request_queue *q = bdev_get_queue(bi->bi_bdev); 3558 3559 if ((bi->bi_size>>9) > queue_max_sectors(q)) 3560 return 0; 3561 blk_recount_segments(q, bi); 3562 if (bi->bi_phys_segments > queue_max_segments(q)) 3563 return 0; 3564 3565 if (q->merge_bvec_fn) 3566 /* it's too hard to apply the merge_bvec_fn at this stage, 3567 * just just give up 3568 */ 3569 return 0; 3570 3571 return 1; 3572 } 3573 3574 3575 static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) 3576 { 3577 struct r5conf *conf = mddev->private; 3578 int dd_idx; 3579 struct bio* align_bi; 3580 struct md_rdev *rdev; 3581 3582 if (!in_chunk_boundary(mddev, raid_bio)) { 3583 pr_debug("chunk_aligned_read : non aligned\n"); 3584 return 0; 3585 } 3586 /* 3587 * use bio_clone_mddev to make a copy of the bio 3588 */ 3589 align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev); 3590 if (!align_bi) 3591 return 0; 3592 /* 3593 * set bi_end_io to a new function, and set bi_private to the 3594 * original bio. 3595 */ 3596 align_bi->bi_end_io = raid5_align_endio; 3597 align_bi->bi_private = raid_bio; 3598 /* 3599 * compute position 3600 */ 3601 align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector, 3602 0, 3603 &dd_idx, NULL); 3604 3605 rcu_read_lock(); 3606 rdev = rcu_dereference(conf->disks[dd_idx].rdev); 3607 if (rdev && test_bit(In_sync, &rdev->flags)) { 3608 sector_t first_bad; 3609 int bad_sectors; 3610 3611 atomic_inc(&rdev->nr_pending); 3612 rcu_read_unlock(); 3613 raid_bio->bi_next = (void*)rdev; 3614 align_bi->bi_bdev = rdev->bdev; 3615 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID); 3616 align_bi->bi_sector += rdev->data_offset; 3617 3618 if (!bio_fits_rdev(align_bi) || 3619 is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9, 3620 &first_bad, &bad_sectors)) { 3621 /* too big in some way, or has a known bad block */ 3622 bio_put(align_bi); 3623 rdev_dec_pending(rdev, mddev); 3624 return 0; 3625 } 3626 3627 spin_lock_irq(&conf->device_lock); 3628 wait_event_lock_irq(conf->wait_for_stripe, 3629 conf->quiesce == 0, 3630 conf->device_lock, /* nothing */); 3631 atomic_inc(&conf->active_aligned_reads); 3632 spin_unlock_irq(&conf->device_lock); 3633 3634 generic_make_request(align_bi); 3635 return 1; 3636 } else { 3637 rcu_read_unlock(); 3638 bio_put(align_bi); 3639 return 0; 3640 } 3641 } 3642 3643 /* __get_priority_stripe - get the next stripe to process 3644 * 3645 * Full stripe writes are allowed to pass preread active stripes up until 3646 * the bypass_threshold is exceeded. In general the bypass_count 3647 * increments when the handle_list is handled before the hold_list; however, it 3648 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a 3649 * stripe with in flight i/o. The bypass_count will be reset when the 3650 * head of the hold_list has changed, i.e. the head was promoted to the 3651 * handle_list. 3652 */ 3653 static struct stripe_head *__get_priority_stripe(struct r5conf *conf) 3654 { 3655 struct stripe_head *sh; 3656 3657 pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n", 3658 __func__, 3659 list_empty(&conf->handle_list) ? "empty" : "busy", 3660 list_empty(&conf->hold_list) ? "empty" : "busy", 3661 atomic_read(&conf->pending_full_writes), conf->bypass_count); 3662 3663 if (!list_empty(&conf->handle_list)) { 3664 sh = list_entry(conf->handle_list.next, typeof(*sh), lru); 3665 3666 if (list_empty(&conf->hold_list)) 3667 conf->bypass_count = 0; 3668 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { 3669 if (conf->hold_list.next == conf->last_hold) 3670 conf->bypass_count++; 3671 else { 3672 conf->last_hold = conf->hold_list.next; 3673 conf->bypass_count -= conf->bypass_threshold; 3674 if (conf->bypass_count < 0) 3675 conf->bypass_count = 0; 3676 } 3677 } 3678 } else if (!list_empty(&conf->hold_list) && 3679 ((conf->bypass_threshold && 3680 conf->bypass_count > conf->bypass_threshold) || 3681 atomic_read(&conf->pending_full_writes) == 0)) { 3682 sh = list_entry(conf->hold_list.next, 3683 typeof(*sh), lru); 3684 conf->bypass_count -= conf->bypass_threshold; 3685 if (conf->bypass_count < 0) 3686 conf->bypass_count = 0; 3687 } else 3688 return NULL; 3689 3690 list_del_init(&sh->lru); 3691 atomic_inc(&sh->count); 3692 BUG_ON(atomic_read(&sh->count) != 1); 3693 return sh; 3694 } 3695 3696 static void make_request(struct mddev *mddev, struct bio * bi) 3697 { 3698 struct r5conf *conf = mddev->private; 3699 int dd_idx; 3700 sector_t new_sector; 3701 sector_t logical_sector, last_sector; 3702 struct stripe_head *sh; 3703 const int rw = bio_data_dir(bi); 3704 int remaining; 3705 int plugged; 3706 3707 if (unlikely(bi->bi_rw & REQ_FLUSH)) { 3708 md_flush_request(mddev, bi); 3709 return; 3710 } 3711 3712 md_write_start(mddev, bi); 3713 3714 if (rw == READ && 3715 mddev->reshape_position == MaxSector && 3716 chunk_aligned_read(mddev,bi)) 3717 return; 3718 3719 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 3720 last_sector = bi->bi_sector + (bi->bi_size>>9); 3721 bi->bi_next = NULL; 3722 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ 3723 3724 plugged = mddev_check_plugged(mddev); 3725 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { 3726 DEFINE_WAIT(w); 3727 int disks, data_disks; 3728 int previous; 3729 3730 retry: 3731 previous = 0; 3732 disks = conf->raid_disks; 3733 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); 3734 if (unlikely(conf->reshape_progress != MaxSector)) { 3735 /* spinlock is needed as reshape_progress may be 3736 * 64bit on a 32bit platform, and so it might be 3737 * possible to see a half-updated value 3738 * Of course reshape_progress could change after 3739 * the lock is dropped, so once we get a reference 3740 * to the stripe that we think it is, we will have 3741 * to check again. 3742 */ 3743 spin_lock_irq(&conf->device_lock); 3744 if (mddev->delta_disks < 0 3745 ? logical_sector < conf->reshape_progress 3746 : logical_sector >= conf->reshape_progress) { 3747 disks = conf->previous_raid_disks; 3748 previous = 1; 3749 } else { 3750 if (mddev->delta_disks < 0 3751 ? logical_sector < conf->reshape_safe 3752 : logical_sector >= conf->reshape_safe) { 3753 spin_unlock_irq(&conf->device_lock); 3754 schedule(); 3755 goto retry; 3756 } 3757 } 3758 spin_unlock_irq(&conf->device_lock); 3759 } 3760 data_disks = disks - conf->max_degraded; 3761 3762 new_sector = raid5_compute_sector(conf, logical_sector, 3763 previous, 3764 &dd_idx, NULL); 3765 pr_debug("raid456: make_request, sector %llu logical %llu\n", 3766 (unsigned long long)new_sector, 3767 (unsigned long long)logical_sector); 3768 3769 sh = get_active_stripe(conf, new_sector, previous, 3770 (bi->bi_rw&RWA_MASK), 0); 3771 if (sh) { 3772 if (unlikely(previous)) { 3773 /* expansion might have moved on while waiting for a 3774 * stripe, so we must do the range check again. 3775 * Expansion could still move past after this 3776 * test, but as we are holding a reference to 3777 * 'sh', we know that if that happens, 3778 * STRIPE_EXPANDING will get set and the expansion 3779 * won't proceed until we finish with the stripe. 3780 */ 3781 int must_retry = 0; 3782 spin_lock_irq(&conf->device_lock); 3783 if (mddev->delta_disks < 0 3784 ? logical_sector >= conf->reshape_progress 3785 : logical_sector < conf->reshape_progress) 3786 /* mismatch, need to try again */ 3787 must_retry = 1; 3788 spin_unlock_irq(&conf->device_lock); 3789 if (must_retry) { 3790 release_stripe(sh); 3791 schedule(); 3792 goto retry; 3793 } 3794 } 3795 3796 if (rw == WRITE && 3797 logical_sector >= mddev->suspend_lo && 3798 logical_sector < mddev->suspend_hi) { 3799 release_stripe(sh); 3800 /* As the suspend_* range is controlled by 3801 * userspace, we want an interruptible 3802 * wait. 3803 */ 3804 flush_signals(current); 3805 prepare_to_wait(&conf->wait_for_overlap, 3806 &w, TASK_INTERRUPTIBLE); 3807 if (logical_sector >= mddev->suspend_lo && 3808 logical_sector < mddev->suspend_hi) 3809 schedule(); 3810 goto retry; 3811 } 3812 3813 if (test_bit(STRIPE_EXPANDING, &sh->state) || 3814 !add_stripe_bio(sh, bi, dd_idx, rw)) { 3815 /* Stripe is busy expanding or 3816 * add failed due to overlap. Flush everything 3817 * and wait a while 3818 */ 3819 md_wakeup_thread(mddev->thread); 3820 release_stripe(sh); 3821 schedule(); 3822 goto retry; 3823 } 3824 finish_wait(&conf->wait_for_overlap, &w); 3825 set_bit(STRIPE_HANDLE, &sh->state); 3826 clear_bit(STRIPE_DELAYED, &sh->state); 3827 if ((bi->bi_rw & REQ_SYNC) && 3828 !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3829 atomic_inc(&conf->preread_active_stripes); 3830 release_stripe(sh); 3831 } else { 3832 /* cannot get stripe for read-ahead, just give-up */ 3833 clear_bit(BIO_UPTODATE, &bi->bi_flags); 3834 finish_wait(&conf->wait_for_overlap, &w); 3835 break; 3836 } 3837 3838 } 3839 if (!plugged) 3840 md_wakeup_thread(mddev->thread); 3841 3842 spin_lock_irq(&conf->device_lock); 3843 remaining = raid5_dec_bi_phys_segments(bi); 3844 spin_unlock_irq(&conf->device_lock); 3845 if (remaining == 0) { 3846 3847 if ( rw == WRITE ) 3848 md_write_end(mddev); 3849 3850 bio_endio(bi, 0); 3851 } 3852 } 3853 3854 static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks); 3855 3856 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped) 3857 { 3858 /* reshaping is quite different to recovery/resync so it is 3859 * handled quite separately ... here. 3860 * 3861 * On each call to sync_request, we gather one chunk worth of 3862 * destination stripes and flag them as expanding. 3863 * Then we find all the source stripes and request reads. 3864 * As the reads complete, handle_stripe will copy the data 3865 * into the destination stripe and release that stripe. 3866 */ 3867 struct r5conf *conf = mddev->private; 3868 struct stripe_head *sh; 3869 sector_t first_sector, last_sector; 3870 int raid_disks = conf->previous_raid_disks; 3871 int data_disks = raid_disks - conf->max_degraded; 3872 int new_data_disks = conf->raid_disks - conf->max_degraded; 3873 int i; 3874 int dd_idx; 3875 sector_t writepos, readpos, safepos; 3876 sector_t stripe_addr; 3877 int reshape_sectors; 3878 struct list_head stripes; 3879 3880 if (sector_nr == 0) { 3881 /* If restarting in the middle, skip the initial sectors */ 3882 if (mddev->delta_disks < 0 && 3883 conf->reshape_progress < raid5_size(mddev, 0, 0)) { 3884 sector_nr = raid5_size(mddev, 0, 0) 3885 - conf->reshape_progress; 3886 } else if (mddev->delta_disks >= 0 && 3887 conf->reshape_progress > 0) 3888 sector_nr = conf->reshape_progress; 3889 sector_div(sector_nr, new_data_disks); 3890 if (sector_nr) { 3891 mddev->curr_resync_completed = sector_nr; 3892 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 3893 *skipped = 1; 3894 return sector_nr; 3895 } 3896 } 3897 3898 /* We need to process a full chunk at a time. 3899 * If old and new chunk sizes differ, we need to process the 3900 * largest of these 3901 */ 3902 if (mddev->new_chunk_sectors > mddev->chunk_sectors) 3903 reshape_sectors = mddev->new_chunk_sectors; 3904 else 3905 reshape_sectors = mddev->chunk_sectors; 3906 3907 /* we update the metadata when there is more than 3Meg 3908 * in the block range (that is rather arbitrary, should 3909 * probably be time based) or when the data about to be 3910 * copied would over-write the source of the data at 3911 * the front of the range. 3912 * i.e. one new_stripe along from reshape_progress new_maps 3913 * to after where reshape_safe old_maps to 3914 */ 3915 writepos = conf->reshape_progress; 3916 sector_div(writepos, new_data_disks); 3917 readpos = conf->reshape_progress; 3918 sector_div(readpos, data_disks); 3919 safepos = conf->reshape_safe; 3920 sector_div(safepos, data_disks); 3921 if (mddev->delta_disks < 0) { 3922 writepos -= min_t(sector_t, reshape_sectors, writepos); 3923 readpos += reshape_sectors; 3924 safepos += reshape_sectors; 3925 } else { 3926 writepos += reshape_sectors; 3927 readpos -= min_t(sector_t, reshape_sectors, readpos); 3928 safepos -= min_t(sector_t, reshape_sectors, safepos); 3929 } 3930 3931 /* 'writepos' is the most advanced device address we might write. 3932 * 'readpos' is the least advanced device address we might read. 3933 * 'safepos' is the least address recorded in the metadata as having 3934 * been reshaped. 3935 * If 'readpos' is behind 'writepos', then there is no way that we can 3936 * ensure safety in the face of a crash - that must be done by userspace 3937 * making a backup of the data. So in that case there is no particular 3938 * rush to update metadata. 3939 * Otherwise if 'safepos' is behind 'writepos', then we really need to 3940 * update the metadata to advance 'safepos' to match 'readpos' so that 3941 * we can be safe in the event of a crash. 3942 * So we insist on updating metadata if safepos is behind writepos and 3943 * readpos is beyond writepos. 3944 * In any case, update the metadata every 10 seconds. 3945 * Maybe that number should be configurable, but I'm not sure it is 3946 * worth it.... maybe it could be a multiple of safemode_delay??? 3947 */ 3948 if ((mddev->delta_disks < 0 3949 ? (safepos > writepos && readpos < writepos) 3950 : (safepos < writepos && readpos > writepos)) || 3951 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { 3952 /* Cannot proceed until we've updated the superblock... */ 3953 wait_event(conf->wait_for_overlap, 3954 atomic_read(&conf->reshape_stripes)==0); 3955 mddev->reshape_position = conf->reshape_progress; 3956 mddev->curr_resync_completed = sector_nr; 3957 conf->reshape_checkpoint = jiffies; 3958 set_bit(MD_CHANGE_DEVS, &mddev->flags); 3959 md_wakeup_thread(mddev->thread); 3960 wait_event(mddev->sb_wait, mddev->flags == 0 || 3961 kthread_should_stop()); 3962 spin_lock_irq(&conf->device_lock); 3963 conf->reshape_safe = mddev->reshape_position; 3964 spin_unlock_irq(&conf->device_lock); 3965 wake_up(&conf->wait_for_overlap); 3966 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 3967 } 3968 3969 if (mddev->delta_disks < 0) { 3970 BUG_ON(conf->reshape_progress == 0); 3971 stripe_addr = writepos; 3972 BUG_ON((mddev->dev_sectors & 3973 ~((sector_t)reshape_sectors - 1)) 3974 - reshape_sectors - stripe_addr 3975 != sector_nr); 3976 } else { 3977 BUG_ON(writepos != sector_nr + reshape_sectors); 3978 stripe_addr = sector_nr; 3979 } 3980 INIT_LIST_HEAD(&stripes); 3981 for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) { 3982 int j; 3983 int skipped_disk = 0; 3984 sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1); 3985 set_bit(STRIPE_EXPANDING, &sh->state); 3986 atomic_inc(&conf->reshape_stripes); 3987 /* If any of this stripe is beyond the end of the old 3988 * array, then we need to zero those blocks 3989 */ 3990 for (j=sh->disks; j--;) { 3991 sector_t s; 3992 if (j == sh->pd_idx) 3993 continue; 3994 if (conf->level == 6 && 3995 j == sh->qd_idx) 3996 continue; 3997 s = compute_blocknr(sh, j, 0); 3998 if (s < raid5_size(mddev, 0, 0)) { 3999 skipped_disk = 1; 4000 continue; 4001 } 4002 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); 4003 set_bit(R5_Expanded, &sh->dev[j].flags); 4004 set_bit(R5_UPTODATE, &sh->dev[j].flags); 4005 } 4006 if (!skipped_disk) { 4007 set_bit(STRIPE_EXPAND_READY, &sh->state); 4008 set_bit(STRIPE_HANDLE, &sh->state); 4009 } 4010 list_add(&sh->lru, &stripes); 4011 } 4012 spin_lock_irq(&conf->device_lock); 4013 if (mddev->delta_disks < 0) 4014 conf->reshape_progress -= reshape_sectors * new_data_disks; 4015 else 4016 conf->reshape_progress += reshape_sectors * new_data_disks; 4017 spin_unlock_irq(&conf->device_lock); 4018 /* Ok, those stripe are ready. We can start scheduling 4019 * reads on the source stripes. 4020 * The source stripes are determined by mapping the first and last 4021 * block on the destination stripes. 4022 */ 4023 first_sector = 4024 raid5_compute_sector(conf, stripe_addr*(new_data_disks), 4025 1, &dd_idx, NULL); 4026 last_sector = 4027 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors) 4028 * new_data_disks - 1), 4029 1, &dd_idx, NULL); 4030 if (last_sector >= mddev->dev_sectors) 4031 last_sector = mddev->dev_sectors - 1; 4032 while (first_sector <= last_sector) { 4033 sh = get_active_stripe(conf, first_sector, 1, 0, 1); 4034 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); 4035 set_bit(STRIPE_HANDLE, &sh->state); 4036 release_stripe(sh); 4037 first_sector += STRIPE_SECTORS; 4038 } 4039 /* Now that the sources are clearly marked, we can release 4040 * the destination stripes 4041 */ 4042 while (!list_empty(&stripes)) { 4043 sh = list_entry(stripes.next, struct stripe_head, lru); 4044 list_del_init(&sh->lru); 4045 release_stripe(sh); 4046 } 4047 /* If this takes us to the resync_max point where we have to pause, 4048 * then we need to write out the superblock. 4049 */ 4050 sector_nr += reshape_sectors; 4051 if ((sector_nr - mddev->curr_resync_completed) * 2 4052 >= mddev->resync_max - mddev->curr_resync_completed) { 4053 /* Cannot proceed until we've updated the superblock... */ 4054 wait_event(conf->wait_for_overlap, 4055 atomic_read(&conf->reshape_stripes) == 0); 4056 mddev->reshape_position = conf->reshape_progress; 4057 mddev->curr_resync_completed = sector_nr; 4058 conf->reshape_checkpoint = jiffies; 4059 set_bit(MD_CHANGE_DEVS, &mddev->flags); 4060 md_wakeup_thread(mddev->thread); 4061 wait_event(mddev->sb_wait, 4062 !test_bit(MD_CHANGE_DEVS, &mddev->flags) 4063 || kthread_should_stop()); 4064 spin_lock_irq(&conf->device_lock); 4065 conf->reshape_safe = mddev->reshape_position; 4066 spin_unlock_irq(&conf->device_lock); 4067 wake_up(&conf->wait_for_overlap); 4068 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 4069 } 4070 return reshape_sectors; 4071 } 4072 4073 /* FIXME go_faster isn't used */ 4074 static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped, int go_faster) 4075 { 4076 struct r5conf *conf = mddev->private; 4077 struct stripe_head *sh; 4078 sector_t max_sector = mddev->dev_sectors; 4079 sector_t sync_blocks; 4080 int still_degraded = 0; 4081 int i; 4082 4083 if (sector_nr >= max_sector) { 4084 /* just being told to finish up .. nothing much to do */ 4085 4086 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 4087 end_reshape(conf); 4088 return 0; 4089 } 4090 4091 if (mddev->curr_resync < max_sector) /* aborted */ 4092 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 4093 &sync_blocks, 1); 4094 else /* completed sync */ 4095 conf->fullsync = 0; 4096 bitmap_close_sync(mddev->bitmap); 4097 4098 return 0; 4099 } 4100 4101 /* Allow raid5_quiesce to complete */ 4102 wait_event(conf->wait_for_overlap, conf->quiesce != 2); 4103 4104 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 4105 return reshape_request(mddev, sector_nr, skipped); 4106 4107 /* No need to check resync_max as we never do more than one 4108 * stripe, and as resync_max will always be on a chunk boundary, 4109 * if the check in md_do_sync didn't fire, there is no chance 4110 * of overstepping resync_max here 4111 */ 4112 4113 /* if there is too many failed drives and we are trying 4114 * to resync, then assert that we are finished, because there is 4115 * nothing we can do. 4116 */ 4117 if (mddev->degraded >= conf->max_degraded && 4118 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 4119 sector_t rv = mddev->dev_sectors - sector_nr; 4120 *skipped = 1; 4121 return rv; 4122 } 4123 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && 4124 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 4125 !conf->fullsync && sync_blocks >= STRIPE_SECTORS) { 4126 /* we can skip this block, and probably more */ 4127 sync_blocks /= STRIPE_SECTORS; 4128 *skipped = 1; 4129 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */ 4130 } 4131 4132 4133 bitmap_cond_end_sync(mddev->bitmap, sector_nr); 4134 4135 sh = get_active_stripe(conf, sector_nr, 0, 1, 0); 4136 if (sh == NULL) { 4137 sh = get_active_stripe(conf, sector_nr, 0, 0, 0); 4138 /* make sure we don't swamp the stripe cache if someone else 4139 * is trying to get access 4140 */ 4141 schedule_timeout_uninterruptible(1); 4142 } 4143 /* Need to check if array will still be degraded after recovery/resync 4144 * We don't need to check the 'failed' flag as when that gets set, 4145 * recovery aborts. 4146 */ 4147 for (i = 0; i < conf->raid_disks; i++) 4148 if (conf->disks[i].rdev == NULL) 4149 still_degraded = 1; 4150 4151 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); 4152 4153 set_bit(STRIPE_SYNC_REQUESTED, &sh->state); 4154 4155 handle_stripe(sh); 4156 release_stripe(sh); 4157 4158 return STRIPE_SECTORS; 4159 } 4160 4161 static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) 4162 { 4163 /* We may not be able to submit a whole bio at once as there 4164 * may not be enough stripe_heads available. 4165 * We cannot pre-allocate enough stripe_heads as we may need 4166 * more than exist in the cache (if we allow ever large chunks). 4167 * So we do one stripe head at a time and record in 4168 * ->bi_hw_segments how many have been done. 4169 * 4170 * We *know* that this entire raid_bio is in one chunk, so 4171 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector. 4172 */ 4173 struct stripe_head *sh; 4174 int dd_idx; 4175 sector_t sector, logical_sector, last_sector; 4176 int scnt = 0; 4177 int remaining; 4178 int handled = 0; 4179 4180 logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 4181 sector = raid5_compute_sector(conf, logical_sector, 4182 0, &dd_idx, NULL); 4183 last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9); 4184 4185 for (; logical_sector < last_sector; 4186 logical_sector += STRIPE_SECTORS, 4187 sector += STRIPE_SECTORS, 4188 scnt++) { 4189 4190 if (scnt < raid5_bi_hw_segments(raid_bio)) 4191 /* already done this stripe */ 4192 continue; 4193 4194 sh = get_active_stripe(conf, sector, 0, 1, 0); 4195 4196 if (!sh) { 4197 /* failed to get a stripe - must wait */ 4198 raid5_set_bi_hw_segments(raid_bio, scnt); 4199 conf->retry_read_aligned = raid_bio; 4200 return handled; 4201 } 4202 4203 set_bit(R5_ReadError, &sh->dev[dd_idx].flags); 4204 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) { 4205 release_stripe(sh); 4206 raid5_set_bi_hw_segments(raid_bio, scnt); 4207 conf->retry_read_aligned = raid_bio; 4208 return handled; 4209 } 4210 4211 handle_stripe(sh); 4212 release_stripe(sh); 4213 handled++; 4214 } 4215 spin_lock_irq(&conf->device_lock); 4216 remaining = raid5_dec_bi_phys_segments(raid_bio); 4217 spin_unlock_irq(&conf->device_lock); 4218 if (remaining == 0) 4219 bio_endio(raid_bio, 0); 4220 if (atomic_dec_and_test(&conf->active_aligned_reads)) 4221 wake_up(&conf->wait_for_stripe); 4222 return handled; 4223 } 4224 4225 4226 /* 4227 * This is our raid5 kernel thread. 4228 * 4229 * We scan the hash table for stripes which can be handled now. 4230 * During the scan, completed stripes are saved for us by the interrupt 4231 * handler, so that they will not have to wait for our next wakeup. 4232 */ 4233 static void raid5d(struct mddev *mddev) 4234 { 4235 struct stripe_head *sh; 4236 struct r5conf *conf = mddev->private; 4237 int handled; 4238 struct blk_plug plug; 4239 4240 pr_debug("+++ raid5d active\n"); 4241 4242 md_check_recovery(mddev); 4243 4244 blk_start_plug(&plug); 4245 handled = 0; 4246 spin_lock_irq(&conf->device_lock); 4247 while (1) { 4248 struct bio *bio; 4249 4250 if (atomic_read(&mddev->plug_cnt) == 0 && 4251 !list_empty(&conf->bitmap_list)) { 4252 /* Now is a good time to flush some bitmap updates */ 4253 conf->seq_flush++; 4254 spin_unlock_irq(&conf->device_lock); 4255 bitmap_unplug(mddev->bitmap); 4256 spin_lock_irq(&conf->device_lock); 4257 conf->seq_write = conf->seq_flush; 4258 activate_bit_delay(conf); 4259 } 4260 if (atomic_read(&mddev->plug_cnt) == 0) 4261 raid5_activate_delayed(conf); 4262 4263 while ((bio = remove_bio_from_retry(conf))) { 4264 int ok; 4265 spin_unlock_irq(&conf->device_lock); 4266 ok = retry_aligned_read(conf, bio); 4267 spin_lock_irq(&conf->device_lock); 4268 if (!ok) 4269 break; 4270 handled++; 4271 } 4272 4273 sh = __get_priority_stripe(conf); 4274 4275 if (!sh) 4276 break; 4277 spin_unlock_irq(&conf->device_lock); 4278 4279 handled++; 4280 handle_stripe(sh); 4281 release_stripe(sh); 4282 cond_resched(); 4283 4284 if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) 4285 md_check_recovery(mddev); 4286 4287 spin_lock_irq(&conf->device_lock); 4288 } 4289 pr_debug("%d stripes handled\n", handled); 4290 4291 spin_unlock_irq(&conf->device_lock); 4292 4293 async_tx_issue_pending_all(); 4294 blk_finish_plug(&plug); 4295 4296 pr_debug("--- raid5d inactive\n"); 4297 } 4298 4299 static ssize_t 4300 raid5_show_stripe_cache_size(struct mddev *mddev, char *page) 4301 { 4302 struct r5conf *conf = mddev->private; 4303 if (conf) 4304 return sprintf(page, "%d\n", conf->max_nr_stripes); 4305 else 4306 return 0; 4307 } 4308 4309 int 4310 raid5_set_cache_size(struct mddev *mddev, int size) 4311 { 4312 struct r5conf *conf = mddev->private; 4313 int err; 4314 4315 if (size <= 16 || size > 32768) 4316 return -EINVAL; 4317 while (size < conf->max_nr_stripes) { 4318 if (drop_one_stripe(conf)) 4319 conf->max_nr_stripes--; 4320 else 4321 break; 4322 } 4323 err = md_allow_write(mddev); 4324 if (err) 4325 return err; 4326 while (size > conf->max_nr_stripes) { 4327 if (grow_one_stripe(conf)) 4328 conf->max_nr_stripes++; 4329 else break; 4330 } 4331 return 0; 4332 } 4333 EXPORT_SYMBOL(raid5_set_cache_size); 4334 4335 static ssize_t 4336 raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len) 4337 { 4338 struct r5conf *conf = mddev->private; 4339 unsigned long new; 4340 int err; 4341 4342 if (len >= PAGE_SIZE) 4343 return -EINVAL; 4344 if (!conf) 4345 return -ENODEV; 4346 4347 if (strict_strtoul(page, 10, &new)) 4348 return -EINVAL; 4349 err = raid5_set_cache_size(mddev, new); 4350 if (err) 4351 return err; 4352 return len; 4353 } 4354 4355 static struct md_sysfs_entry 4356 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, 4357 raid5_show_stripe_cache_size, 4358 raid5_store_stripe_cache_size); 4359 4360 static ssize_t 4361 raid5_show_preread_threshold(struct mddev *mddev, char *page) 4362 { 4363 struct r5conf *conf = mddev->private; 4364 if (conf) 4365 return sprintf(page, "%d\n", conf->bypass_threshold); 4366 else 4367 return 0; 4368 } 4369 4370 static ssize_t 4371 raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len) 4372 { 4373 struct r5conf *conf = mddev->private; 4374 unsigned long new; 4375 if (len >= PAGE_SIZE) 4376 return -EINVAL; 4377 if (!conf) 4378 return -ENODEV; 4379 4380 if (strict_strtoul(page, 10, &new)) 4381 return -EINVAL; 4382 if (new > conf->max_nr_stripes) 4383 return -EINVAL; 4384 conf->bypass_threshold = new; 4385 return len; 4386 } 4387 4388 static struct md_sysfs_entry 4389 raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold, 4390 S_IRUGO | S_IWUSR, 4391 raid5_show_preread_threshold, 4392 raid5_store_preread_threshold); 4393 4394 static ssize_t 4395 stripe_cache_active_show(struct mddev *mddev, char *page) 4396 { 4397 struct r5conf *conf = mddev->private; 4398 if (conf) 4399 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); 4400 else 4401 return 0; 4402 } 4403 4404 static struct md_sysfs_entry 4405 raid5_stripecache_active = __ATTR_RO(stripe_cache_active); 4406 4407 static struct attribute *raid5_attrs[] = { 4408 &raid5_stripecache_size.attr, 4409 &raid5_stripecache_active.attr, 4410 &raid5_preread_bypass_threshold.attr, 4411 NULL, 4412 }; 4413 static struct attribute_group raid5_attrs_group = { 4414 .name = NULL, 4415 .attrs = raid5_attrs, 4416 }; 4417 4418 static sector_t 4419 raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks) 4420 { 4421 struct r5conf *conf = mddev->private; 4422 4423 if (!sectors) 4424 sectors = mddev->dev_sectors; 4425 if (!raid_disks) 4426 /* size is defined by the smallest of previous and new size */ 4427 raid_disks = min(conf->raid_disks, conf->previous_raid_disks); 4428 4429 sectors &= ~((sector_t)mddev->chunk_sectors - 1); 4430 sectors &= ~((sector_t)mddev->new_chunk_sectors - 1); 4431 return sectors * (raid_disks - conf->max_degraded); 4432 } 4433 4434 static void raid5_free_percpu(struct r5conf *conf) 4435 { 4436 struct raid5_percpu *percpu; 4437 unsigned long cpu; 4438 4439 if (!conf->percpu) 4440 return; 4441 4442 get_online_cpus(); 4443 for_each_possible_cpu(cpu) { 4444 percpu = per_cpu_ptr(conf->percpu, cpu); 4445 safe_put_page(percpu->spare_page); 4446 kfree(percpu->scribble); 4447 } 4448 #ifdef CONFIG_HOTPLUG_CPU 4449 unregister_cpu_notifier(&conf->cpu_notify); 4450 #endif 4451 put_online_cpus(); 4452 4453 free_percpu(conf->percpu); 4454 } 4455 4456 static void free_conf(struct r5conf *conf) 4457 { 4458 shrink_stripes(conf); 4459 raid5_free_percpu(conf); 4460 kfree(conf->disks); 4461 kfree(conf->stripe_hashtbl); 4462 kfree(conf); 4463 } 4464 4465 #ifdef CONFIG_HOTPLUG_CPU 4466 static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action, 4467 void *hcpu) 4468 { 4469 struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify); 4470 long cpu = (long)hcpu; 4471 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); 4472 4473 switch (action) { 4474 case CPU_UP_PREPARE: 4475 case CPU_UP_PREPARE_FROZEN: 4476 if (conf->level == 6 && !percpu->spare_page) 4477 percpu->spare_page = alloc_page(GFP_KERNEL); 4478 if (!percpu->scribble) 4479 percpu->scribble = kmalloc(conf->scribble_len, GFP_KERNEL); 4480 4481 if (!percpu->scribble || 4482 (conf->level == 6 && !percpu->spare_page)) { 4483 safe_put_page(percpu->spare_page); 4484 kfree(percpu->scribble); 4485 pr_err("%s: failed memory allocation for cpu%ld\n", 4486 __func__, cpu); 4487 return notifier_from_errno(-ENOMEM); 4488 } 4489 break; 4490 case CPU_DEAD: 4491 case CPU_DEAD_FROZEN: 4492 safe_put_page(percpu->spare_page); 4493 kfree(percpu->scribble); 4494 percpu->spare_page = NULL; 4495 percpu->scribble = NULL; 4496 break; 4497 default: 4498 break; 4499 } 4500 return NOTIFY_OK; 4501 } 4502 #endif 4503 4504 static int raid5_alloc_percpu(struct r5conf *conf) 4505 { 4506 unsigned long cpu; 4507 struct page *spare_page; 4508 struct raid5_percpu __percpu *allcpus; 4509 void *scribble; 4510 int err; 4511 4512 allcpus = alloc_percpu(struct raid5_percpu); 4513 if (!allcpus) 4514 return -ENOMEM; 4515 conf->percpu = allcpus; 4516 4517 get_online_cpus(); 4518 err = 0; 4519 for_each_present_cpu(cpu) { 4520 if (conf->level == 6) { 4521 spare_page = alloc_page(GFP_KERNEL); 4522 if (!spare_page) { 4523 err = -ENOMEM; 4524 break; 4525 } 4526 per_cpu_ptr(conf->percpu, cpu)->spare_page = spare_page; 4527 } 4528 scribble = kmalloc(conf->scribble_len, GFP_KERNEL); 4529 if (!scribble) { 4530 err = -ENOMEM; 4531 break; 4532 } 4533 per_cpu_ptr(conf->percpu, cpu)->scribble = scribble; 4534 } 4535 #ifdef CONFIG_HOTPLUG_CPU 4536 conf->cpu_notify.notifier_call = raid456_cpu_notify; 4537 conf->cpu_notify.priority = 0; 4538 if (err == 0) 4539 err = register_cpu_notifier(&conf->cpu_notify); 4540 #endif 4541 put_online_cpus(); 4542 4543 return err; 4544 } 4545 4546 static struct r5conf *setup_conf(struct mddev *mddev) 4547 { 4548 struct r5conf *conf; 4549 int raid_disk, memory, max_disks; 4550 struct md_rdev *rdev; 4551 struct disk_info *disk; 4552 4553 if (mddev->new_level != 5 4554 && mddev->new_level != 4 4555 && mddev->new_level != 6) { 4556 printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n", 4557 mdname(mddev), mddev->new_level); 4558 return ERR_PTR(-EIO); 4559 } 4560 if ((mddev->new_level == 5 4561 && !algorithm_valid_raid5(mddev->new_layout)) || 4562 (mddev->new_level == 6 4563 && !algorithm_valid_raid6(mddev->new_layout))) { 4564 printk(KERN_ERR "md/raid:%s: layout %d not supported\n", 4565 mdname(mddev), mddev->new_layout); 4566 return ERR_PTR(-EIO); 4567 } 4568 if (mddev->new_level == 6 && mddev->raid_disks < 4) { 4569 printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n", 4570 mdname(mddev), mddev->raid_disks); 4571 return ERR_PTR(-EINVAL); 4572 } 4573 4574 if (!mddev->new_chunk_sectors || 4575 (mddev->new_chunk_sectors << 9) % PAGE_SIZE || 4576 !is_power_of_2(mddev->new_chunk_sectors)) { 4577 printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n", 4578 mdname(mddev), mddev->new_chunk_sectors << 9); 4579 return ERR_PTR(-EINVAL); 4580 } 4581 4582 conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL); 4583 if (conf == NULL) 4584 goto abort; 4585 spin_lock_init(&conf->device_lock); 4586 init_waitqueue_head(&conf->wait_for_stripe); 4587 init_waitqueue_head(&conf->wait_for_overlap); 4588 INIT_LIST_HEAD(&conf->handle_list); 4589 INIT_LIST_HEAD(&conf->hold_list); 4590 INIT_LIST_HEAD(&conf->delayed_list); 4591 INIT_LIST_HEAD(&conf->bitmap_list); 4592 INIT_LIST_HEAD(&conf->inactive_list); 4593 atomic_set(&conf->active_stripes, 0); 4594 atomic_set(&conf->preread_active_stripes, 0); 4595 atomic_set(&conf->active_aligned_reads, 0); 4596 conf->bypass_threshold = BYPASS_THRESHOLD; 4597 conf->recovery_disabled = mddev->recovery_disabled - 1; 4598 4599 conf->raid_disks = mddev->raid_disks; 4600 if (mddev->reshape_position == MaxSector) 4601 conf->previous_raid_disks = mddev->raid_disks; 4602 else 4603 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; 4604 max_disks = max(conf->raid_disks, conf->previous_raid_disks); 4605 conf->scribble_len = scribble_len(max_disks); 4606 4607 conf->disks = kzalloc(max_disks * sizeof(struct disk_info), 4608 GFP_KERNEL); 4609 if (!conf->disks) 4610 goto abort; 4611 4612 conf->mddev = mddev; 4613 4614 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) 4615 goto abort; 4616 4617 conf->level = mddev->new_level; 4618 if (raid5_alloc_percpu(conf) != 0) 4619 goto abort; 4620 4621 pr_debug("raid456: run(%s) called.\n", mdname(mddev)); 4622 4623 list_for_each_entry(rdev, &mddev->disks, same_set) { 4624 raid_disk = rdev->raid_disk; 4625 if (raid_disk >= max_disks 4626 || raid_disk < 0) 4627 continue; 4628 disk = conf->disks + raid_disk; 4629 4630 disk->rdev = rdev; 4631 4632 if (test_bit(In_sync, &rdev->flags)) { 4633 char b[BDEVNAME_SIZE]; 4634 printk(KERN_INFO "md/raid:%s: device %s operational as raid" 4635 " disk %d\n", 4636 mdname(mddev), bdevname(rdev->bdev, b), raid_disk); 4637 } else if (rdev->saved_raid_disk != raid_disk) 4638 /* Cannot rely on bitmap to complete recovery */ 4639 conf->fullsync = 1; 4640 } 4641 4642 conf->chunk_sectors = mddev->new_chunk_sectors; 4643 conf->level = mddev->new_level; 4644 if (conf->level == 6) 4645 conf->max_degraded = 2; 4646 else 4647 conf->max_degraded = 1; 4648 conf->algorithm = mddev->new_layout; 4649 conf->max_nr_stripes = NR_STRIPES; 4650 conf->reshape_progress = mddev->reshape_position; 4651 if (conf->reshape_progress != MaxSector) { 4652 conf->prev_chunk_sectors = mddev->chunk_sectors; 4653 conf->prev_algo = mddev->layout; 4654 } 4655 4656 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + 4657 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; 4658 if (grow_stripes(conf, conf->max_nr_stripes)) { 4659 printk(KERN_ERR 4660 "md/raid:%s: couldn't allocate %dkB for buffers\n", 4661 mdname(mddev), memory); 4662 goto abort; 4663 } else 4664 printk(KERN_INFO "md/raid:%s: allocated %dkB\n", 4665 mdname(mddev), memory); 4666 4667 conf->thread = md_register_thread(raid5d, mddev, NULL); 4668 if (!conf->thread) { 4669 printk(KERN_ERR 4670 "md/raid:%s: couldn't allocate thread.\n", 4671 mdname(mddev)); 4672 goto abort; 4673 } 4674 4675 return conf; 4676 4677 abort: 4678 if (conf) { 4679 free_conf(conf); 4680 return ERR_PTR(-EIO); 4681 } else 4682 return ERR_PTR(-ENOMEM); 4683 } 4684 4685 4686 static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded) 4687 { 4688 switch (algo) { 4689 case ALGORITHM_PARITY_0: 4690 if (raid_disk < max_degraded) 4691 return 1; 4692 break; 4693 case ALGORITHM_PARITY_N: 4694 if (raid_disk >= raid_disks - max_degraded) 4695 return 1; 4696 break; 4697 case ALGORITHM_PARITY_0_6: 4698 if (raid_disk == 0 || 4699 raid_disk == raid_disks - 1) 4700 return 1; 4701 break; 4702 case ALGORITHM_LEFT_ASYMMETRIC_6: 4703 case ALGORITHM_RIGHT_ASYMMETRIC_6: 4704 case ALGORITHM_LEFT_SYMMETRIC_6: 4705 case ALGORITHM_RIGHT_SYMMETRIC_6: 4706 if (raid_disk == raid_disks - 1) 4707 return 1; 4708 } 4709 return 0; 4710 } 4711 4712 static int run(struct mddev *mddev) 4713 { 4714 struct r5conf *conf; 4715 int working_disks = 0; 4716 int dirty_parity_disks = 0; 4717 struct md_rdev *rdev; 4718 sector_t reshape_offset = 0; 4719 4720 if (mddev->recovery_cp != MaxSector) 4721 printk(KERN_NOTICE "md/raid:%s: not clean" 4722 " -- starting background reconstruction\n", 4723 mdname(mddev)); 4724 if (mddev->reshape_position != MaxSector) { 4725 /* Check that we can continue the reshape. 4726 * Currently only disks can change, it must 4727 * increase, and we must be past the point where 4728 * a stripe over-writes itself 4729 */ 4730 sector_t here_new, here_old; 4731 int old_disks; 4732 int max_degraded = (mddev->level == 6 ? 2 : 1); 4733 4734 if (mddev->new_level != mddev->level) { 4735 printk(KERN_ERR "md/raid:%s: unsupported reshape " 4736 "required - aborting.\n", 4737 mdname(mddev)); 4738 return -EINVAL; 4739 } 4740 old_disks = mddev->raid_disks - mddev->delta_disks; 4741 /* reshape_position must be on a new-stripe boundary, and one 4742 * further up in new geometry must map after here in old 4743 * geometry. 4744 */ 4745 here_new = mddev->reshape_position; 4746 if (sector_div(here_new, mddev->new_chunk_sectors * 4747 (mddev->raid_disks - max_degraded))) { 4748 printk(KERN_ERR "md/raid:%s: reshape_position not " 4749 "on a stripe boundary\n", mdname(mddev)); 4750 return -EINVAL; 4751 } 4752 reshape_offset = here_new * mddev->new_chunk_sectors; 4753 /* here_new is the stripe we will write to */ 4754 here_old = mddev->reshape_position; 4755 sector_div(here_old, mddev->chunk_sectors * 4756 (old_disks-max_degraded)); 4757 /* here_old is the first stripe that we might need to read 4758 * from */ 4759 if (mddev->delta_disks == 0) { 4760 /* We cannot be sure it is safe to start an in-place 4761 * reshape. It is only safe if user-space if monitoring 4762 * and taking constant backups. 4763 * mdadm always starts a situation like this in 4764 * readonly mode so it can take control before 4765 * allowing any writes. So just check for that. 4766 */ 4767 if ((here_new * mddev->new_chunk_sectors != 4768 here_old * mddev->chunk_sectors) || 4769 mddev->ro == 0) { 4770 printk(KERN_ERR "md/raid:%s: in-place reshape must be started" 4771 " in read-only mode - aborting\n", 4772 mdname(mddev)); 4773 return -EINVAL; 4774 } 4775 } else if (mddev->delta_disks < 0 4776 ? (here_new * mddev->new_chunk_sectors <= 4777 here_old * mddev->chunk_sectors) 4778 : (here_new * mddev->new_chunk_sectors >= 4779 here_old * mddev->chunk_sectors)) { 4780 /* Reading from the same stripe as writing to - bad */ 4781 printk(KERN_ERR "md/raid:%s: reshape_position too early for " 4782 "auto-recovery - aborting.\n", 4783 mdname(mddev)); 4784 return -EINVAL; 4785 } 4786 printk(KERN_INFO "md/raid:%s: reshape will continue\n", 4787 mdname(mddev)); 4788 /* OK, we should be able to continue; */ 4789 } else { 4790 BUG_ON(mddev->level != mddev->new_level); 4791 BUG_ON(mddev->layout != mddev->new_layout); 4792 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors); 4793 BUG_ON(mddev->delta_disks != 0); 4794 } 4795 4796 if (mddev->private == NULL) 4797 conf = setup_conf(mddev); 4798 else 4799 conf = mddev->private; 4800 4801 if (IS_ERR(conf)) 4802 return PTR_ERR(conf); 4803 4804 mddev->thread = conf->thread; 4805 conf->thread = NULL; 4806 mddev->private = conf; 4807 4808 /* 4809 * 0 for a fully functional array, 1 or 2 for a degraded array. 4810 */ 4811 list_for_each_entry(rdev, &mddev->disks, same_set) { 4812 if (rdev->raid_disk < 0) 4813 continue; 4814 if (test_bit(In_sync, &rdev->flags)) { 4815 working_disks++; 4816 continue; 4817 } 4818 /* This disc is not fully in-sync. However if it 4819 * just stored parity (beyond the recovery_offset), 4820 * when we don't need to be concerned about the 4821 * array being dirty. 4822 * When reshape goes 'backwards', we never have 4823 * partially completed devices, so we only need 4824 * to worry about reshape going forwards. 4825 */ 4826 /* Hack because v0.91 doesn't store recovery_offset properly. */ 4827 if (mddev->major_version == 0 && 4828 mddev->minor_version > 90) 4829 rdev->recovery_offset = reshape_offset; 4830 4831 if (rdev->recovery_offset < reshape_offset) { 4832 /* We need to check old and new layout */ 4833 if (!only_parity(rdev->raid_disk, 4834 conf->algorithm, 4835 conf->raid_disks, 4836 conf->max_degraded)) 4837 continue; 4838 } 4839 if (!only_parity(rdev->raid_disk, 4840 conf->prev_algo, 4841 conf->previous_raid_disks, 4842 conf->max_degraded)) 4843 continue; 4844 dirty_parity_disks++; 4845 } 4846 4847 mddev->degraded = (max(conf->raid_disks, conf->previous_raid_disks) 4848 - working_disks); 4849 4850 if (has_failed(conf)) { 4851 printk(KERN_ERR "md/raid:%s: not enough operational devices" 4852 " (%d/%d failed)\n", 4853 mdname(mddev), mddev->degraded, conf->raid_disks); 4854 goto abort; 4855 } 4856 4857 /* device size must be a multiple of chunk size */ 4858 mddev->dev_sectors &= ~(mddev->chunk_sectors - 1); 4859 mddev->resync_max_sectors = mddev->dev_sectors; 4860 4861 if (mddev->degraded > dirty_parity_disks && 4862 mddev->recovery_cp != MaxSector) { 4863 if (mddev->ok_start_degraded) 4864 printk(KERN_WARNING 4865 "md/raid:%s: starting dirty degraded array" 4866 " - data corruption possible.\n", 4867 mdname(mddev)); 4868 else { 4869 printk(KERN_ERR 4870 "md/raid:%s: cannot start dirty degraded array.\n", 4871 mdname(mddev)); 4872 goto abort; 4873 } 4874 } 4875 4876 if (mddev->degraded == 0) 4877 printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d" 4878 " devices, algorithm %d\n", mdname(mddev), conf->level, 4879 mddev->raid_disks-mddev->degraded, mddev->raid_disks, 4880 mddev->new_layout); 4881 else 4882 printk(KERN_ALERT "md/raid:%s: raid level %d active with %d" 4883 " out of %d devices, algorithm %d\n", 4884 mdname(mddev), conf->level, 4885 mddev->raid_disks - mddev->degraded, 4886 mddev->raid_disks, mddev->new_layout); 4887 4888 print_raid5_conf(conf); 4889 4890 if (conf->reshape_progress != MaxSector) { 4891 conf->reshape_safe = conf->reshape_progress; 4892 atomic_set(&conf->reshape_stripes, 0); 4893 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4894 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4895 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 4896 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 4897 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 4898 "reshape"); 4899 } 4900 4901 4902 /* Ok, everything is just fine now */ 4903 if (mddev->to_remove == &raid5_attrs_group) 4904 mddev->to_remove = NULL; 4905 else if (mddev->kobj.sd && 4906 sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) 4907 printk(KERN_WARNING 4908 "raid5: failed to create sysfs attributes for %s\n", 4909 mdname(mddev)); 4910 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 4911 4912 if (mddev->queue) { 4913 int chunk_size; 4914 /* read-ahead size must cover two whole stripes, which 4915 * is 2 * (datadisks) * chunksize where 'n' is the 4916 * number of raid devices 4917 */ 4918 int data_disks = conf->previous_raid_disks - conf->max_degraded; 4919 int stripe = data_disks * 4920 ((mddev->chunk_sectors << 9) / PAGE_SIZE); 4921 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 4922 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 4923 4924 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec); 4925 4926 mddev->queue->backing_dev_info.congested_data = mddev; 4927 mddev->queue->backing_dev_info.congested_fn = raid5_congested; 4928 4929 chunk_size = mddev->chunk_sectors << 9; 4930 blk_queue_io_min(mddev->queue, chunk_size); 4931 blk_queue_io_opt(mddev->queue, chunk_size * 4932 (conf->raid_disks - conf->max_degraded)); 4933 4934 list_for_each_entry(rdev, &mddev->disks, same_set) 4935 disk_stack_limits(mddev->gendisk, rdev->bdev, 4936 rdev->data_offset << 9); 4937 } 4938 4939 return 0; 4940 abort: 4941 md_unregister_thread(&mddev->thread); 4942 print_raid5_conf(conf); 4943 free_conf(conf); 4944 mddev->private = NULL; 4945 printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev)); 4946 return -EIO; 4947 } 4948 4949 static int stop(struct mddev *mddev) 4950 { 4951 struct r5conf *conf = mddev->private; 4952 4953 md_unregister_thread(&mddev->thread); 4954 if (mddev->queue) 4955 mddev->queue->backing_dev_info.congested_fn = NULL; 4956 free_conf(conf); 4957 mddev->private = NULL; 4958 mddev->to_remove = &raid5_attrs_group; 4959 return 0; 4960 } 4961 4962 static void status(struct seq_file *seq, struct mddev *mddev) 4963 { 4964 struct r5conf *conf = mddev->private; 4965 int i; 4966 4967 seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level, 4968 mddev->chunk_sectors / 2, mddev->layout); 4969 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); 4970 for (i = 0; i < conf->raid_disks; i++) 4971 seq_printf (seq, "%s", 4972 conf->disks[i].rdev && 4973 test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_"); 4974 seq_printf (seq, "]"); 4975 } 4976 4977 static void print_raid5_conf (struct r5conf *conf) 4978 { 4979 int i; 4980 struct disk_info *tmp; 4981 4982 printk(KERN_DEBUG "RAID conf printout:\n"); 4983 if (!conf) { 4984 printk("(conf==NULL)\n"); 4985 return; 4986 } 4987 printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level, 4988 conf->raid_disks, 4989 conf->raid_disks - conf->mddev->degraded); 4990 4991 for (i = 0; i < conf->raid_disks; i++) { 4992 char b[BDEVNAME_SIZE]; 4993 tmp = conf->disks + i; 4994 if (tmp->rdev) 4995 printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n", 4996 i, !test_bit(Faulty, &tmp->rdev->flags), 4997 bdevname(tmp->rdev->bdev, b)); 4998 } 4999 } 5000 5001 static int raid5_spare_active(struct mddev *mddev) 5002 { 5003 int i; 5004 struct r5conf *conf = mddev->private; 5005 struct disk_info *tmp; 5006 int count = 0; 5007 unsigned long flags; 5008 5009 for (i = 0; i < conf->raid_disks; i++) { 5010 tmp = conf->disks + i; 5011 if (tmp->rdev 5012 && tmp->rdev->recovery_offset == MaxSector 5013 && !test_bit(Faulty, &tmp->rdev->flags) 5014 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 5015 count++; 5016 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state); 5017 } 5018 } 5019 spin_lock_irqsave(&conf->device_lock, flags); 5020 mddev->degraded -= count; 5021 spin_unlock_irqrestore(&conf->device_lock, flags); 5022 print_raid5_conf(conf); 5023 return count; 5024 } 5025 5026 static int raid5_remove_disk(struct mddev *mddev, int number) 5027 { 5028 struct r5conf *conf = mddev->private; 5029 int err = 0; 5030 struct md_rdev *rdev; 5031 struct disk_info *p = conf->disks + number; 5032 5033 print_raid5_conf(conf); 5034 rdev = p->rdev; 5035 if (rdev) { 5036 if (number >= conf->raid_disks && 5037 conf->reshape_progress == MaxSector) 5038 clear_bit(In_sync, &rdev->flags); 5039 5040 if (test_bit(In_sync, &rdev->flags) || 5041 atomic_read(&rdev->nr_pending)) { 5042 err = -EBUSY; 5043 goto abort; 5044 } 5045 /* Only remove non-faulty devices if recovery 5046 * isn't possible. 5047 */ 5048 if (!test_bit(Faulty, &rdev->flags) && 5049 mddev->recovery_disabled != conf->recovery_disabled && 5050 !has_failed(conf) && 5051 number < conf->raid_disks) { 5052 err = -EBUSY; 5053 goto abort; 5054 } 5055 p->rdev = NULL; 5056 synchronize_rcu(); 5057 if (atomic_read(&rdev->nr_pending)) { 5058 /* lost the race, try later */ 5059 err = -EBUSY; 5060 p->rdev = rdev; 5061 } 5062 } 5063 abort: 5064 5065 print_raid5_conf(conf); 5066 return err; 5067 } 5068 5069 static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) 5070 { 5071 struct r5conf *conf = mddev->private; 5072 int err = -EEXIST; 5073 int disk; 5074 struct disk_info *p; 5075 int first = 0; 5076 int last = conf->raid_disks - 1; 5077 5078 if (mddev->recovery_disabled == conf->recovery_disabled) 5079 return -EBUSY; 5080 5081 if (has_failed(conf)) 5082 /* no point adding a device */ 5083 return -EINVAL; 5084 5085 if (rdev->raid_disk >= 0) 5086 first = last = rdev->raid_disk; 5087 5088 /* 5089 * find the disk ... but prefer rdev->saved_raid_disk 5090 * if possible. 5091 */ 5092 if (rdev->saved_raid_disk >= 0 && 5093 rdev->saved_raid_disk >= first && 5094 conf->disks[rdev->saved_raid_disk].rdev == NULL) 5095 disk = rdev->saved_raid_disk; 5096 else 5097 disk = first; 5098 for ( ; disk <= last ; disk++) 5099 if ((p=conf->disks + disk)->rdev == NULL) { 5100 clear_bit(In_sync, &rdev->flags); 5101 rdev->raid_disk = disk; 5102 err = 0; 5103 if (rdev->saved_raid_disk != disk) 5104 conf->fullsync = 1; 5105 rcu_assign_pointer(p->rdev, rdev); 5106 break; 5107 } 5108 print_raid5_conf(conf); 5109 return err; 5110 } 5111 5112 static int raid5_resize(struct mddev *mddev, sector_t sectors) 5113 { 5114 /* no resync is happening, and there is enough space 5115 * on all devices, so we can resize. 5116 * We need to make sure resync covers any new space. 5117 * If the array is shrinking we should possibly wait until 5118 * any io in the removed space completes, but it hardly seems 5119 * worth it. 5120 */ 5121 sectors &= ~((sector_t)mddev->chunk_sectors - 1); 5122 md_set_array_sectors(mddev, raid5_size(mddev, sectors, 5123 mddev->raid_disks)); 5124 if (mddev->array_sectors > 5125 raid5_size(mddev, sectors, mddev->raid_disks)) 5126 return -EINVAL; 5127 set_capacity(mddev->gendisk, mddev->array_sectors); 5128 revalidate_disk(mddev->gendisk); 5129 if (sectors > mddev->dev_sectors && 5130 mddev->recovery_cp > mddev->dev_sectors) { 5131 mddev->recovery_cp = mddev->dev_sectors; 5132 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 5133 } 5134 mddev->dev_sectors = sectors; 5135 mddev->resync_max_sectors = sectors; 5136 return 0; 5137 } 5138 5139 static int check_stripe_cache(struct mddev *mddev) 5140 { 5141 /* Can only proceed if there are plenty of stripe_heads. 5142 * We need a minimum of one full stripe,, and for sensible progress 5143 * it is best to have about 4 times that. 5144 * If we require 4 times, then the default 256 4K stripe_heads will 5145 * allow for chunk sizes up to 256K, which is probably OK. 5146 * If the chunk size is greater, user-space should request more 5147 * stripe_heads first. 5148 */ 5149 struct r5conf *conf = mddev->private; 5150 if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4 5151 > conf->max_nr_stripes || 5152 ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4 5153 > conf->max_nr_stripes) { 5154 printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. Needed %lu\n", 5155 mdname(mddev), 5156 ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9) 5157 / STRIPE_SIZE)*4); 5158 return 0; 5159 } 5160 return 1; 5161 } 5162 5163 static int check_reshape(struct mddev *mddev) 5164 { 5165 struct r5conf *conf = mddev->private; 5166 5167 if (mddev->delta_disks == 0 && 5168 mddev->new_layout == mddev->layout && 5169 mddev->new_chunk_sectors == mddev->chunk_sectors) 5170 return 0; /* nothing to do */ 5171 if (mddev->bitmap) 5172 /* Cannot grow a bitmap yet */ 5173 return -EBUSY; 5174 if (has_failed(conf)) 5175 return -EINVAL; 5176 if (mddev->delta_disks < 0) { 5177 /* We might be able to shrink, but the devices must 5178 * be made bigger first. 5179 * For raid6, 4 is the minimum size. 5180 * Otherwise 2 is the minimum 5181 */ 5182 int min = 2; 5183 if (mddev->level == 6) 5184 min = 4; 5185 if (mddev->raid_disks + mddev->delta_disks < min) 5186 return -EINVAL; 5187 } 5188 5189 if (!check_stripe_cache(mddev)) 5190 return -ENOSPC; 5191 5192 return resize_stripes(conf, conf->raid_disks + mddev->delta_disks); 5193 } 5194 5195 static int raid5_start_reshape(struct mddev *mddev) 5196 { 5197 struct r5conf *conf = mddev->private; 5198 struct md_rdev *rdev; 5199 int spares = 0; 5200 unsigned long flags; 5201 5202 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 5203 return -EBUSY; 5204 5205 if (!check_stripe_cache(mddev)) 5206 return -ENOSPC; 5207 5208 list_for_each_entry(rdev, &mddev->disks, same_set) 5209 if (!test_bit(In_sync, &rdev->flags) 5210 && !test_bit(Faulty, &rdev->flags)) 5211 spares++; 5212 5213 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) 5214 /* Not enough devices even to make a degraded array 5215 * of that size 5216 */ 5217 return -EINVAL; 5218 5219 /* Refuse to reduce size of the array. Any reductions in 5220 * array size must be through explicit setting of array_size 5221 * attribute. 5222 */ 5223 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) 5224 < mddev->array_sectors) { 5225 printk(KERN_ERR "md/raid:%s: array size must be reduced " 5226 "before number of disks\n", mdname(mddev)); 5227 return -EINVAL; 5228 } 5229 5230 atomic_set(&conf->reshape_stripes, 0); 5231 spin_lock_irq(&conf->device_lock); 5232 conf->previous_raid_disks = conf->raid_disks; 5233 conf->raid_disks += mddev->delta_disks; 5234 conf->prev_chunk_sectors = conf->chunk_sectors; 5235 conf->chunk_sectors = mddev->new_chunk_sectors; 5236 conf->prev_algo = conf->algorithm; 5237 conf->algorithm = mddev->new_layout; 5238 if (mddev->delta_disks < 0) 5239 conf->reshape_progress = raid5_size(mddev, 0, 0); 5240 else 5241 conf->reshape_progress = 0; 5242 conf->reshape_safe = conf->reshape_progress; 5243 conf->generation++; 5244 spin_unlock_irq(&conf->device_lock); 5245 5246 /* Add some new drives, as many as will fit. 5247 * We know there are enough to make the newly sized array work. 5248 * Don't add devices if we are reducing the number of 5249 * devices in the array. This is because it is not possible 5250 * to correctly record the "partially reconstructed" state of 5251 * such devices during the reshape and confusion could result. 5252 */ 5253 if (mddev->delta_disks >= 0) { 5254 int added_devices = 0; 5255 list_for_each_entry(rdev, &mddev->disks, same_set) 5256 if (rdev->raid_disk < 0 && 5257 !test_bit(Faulty, &rdev->flags)) { 5258 if (raid5_add_disk(mddev, rdev) == 0) { 5259 if (rdev->raid_disk 5260 >= conf->previous_raid_disks) { 5261 set_bit(In_sync, &rdev->flags); 5262 added_devices++; 5263 } else 5264 rdev->recovery_offset = 0; 5265 5266 if (sysfs_link_rdev(mddev, rdev)) 5267 /* Failure here is OK */; 5268 } 5269 } else if (rdev->raid_disk >= conf->previous_raid_disks 5270 && !test_bit(Faulty, &rdev->flags)) { 5271 /* This is a spare that was manually added */ 5272 set_bit(In_sync, &rdev->flags); 5273 added_devices++; 5274 } 5275 5276 /* When a reshape changes the number of devices, 5277 * ->degraded is measured against the larger of the 5278 * pre and post number of devices. 5279 */ 5280 spin_lock_irqsave(&conf->device_lock, flags); 5281 mddev->degraded += (conf->raid_disks - conf->previous_raid_disks) 5282 - added_devices; 5283 spin_unlock_irqrestore(&conf->device_lock, flags); 5284 } 5285 mddev->raid_disks = conf->raid_disks; 5286 mddev->reshape_position = conf->reshape_progress; 5287 set_bit(MD_CHANGE_DEVS, &mddev->flags); 5288 5289 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 5290 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 5291 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 5292 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 5293 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 5294 "reshape"); 5295 if (!mddev->sync_thread) { 5296 mddev->recovery = 0; 5297 spin_lock_irq(&conf->device_lock); 5298 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; 5299 conf->reshape_progress = MaxSector; 5300 spin_unlock_irq(&conf->device_lock); 5301 return -EAGAIN; 5302 } 5303 conf->reshape_checkpoint = jiffies; 5304 md_wakeup_thread(mddev->sync_thread); 5305 md_new_event(mddev); 5306 return 0; 5307 } 5308 5309 /* This is called from the reshape thread and should make any 5310 * changes needed in 'conf' 5311 */ 5312 static void end_reshape(struct r5conf *conf) 5313 { 5314 5315 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { 5316 5317 spin_lock_irq(&conf->device_lock); 5318 conf->previous_raid_disks = conf->raid_disks; 5319 conf->reshape_progress = MaxSector; 5320 spin_unlock_irq(&conf->device_lock); 5321 wake_up(&conf->wait_for_overlap); 5322 5323 /* read-ahead size must cover two whole stripes, which is 5324 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices 5325 */ 5326 if (conf->mddev->queue) { 5327 int data_disks = conf->raid_disks - conf->max_degraded; 5328 int stripe = data_disks * ((conf->chunk_sectors << 9) 5329 / PAGE_SIZE); 5330 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 5331 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 5332 } 5333 } 5334 } 5335 5336 /* This is called from the raid5d thread with mddev_lock held. 5337 * It makes config changes to the device. 5338 */ 5339 static void raid5_finish_reshape(struct mddev *mddev) 5340 { 5341 struct r5conf *conf = mddev->private; 5342 5343 if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) { 5344 5345 if (mddev->delta_disks > 0) { 5346 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 5347 set_capacity(mddev->gendisk, mddev->array_sectors); 5348 revalidate_disk(mddev->gendisk); 5349 } else { 5350 int d; 5351 mddev->degraded = conf->raid_disks; 5352 for (d = 0; d < conf->raid_disks ; d++) 5353 if (conf->disks[d].rdev && 5354 test_bit(In_sync, 5355 &conf->disks[d].rdev->flags)) 5356 mddev->degraded--; 5357 for (d = conf->raid_disks ; 5358 d < conf->raid_disks - mddev->delta_disks; 5359 d++) { 5360 struct md_rdev *rdev = conf->disks[d].rdev; 5361 if (rdev && raid5_remove_disk(mddev, d) == 0) { 5362 sysfs_unlink_rdev(mddev, rdev); 5363 rdev->raid_disk = -1; 5364 } 5365 } 5366 } 5367 mddev->layout = conf->algorithm; 5368 mddev->chunk_sectors = conf->chunk_sectors; 5369 mddev->reshape_position = MaxSector; 5370 mddev->delta_disks = 0; 5371 } 5372 } 5373 5374 static void raid5_quiesce(struct mddev *mddev, int state) 5375 { 5376 struct r5conf *conf = mddev->private; 5377 5378 switch(state) { 5379 case 2: /* resume for a suspend */ 5380 wake_up(&conf->wait_for_overlap); 5381 break; 5382 5383 case 1: /* stop all writes */ 5384 spin_lock_irq(&conf->device_lock); 5385 /* '2' tells resync/reshape to pause so that all 5386 * active stripes can drain 5387 */ 5388 conf->quiesce = 2; 5389 wait_event_lock_irq(conf->wait_for_stripe, 5390 atomic_read(&conf->active_stripes) == 0 && 5391 atomic_read(&conf->active_aligned_reads) == 0, 5392 conf->device_lock, /* nothing */); 5393 conf->quiesce = 1; 5394 spin_unlock_irq(&conf->device_lock); 5395 /* allow reshape to continue */ 5396 wake_up(&conf->wait_for_overlap); 5397 break; 5398 5399 case 0: /* re-enable writes */ 5400 spin_lock_irq(&conf->device_lock); 5401 conf->quiesce = 0; 5402 wake_up(&conf->wait_for_stripe); 5403 wake_up(&conf->wait_for_overlap); 5404 spin_unlock_irq(&conf->device_lock); 5405 break; 5406 } 5407 } 5408 5409 5410 static void *raid45_takeover_raid0(struct mddev *mddev, int level) 5411 { 5412 struct r0conf *raid0_conf = mddev->private; 5413 sector_t sectors; 5414 5415 /* for raid0 takeover only one zone is supported */ 5416 if (raid0_conf->nr_strip_zones > 1) { 5417 printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n", 5418 mdname(mddev)); 5419 return ERR_PTR(-EINVAL); 5420 } 5421 5422 sectors = raid0_conf->strip_zone[0].zone_end; 5423 sector_div(sectors, raid0_conf->strip_zone[0].nb_dev); 5424 mddev->dev_sectors = sectors; 5425 mddev->new_level = level; 5426 mddev->new_layout = ALGORITHM_PARITY_N; 5427 mddev->new_chunk_sectors = mddev->chunk_sectors; 5428 mddev->raid_disks += 1; 5429 mddev->delta_disks = 1; 5430 /* make sure it will be not marked as dirty */ 5431 mddev->recovery_cp = MaxSector; 5432 5433 return setup_conf(mddev); 5434 } 5435 5436 5437 static void *raid5_takeover_raid1(struct mddev *mddev) 5438 { 5439 int chunksect; 5440 5441 if (mddev->raid_disks != 2 || 5442 mddev->degraded > 1) 5443 return ERR_PTR(-EINVAL); 5444 5445 /* Should check if there are write-behind devices? */ 5446 5447 chunksect = 64*2; /* 64K by default */ 5448 5449 /* The array must be an exact multiple of chunksize */ 5450 while (chunksect && (mddev->array_sectors & (chunksect-1))) 5451 chunksect >>= 1; 5452 5453 if ((chunksect<<9) < STRIPE_SIZE) 5454 /* array size does not allow a suitable chunk size */ 5455 return ERR_PTR(-EINVAL); 5456 5457 mddev->new_level = 5; 5458 mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC; 5459 mddev->new_chunk_sectors = chunksect; 5460 5461 return setup_conf(mddev); 5462 } 5463 5464 static void *raid5_takeover_raid6(struct mddev *mddev) 5465 { 5466 int new_layout; 5467 5468 switch (mddev->layout) { 5469 case ALGORITHM_LEFT_ASYMMETRIC_6: 5470 new_layout = ALGORITHM_LEFT_ASYMMETRIC; 5471 break; 5472 case ALGORITHM_RIGHT_ASYMMETRIC_6: 5473 new_layout = ALGORITHM_RIGHT_ASYMMETRIC; 5474 break; 5475 case ALGORITHM_LEFT_SYMMETRIC_6: 5476 new_layout = ALGORITHM_LEFT_SYMMETRIC; 5477 break; 5478 case ALGORITHM_RIGHT_SYMMETRIC_6: 5479 new_layout = ALGORITHM_RIGHT_SYMMETRIC; 5480 break; 5481 case ALGORITHM_PARITY_0_6: 5482 new_layout = ALGORITHM_PARITY_0; 5483 break; 5484 case ALGORITHM_PARITY_N: 5485 new_layout = ALGORITHM_PARITY_N; 5486 break; 5487 default: 5488 return ERR_PTR(-EINVAL); 5489 } 5490 mddev->new_level = 5; 5491 mddev->new_layout = new_layout; 5492 mddev->delta_disks = -1; 5493 mddev->raid_disks -= 1; 5494 return setup_conf(mddev); 5495 } 5496 5497 5498 static int raid5_check_reshape(struct mddev *mddev) 5499 { 5500 /* For a 2-drive array, the layout and chunk size can be changed 5501 * immediately as not restriping is needed. 5502 * For larger arrays we record the new value - after validation 5503 * to be used by a reshape pass. 5504 */ 5505 struct r5conf *conf = mddev->private; 5506 int new_chunk = mddev->new_chunk_sectors; 5507 5508 if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout)) 5509 return -EINVAL; 5510 if (new_chunk > 0) { 5511 if (!is_power_of_2(new_chunk)) 5512 return -EINVAL; 5513 if (new_chunk < (PAGE_SIZE>>9)) 5514 return -EINVAL; 5515 if (mddev->array_sectors & (new_chunk-1)) 5516 /* not factor of array size */ 5517 return -EINVAL; 5518 } 5519 5520 /* They look valid */ 5521 5522 if (mddev->raid_disks == 2) { 5523 /* can make the change immediately */ 5524 if (mddev->new_layout >= 0) { 5525 conf->algorithm = mddev->new_layout; 5526 mddev->layout = mddev->new_layout; 5527 } 5528 if (new_chunk > 0) { 5529 conf->chunk_sectors = new_chunk ; 5530 mddev->chunk_sectors = new_chunk; 5531 } 5532 set_bit(MD_CHANGE_DEVS, &mddev->flags); 5533 md_wakeup_thread(mddev->thread); 5534 } 5535 return check_reshape(mddev); 5536 } 5537 5538 static int raid6_check_reshape(struct mddev *mddev) 5539 { 5540 int new_chunk = mddev->new_chunk_sectors; 5541 5542 if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout)) 5543 return -EINVAL; 5544 if (new_chunk > 0) { 5545 if (!is_power_of_2(new_chunk)) 5546 return -EINVAL; 5547 if (new_chunk < (PAGE_SIZE >> 9)) 5548 return -EINVAL; 5549 if (mddev->array_sectors & (new_chunk-1)) 5550 /* not factor of array size */ 5551 return -EINVAL; 5552 } 5553 5554 /* They look valid */ 5555 return check_reshape(mddev); 5556 } 5557 5558 static void *raid5_takeover(struct mddev *mddev) 5559 { 5560 /* raid5 can take over: 5561 * raid0 - if there is only one strip zone - make it a raid4 layout 5562 * raid1 - if there are two drives. We need to know the chunk size 5563 * raid4 - trivial - just use a raid4 layout. 5564 * raid6 - Providing it is a *_6 layout 5565 */ 5566 if (mddev->level == 0) 5567 return raid45_takeover_raid0(mddev, 5); 5568 if (mddev->level == 1) 5569 return raid5_takeover_raid1(mddev); 5570 if (mddev->level == 4) { 5571 mddev->new_layout = ALGORITHM_PARITY_N; 5572 mddev->new_level = 5; 5573 return setup_conf(mddev); 5574 } 5575 if (mddev->level == 6) 5576 return raid5_takeover_raid6(mddev); 5577 5578 return ERR_PTR(-EINVAL); 5579 } 5580 5581 static void *raid4_takeover(struct mddev *mddev) 5582 { 5583 /* raid4 can take over: 5584 * raid0 - if there is only one strip zone 5585 * raid5 - if layout is right 5586 */ 5587 if (mddev->level == 0) 5588 return raid45_takeover_raid0(mddev, 4); 5589 if (mddev->level == 5 && 5590 mddev->layout == ALGORITHM_PARITY_N) { 5591 mddev->new_layout = 0; 5592 mddev->new_level = 4; 5593 return setup_conf(mddev); 5594 } 5595 return ERR_PTR(-EINVAL); 5596 } 5597 5598 static struct md_personality raid5_personality; 5599 5600 static void *raid6_takeover(struct mddev *mddev) 5601 { 5602 /* Currently can only take over a raid5. We map the 5603 * personality to an equivalent raid6 personality 5604 * with the Q block at the end. 5605 */ 5606 int new_layout; 5607 5608 if (mddev->pers != &raid5_personality) 5609 return ERR_PTR(-EINVAL); 5610 if (mddev->degraded > 1) 5611 return ERR_PTR(-EINVAL); 5612 if (mddev->raid_disks > 253) 5613 return ERR_PTR(-EINVAL); 5614 if (mddev->raid_disks < 3) 5615 return ERR_PTR(-EINVAL); 5616 5617 switch (mddev->layout) { 5618 case ALGORITHM_LEFT_ASYMMETRIC: 5619 new_layout = ALGORITHM_LEFT_ASYMMETRIC_6; 5620 break; 5621 case ALGORITHM_RIGHT_ASYMMETRIC: 5622 new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6; 5623 break; 5624 case ALGORITHM_LEFT_SYMMETRIC: 5625 new_layout = ALGORITHM_LEFT_SYMMETRIC_6; 5626 break; 5627 case ALGORITHM_RIGHT_SYMMETRIC: 5628 new_layout = ALGORITHM_RIGHT_SYMMETRIC_6; 5629 break; 5630 case ALGORITHM_PARITY_0: 5631 new_layout = ALGORITHM_PARITY_0_6; 5632 break; 5633 case ALGORITHM_PARITY_N: 5634 new_layout = ALGORITHM_PARITY_N; 5635 break; 5636 default: 5637 return ERR_PTR(-EINVAL); 5638 } 5639 mddev->new_level = 6; 5640 mddev->new_layout = new_layout; 5641 mddev->delta_disks = 1; 5642 mddev->raid_disks += 1; 5643 return setup_conf(mddev); 5644 } 5645 5646 5647 static struct md_personality raid6_personality = 5648 { 5649 .name = "raid6", 5650 .level = 6, 5651 .owner = THIS_MODULE, 5652 .make_request = make_request, 5653 .run = run, 5654 .stop = stop, 5655 .status = status, 5656 .error_handler = error, 5657 .hot_add_disk = raid5_add_disk, 5658 .hot_remove_disk= raid5_remove_disk, 5659 .spare_active = raid5_spare_active, 5660 .sync_request = sync_request, 5661 .resize = raid5_resize, 5662 .size = raid5_size, 5663 .check_reshape = raid6_check_reshape, 5664 .start_reshape = raid5_start_reshape, 5665 .finish_reshape = raid5_finish_reshape, 5666 .quiesce = raid5_quiesce, 5667 .takeover = raid6_takeover, 5668 }; 5669 static struct md_personality raid5_personality = 5670 { 5671 .name = "raid5", 5672 .level = 5, 5673 .owner = THIS_MODULE, 5674 .make_request = make_request, 5675 .run = run, 5676 .stop = stop, 5677 .status = status, 5678 .error_handler = error, 5679 .hot_add_disk = raid5_add_disk, 5680 .hot_remove_disk= raid5_remove_disk, 5681 .spare_active = raid5_spare_active, 5682 .sync_request = sync_request, 5683 .resize = raid5_resize, 5684 .size = raid5_size, 5685 .check_reshape = raid5_check_reshape, 5686 .start_reshape = raid5_start_reshape, 5687 .finish_reshape = raid5_finish_reshape, 5688 .quiesce = raid5_quiesce, 5689 .takeover = raid5_takeover, 5690 }; 5691 5692 static struct md_personality raid4_personality = 5693 { 5694 .name = "raid4", 5695 .level = 4, 5696 .owner = THIS_MODULE, 5697 .make_request = make_request, 5698 .run = run, 5699 .stop = stop, 5700 .status = status, 5701 .error_handler = error, 5702 .hot_add_disk = raid5_add_disk, 5703 .hot_remove_disk= raid5_remove_disk, 5704 .spare_active = raid5_spare_active, 5705 .sync_request = sync_request, 5706 .resize = raid5_resize, 5707 .size = raid5_size, 5708 .check_reshape = raid5_check_reshape, 5709 .start_reshape = raid5_start_reshape, 5710 .finish_reshape = raid5_finish_reshape, 5711 .quiesce = raid5_quiesce, 5712 .takeover = raid4_takeover, 5713 }; 5714 5715 static int __init raid5_init(void) 5716 { 5717 register_md_personality(&raid6_personality); 5718 register_md_personality(&raid5_personality); 5719 register_md_personality(&raid4_personality); 5720 return 0; 5721 } 5722 5723 static void raid5_exit(void) 5724 { 5725 unregister_md_personality(&raid6_personality); 5726 unregister_md_personality(&raid5_personality); 5727 unregister_md_personality(&raid4_personality); 5728 } 5729 5730 module_init(raid5_init); 5731 module_exit(raid5_exit); 5732 MODULE_LICENSE("GPL"); 5733 MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD"); 5734 MODULE_ALIAS("md-personality-4"); /* RAID5 */ 5735 MODULE_ALIAS("md-raid5"); 5736 MODULE_ALIAS("md-raid4"); 5737 MODULE_ALIAS("md-level-5"); 5738 MODULE_ALIAS("md-level-4"); 5739 MODULE_ALIAS("md-personality-8"); /* RAID6 */ 5740 MODULE_ALIAS("md-raid6"); 5741 MODULE_ALIAS("md-level-6"); 5742 5743 /* This used to be two separate modules, they were: */ 5744 MODULE_ALIAS("raid5"); 5745 MODULE_ALIAS("raid6"); 5746