1 /* 2 * raid5.c : Multiple Devices driver for Linux 3 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman 4 * Copyright (C) 1999, 2000 Ingo Molnar 5 * Copyright (C) 2002, 2003 H. Peter Anvin 6 * 7 * RAID-4/5/6 management functions. 8 * Thanks to Penguin Computing for making the RAID-6 development possible 9 * by donating a test server! 10 * 11 * This program is free software; you can redistribute it and/or modify 12 * it under the terms of the GNU General Public License as published by 13 * the Free Software Foundation; either version 2, or (at your option) 14 * any later version. 15 * 16 * You should have received a copy of the GNU General Public License 17 * (for example /usr/src/linux/COPYING); if not, write to the Free 18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 */ 20 21 /* 22 * BITMAP UNPLUGGING: 23 * 24 * The sequencing for updating the bitmap reliably is a little 25 * subtle (and I got it wrong the first time) so it deserves some 26 * explanation. 27 * 28 * We group bitmap updates into batches. Each batch has a number. 29 * We may write out several batches at once, but that isn't very important. 30 * conf->bm_write is the number of the last batch successfully written. 31 * conf->bm_flush is the number of the last batch that was closed to 32 * new additions. 33 * When we discover that we will need to write to any block in a stripe 34 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq 35 * the number of the batch it will be in. This is bm_flush+1. 36 * When we are ready to do a write, if that batch hasn't been written yet, 37 * we plug the array and queue the stripe for later. 38 * When an unplug happens, we increment bm_flush, thus closing the current 39 * batch. 40 * When we notice that bm_flush > bm_write, we write out all pending updates 41 * to the bitmap, and advance bm_write to where bm_flush was. 42 * This may occasionally write a bit out twice, but is sure never to 43 * miss any bits. 44 */ 45 46 #include <linux/module.h> 47 #include <linux/slab.h> 48 #include <linux/highmem.h> 49 #include <linux/bitops.h> 50 #include <linux/kthread.h> 51 #include <asm/atomic.h> 52 #include "raid6.h" 53 54 #include <linux/raid/bitmap.h> 55 #include <linux/async_tx.h> 56 57 /* 58 * Stripe cache 59 */ 60 61 #define NR_STRIPES 256 62 #define STRIPE_SIZE PAGE_SIZE 63 #define STRIPE_SHIFT (PAGE_SHIFT - 9) 64 #define STRIPE_SECTORS (STRIPE_SIZE>>9) 65 #define IO_THRESHOLD 1 66 #define BYPASS_THRESHOLD 1 67 #define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head)) 68 #define HASH_MASK (NR_HASH - 1) 69 70 #define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK])) 71 72 /* bio's attached to a stripe+device for I/O are linked together in bi_sector 73 * order without overlap. There may be several bio's per stripe+device, and 74 * a bio could span several devices. 75 * When walking this list for a particular stripe+device, we must never proceed 76 * beyond a bio that extends past this device, as the next bio might no longer 77 * be valid. 78 * This macro is used to determine the 'next' bio in the list, given the sector 79 * of the current stripe+device 80 */ 81 #define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? 
(bio)->bi_next : NULL) 82 /* 83 * The following can be used to debug the driver 84 */ 85 #define RAID5_PARANOIA 1 86 #if RAID5_PARANOIA && defined(CONFIG_SMP) 87 # define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock) 88 #else 89 # define CHECK_DEVLOCK() 90 #endif 91 92 #ifdef DEBUG 93 #define inline 94 #define __inline__ 95 #endif 96 97 #if !RAID6_USE_EMPTY_ZERO_PAGE 98 /* In .bss so it's zeroed */ 99 const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256))); 100 #endif 101 102 static inline int raid6_next_disk(int disk, int raid_disks) 103 { 104 disk++; 105 return (disk < raid_disks) ? disk : 0; 106 } 107 108 static void return_io(struct bio *return_bi) 109 { 110 struct bio *bi = return_bi; 111 while (bi) { 112 113 return_bi = bi->bi_next; 114 bi->bi_next = NULL; 115 bi->bi_size = 0; 116 bi->bi_end_io(bi, 117 test_bit(BIO_UPTODATE, &bi->bi_flags) 118 ? 0 : -EIO); 119 bi = return_bi; 120 } 121 } 122 123 static void print_raid5_conf (raid5_conf_t *conf); 124 125 static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh) 126 { 127 if (atomic_dec_and_test(&sh->count)) { 128 BUG_ON(!list_empty(&sh->lru)); 129 BUG_ON(atomic_read(&conf->active_stripes)==0); 130 if (test_bit(STRIPE_HANDLE, &sh->state)) { 131 if (test_bit(STRIPE_DELAYED, &sh->state)) { 132 list_add_tail(&sh->lru, &conf->delayed_list); 133 blk_plug_device(conf->mddev->queue); 134 } else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && 135 sh->bm_seq - conf->seq_write > 0) { 136 list_add_tail(&sh->lru, &conf->bitmap_list); 137 blk_plug_device(conf->mddev->queue); 138 } else { 139 clear_bit(STRIPE_BIT_DELAY, &sh->state); 140 list_add_tail(&sh->lru, &conf->handle_list); 141 } 142 md_wakeup_thread(conf->mddev->thread); 143 } else { 144 BUG_ON(sh->ops.pending); 145 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 146 atomic_dec(&conf->preread_active_stripes); 147 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) 148 md_wakeup_thread(conf->mddev->thread); 149 } 150 atomic_dec(&conf->active_stripes); 151 if (!test_bit(STRIPE_EXPANDING, &sh->state)) { 152 list_add_tail(&sh->lru, &conf->inactive_list); 153 wake_up(&conf->wait_for_stripe); 154 if (conf->retry_read_aligned) 155 md_wakeup_thread(conf->mddev->thread); 156 } 157 } 158 } 159 } 160 static void release_stripe(struct stripe_head *sh) 161 { 162 raid5_conf_t *conf = sh->raid_conf; 163 unsigned long flags; 164 165 spin_lock_irqsave(&conf->device_lock, flags); 166 __release_stripe(conf, sh); 167 spin_unlock_irqrestore(&conf->device_lock, flags); 168 } 169 170 static inline void remove_hash(struct stripe_head *sh) 171 { 172 pr_debug("remove_hash(), stripe %llu\n", 173 (unsigned long long)sh->sector); 174 175 hlist_del_init(&sh->hash); 176 } 177 178 static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh) 179 { 180 struct hlist_head *hp = stripe_hash(conf, sh->sector); 181 182 pr_debug("insert_hash(), stripe %llu\n", 183 (unsigned long long)sh->sector); 184 185 CHECK_DEVLOCK(); 186 hlist_add_head(&sh->hash, hp); 187 } 188 189 190 /* find an idle stripe, make sure it is unhashed, and return it. 
*/ 191 static struct stripe_head *get_free_stripe(raid5_conf_t *conf) 192 { 193 struct stripe_head *sh = NULL; 194 struct list_head *first; 195 196 CHECK_DEVLOCK(); 197 if (list_empty(&conf->inactive_list)) 198 goto out; 199 first = conf->inactive_list.next; 200 sh = list_entry(first, struct stripe_head, lru); 201 list_del_init(first); 202 remove_hash(sh); 203 atomic_inc(&conf->active_stripes); 204 out: 205 return sh; 206 } 207 208 static void shrink_buffers(struct stripe_head *sh, int num) 209 { 210 struct page *p; 211 int i; 212 213 for (i=0; i<num ; i++) { 214 p = sh->dev[i].page; 215 if (!p) 216 continue; 217 sh->dev[i].page = NULL; 218 put_page(p); 219 } 220 } 221 222 static int grow_buffers(struct stripe_head *sh, int num) 223 { 224 int i; 225 226 for (i=0; i<num; i++) { 227 struct page *page; 228 229 if (!(page = alloc_page(GFP_KERNEL))) { 230 return 1; 231 } 232 sh->dev[i].page = page; 233 } 234 return 0; 235 } 236 237 static void raid5_build_block (struct stripe_head *sh, int i); 238 239 static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int disks) 240 { 241 raid5_conf_t *conf = sh->raid_conf; 242 int i; 243 244 BUG_ON(atomic_read(&sh->count) != 0); 245 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); 246 BUG_ON(sh->ops.pending || sh->ops.ack || sh->ops.complete); 247 248 CHECK_DEVLOCK(); 249 pr_debug("init_stripe called, stripe %llu\n", 250 (unsigned long long)sh->sector); 251 252 remove_hash(sh); 253 254 sh->sector = sector; 255 sh->pd_idx = pd_idx; 256 sh->state = 0; 257 258 sh->disks = disks; 259 260 for (i = sh->disks; i--; ) { 261 struct r5dev *dev = &sh->dev[i]; 262 263 if (dev->toread || dev->read || dev->towrite || dev->written || 264 test_bit(R5_LOCKED, &dev->flags)) { 265 printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n", 266 (unsigned long long)sh->sector, i, dev->toread, 267 dev->read, dev->towrite, dev->written, 268 test_bit(R5_LOCKED, &dev->flags)); 269 BUG(); 270 } 271 dev->flags = 0; 272 raid5_build_block(sh, i); 273 } 274 insert_hash(conf, sh); 275 } 276 277 static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, int disks) 278 { 279 struct stripe_head *sh; 280 struct hlist_node *hn; 281 282 CHECK_DEVLOCK(); 283 pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector); 284 hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash) 285 if (sh->sector == sector && sh->disks == disks) 286 return sh; 287 pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector); 288 return NULL; 289 } 290 291 static void unplug_slaves(mddev_t *mddev); 292 static void raid5_unplug_device(struct request_queue *q); 293 294 static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks, 295 int pd_idx, int noblock) 296 { 297 struct stripe_head *sh; 298 299 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); 300 301 spin_lock_irq(&conf->device_lock); 302 303 do { 304 wait_event_lock_irq(conf->wait_for_stripe, 305 conf->quiesce == 0, 306 conf->device_lock, /* nothing */); 307 sh = __find_stripe(conf, sector, disks); 308 if (!sh) { 309 if (!conf->inactive_blocked) 310 sh = get_free_stripe(conf); 311 if (noblock && sh == NULL) 312 break; 313 if (!sh) { 314 conf->inactive_blocked = 1; 315 wait_event_lock_irq(conf->wait_for_stripe, 316 !list_empty(&conf->inactive_list) && 317 (atomic_read(&conf->active_stripes) 318 < (conf->max_nr_stripes *3/4) 319 || !conf->inactive_blocked), 320 conf->device_lock, 321 raid5_unplug_device(conf->mddev->queue) 322 ); 323 conf->inactive_blocked = 
0; 324 } else 325 init_stripe(sh, sector, pd_idx, disks); 326 } else { 327 if (atomic_read(&sh->count)) { 328 BUG_ON(!list_empty(&sh->lru)); 329 } else { 330 if (!test_bit(STRIPE_HANDLE, &sh->state)) 331 atomic_inc(&conf->active_stripes); 332 if (list_empty(&sh->lru) && 333 !test_bit(STRIPE_EXPANDING, &sh->state)) 334 BUG(); 335 list_del_init(&sh->lru); 336 } 337 } 338 } while (sh == NULL); 339 340 if (sh) 341 atomic_inc(&sh->count); 342 343 spin_unlock_irq(&conf->device_lock); 344 return sh; 345 } 346 347 /* test_and_ack_op() ensures that we only dequeue an operation once */ 348 #define test_and_ack_op(op, pend) \ 349 do { \ 350 if (test_bit(op, &sh->ops.pending) && \ 351 !test_bit(op, &sh->ops.complete)) { \ 352 if (test_and_set_bit(op, &sh->ops.ack)) \ 353 clear_bit(op, &pend); \ 354 else \ 355 ack++; \ 356 } else \ 357 clear_bit(op, &pend); \ 358 } while (0) 359 360 /* find new work to run, do not resubmit work that is already 361 * in flight 362 */ 363 static unsigned long get_stripe_work(struct stripe_head *sh) 364 { 365 unsigned long pending; 366 int ack = 0; 367 368 pending = sh->ops.pending; 369 370 test_and_ack_op(STRIPE_OP_BIOFILL, pending); 371 test_and_ack_op(STRIPE_OP_COMPUTE_BLK, pending); 372 test_and_ack_op(STRIPE_OP_PREXOR, pending); 373 test_and_ack_op(STRIPE_OP_BIODRAIN, pending); 374 test_and_ack_op(STRIPE_OP_POSTXOR, pending); 375 test_and_ack_op(STRIPE_OP_CHECK, pending); 376 if (test_and_clear_bit(STRIPE_OP_IO, &sh->ops.pending)) 377 ack++; 378 379 sh->ops.count -= ack; 380 if (unlikely(sh->ops.count < 0)) { 381 printk(KERN_ERR "pending: %#lx ops.pending: %#lx ops.ack: %#lx " 382 "ops.complete: %#lx\n", pending, sh->ops.pending, 383 sh->ops.ack, sh->ops.complete); 384 BUG(); 385 } 386 387 return pending; 388 } 389 390 static void 391 raid5_end_read_request(struct bio *bi, int error); 392 static void 393 raid5_end_write_request(struct bio *bi, int error); 394 395 static void ops_run_io(struct stripe_head *sh) 396 { 397 raid5_conf_t *conf = sh->raid_conf; 398 int i, disks = sh->disks; 399 400 might_sleep(); 401 402 set_bit(STRIPE_IO_STARTED, &sh->state); 403 for (i = disks; i--; ) { 404 int rw; 405 struct bio *bi; 406 mdk_rdev_t *rdev; 407 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) 408 rw = WRITE; 409 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 410 rw = READ; 411 else 412 continue; 413 414 bi = &sh->dev[i].req; 415 416 bi->bi_rw = rw; 417 if (rw == WRITE) 418 bi->bi_end_io = raid5_end_write_request; 419 else 420 bi->bi_end_io = raid5_end_read_request; 421 422 rcu_read_lock(); 423 rdev = rcu_dereference(conf->disks[i].rdev); 424 if (rdev && test_bit(Faulty, &rdev->flags)) 425 rdev = NULL; 426 if (rdev) 427 atomic_inc(&rdev->nr_pending); 428 rcu_read_unlock(); 429 430 if (rdev) { 431 if (test_bit(STRIPE_SYNCING, &sh->state) || 432 test_bit(STRIPE_EXPAND_SOURCE, &sh->state) || 433 test_bit(STRIPE_EXPAND_READY, &sh->state)) 434 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 435 436 bi->bi_bdev = rdev->bdev; 437 pr_debug("%s: for %llu schedule op %ld on disc %d\n", 438 __func__, (unsigned long long)sh->sector, 439 bi->bi_rw, i); 440 atomic_inc(&sh->count); 441 bi->bi_sector = sh->sector + rdev->data_offset; 442 bi->bi_flags = 1 << BIO_UPTODATE; 443 bi->bi_vcnt = 1; 444 bi->bi_max_vecs = 1; 445 bi->bi_idx = 0; 446 bi->bi_io_vec = &sh->dev[i].vec; 447 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 448 bi->bi_io_vec[0].bv_offset = 0; 449 bi->bi_size = STRIPE_SIZE; 450 bi->bi_next = NULL; 451 if (rw == WRITE && 452 test_bit(R5_ReWrite, &sh->dev[i].flags)) 453 
atomic_add(STRIPE_SECTORS, 454 &rdev->corrected_errors); 455 generic_make_request(bi); 456 } else { 457 if (rw == WRITE) 458 set_bit(STRIPE_DEGRADED, &sh->state); 459 pr_debug("skip op %ld on disc %d for sector %llu\n", 460 bi->bi_rw, i, (unsigned long long)sh->sector); 461 clear_bit(R5_LOCKED, &sh->dev[i].flags); 462 set_bit(STRIPE_HANDLE, &sh->state); 463 } 464 } 465 } 466 467 static struct dma_async_tx_descriptor * 468 async_copy_data(int frombio, struct bio *bio, struct page *page, 469 sector_t sector, struct dma_async_tx_descriptor *tx) 470 { 471 struct bio_vec *bvl; 472 struct page *bio_page; 473 int i; 474 int page_offset; 475 476 if (bio->bi_sector >= sector) 477 page_offset = (signed)(bio->bi_sector - sector) * 512; 478 else 479 page_offset = (signed)(sector - bio->bi_sector) * -512; 480 bio_for_each_segment(bvl, bio, i) { 481 int len = bio_iovec_idx(bio, i)->bv_len; 482 int clen; 483 int b_offset = 0; 484 485 if (page_offset < 0) { 486 b_offset = -page_offset; 487 page_offset += b_offset; 488 len -= b_offset; 489 } 490 491 if (len > 0 && page_offset + len > STRIPE_SIZE) 492 clen = STRIPE_SIZE - page_offset; 493 else 494 clen = len; 495 496 if (clen > 0) { 497 b_offset += bio_iovec_idx(bio, i)->bv_offset; 498 bio_page = bio_iovec_idx(bio, i)->bv_page; 499 if (frombio) 500 tx = async_memcpy(page, bio_page, page_offset, 501 b_offset, clen, 502 ASYNC_TX_DEP_ACK, 503 tx, NULL, NULL); 504 else 505 tx = async_memcpy(bio_page, page, b_offset, 506 page_offset, clen, 507 ASYNC_TX_DEP_ACK, 508 tx, NULL, NULL); 509 } 510 if (clen < len) /* hit end of page */ 511 break; 512 page_offset += len; 513 } 514 515 return tx; 516 } 517 518 static void ops_complete_biofill(void *stripe_head_ref) 519 { 520 struct stripe_head *sh = stripe_head_ref; 521 struct bio *return_bi = NULL; 522 raid5_conf_t *conf = sh->raid_conf; 523 int i; 524 525 pr_debug("%s: stripe %llu\n", __func__, 526 (unsigned long long)sh->sector); 527 528 /* clear completed biofills */ 529 for (i = sh->disks; i--; ) { 530 struct r5dev *dev = &sh->dev[i]; 531 532 /* acknowledge completion of a biofill operation */ 533 /* and check if we need to reply to a read request, 534 * new R5_Wantfill requests are held off until 535 * !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending) 536 */ 537 if (test_and_clear_bit(R5_Wantfill, &dev->flags)) { 538 struct bio *rbi, *rbi2; 539 540 /* The access to dev->read is outside of the 541 * spin_lock_irq(&conf->device_lock), but is protected 542 * by the STRIPE_OP_BIOFILL pending bit 543 */ 544 BUG_ON(!dev->read); 545 rbi = dev->read; 546 dev->read = NULL; 547 while (rbi && rbi->bi_sector < 548 dev->sector + STRIPE_SECTORS) { 549 rbi2 = r5_next_bio(rbi, dev->sector); 550 spin_lock_irq(&conf->device_lock); 551 if (--rbi->bi_phys_segments == 0) { 552 rbi->bi_next = return_bi; 553 return_bi = rbi; 554 } 555 spin_unlock_irq(&conf->device_lock); 556 rbi = rbi2; 557 } 558 } 559 } 560 set_bit(STRIPE_OP_BIOFILL, &sh->ops.complete); 561 562 return_io(return_bi); 563 564 set_bit(STRIPE_HANDLE, &sh->state); 565 release_stripe(sh); 566 } 567 568 static void ops_run_biofill(struct stripe_head *sh) 569 { 570 struct dma_async_tx_descriptor *tx = NULL; 571 raid5_conf_t *conf = sh->raid_conf; 572 int i; 573 574 pr_debug("%s: stripe %llu\n", __func__, 575 (unsigned long long)sh->sector); 576 577 for (i = sh->disks; i--; ) { 578 struct r5dev *dev = &sh->dev[i]; 579 if (test_bit(R5_Wantfill, &dev->flags)) { 580 struct bio *rbi; 581 spin_lock_irq(&conf->device_lock); 582 dev->read = rbi = dev->toread; 583 dev->toread = NULL; 584 
spin_unlock_irq(&conf->device_lock); 585 while (rbi && rbi->bi_sector < 586 dev->sector + STRIPE_SECTORS) { 587 tx = async_copy_data(0, rbi, dev->page, 588 dev->sector, tx); 589 rbi = r5_next_bio(rbi, dev->sector); 590 } 591 } 592 } 593 594 atomic_inc(&sh->count); 595 async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx, 596 ops_complete_biofill, sh); 597 } 598 599 static void ops_complete_compute5(void *stripe_head_ref) 600 { 601 struct stripe_head *sh = stripe_head_ref; 602 int target = sh->ops.target; 603 struct r5dev *tgt = &sh->dev[target]; 604 605 pr_debug("%s: stripe %llu\n", __func__, 606 (unsigned long long)sh->sector); 607 608 set_bit(R5_UPTODATE, &tgt->flags); 609 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 610 clear_bit(R5_Wantcompute, &tgt->flags); 611 set_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete); 612 set_bit(STRIPE_HANDLE, &sh->state); 613 release_stripe(sh); 614 } 615 616 static struct dma_async_tx_descriptor * 617 ops_run_compute5(struct stripe_head *sh, unsigned long pending) 618 { 619 /* kernel stack size limits the total number of disks */ 620 int disks = sh->disks; 621 struct page *xor_srcs[disks]; 622 int target = sh->ops.target; 623 struct r5dev *tgt = &sh->dev[target]; 624 struct page *xor_dest = tgt->page; 625 int count = 0; 626 struct dma_async_tx_descriptor *tx; 627 int i; 628 629 pr_debug("%s: stripe %llu block: %d\n", 630 __func__, (unsigned long long)sh->sector, target); 631 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 632 633 for (i = disks; i--; ) 634 if (i != target) 635 xor_srcs[count++] = sh->dev[i].page; 636 637 atomic_inc(&sh->count); 638 639 if (unlikely(count == 1)) 640 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, 641 0, NULL, ops_complete_compute5, sh); 642 else 643 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 644 ASYNC_TX_XOR_ZERO_DST, NULL, 645 ops_complete_compute5, sh); 646 647 /* ack now if postxor is not set to be run */ 648 if (tx && !test_bit(STRIPE_OP_POSTXOR, &pending)) 649 async_tx_ack(tx); 650 651 return tx; 652 } 653 654 static void ops_complete_prexor(void *stripe_head_ref) 655 { 656 struct stripe_head *sh = stripe_head_ref; 657 658 pr_debug("%s: stripe %llu\n", __func__, 659 (unsigned long long)sh->sector); 660 661 set_bit(STRIPE_OP_PREXOR, &sh->ops.complete); 662 } 663 664 static struct dma_async_tx_descriptor * 665 ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) 666 { 667 /* kernel stack size limits the total number of disks */ 668 int disks = sh->disks; 669 struct page *xor_srcs[disks]; 670 int count = 0, pd_idx = sh->pd_idx, i; 671 672 /* existing parity data subtracted */ 673 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 674 675 pr_debug("%s: stripe %llu\n", __func__, 676 (unsigned long long)sh->sector); 677 678 for (i = disks; i--; ) { 679 struct r5dev *dev = &sh->dev[i]; 680 /* Only process blocks that are known to be uptodate */ 681 if (dev->towrite && test_bit(R5_Wantprexor, &dev->flags)) 682 xor_srcs[count++] = dev->page; 683 } 684 685 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 686 ASYNC_TX_DEP_ACK | ASYNC_TX_XOR_DROP_DST, tx, 687 ops_complete_prexor, sh); 688 689 return tx; 690 } 691 692 static struct dma_async_tx_descriptor * 693 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx, 694 unsigned long pending) 695 { 696 int disks = sh->disks; 697 int pd_idx = sh->pd_idx, i; 698 699 /* check if prexor is active which means only process blocks 700 * that are part of a read-modify-write (Wantprexor) 
701 */ 702 int prexor = test_bit(STRIPE_OP_PREXOR, &pending); 703 704 pr_debug("%s: stripe %llu\n", __func__, 705 (unsigned long long)sh->sector); 706 707 for (i = disks; i--; ) { 708 struct r5dev *dev = &sh->dev[i]; 709 struct bio *chosen; 710 int towrite; 711 712 towrite = 0; 713 if (prexor) { /* rmw */ 714 if (dev->towrite && 715 test_bit(R5_Wantprexor, &dev->flags)) 716 towrite = 1; 717 } else { /* rcw */ 718 if (i != pd_idx && dev->towrite && 719 test_bit(R5_LOCKED, &dev->flags)) 720 towrite = 1; 721 } 722 723 if (towrite) { 724 struct bio *wbi; 725 726 spin_lock(&sh->lock); 727 chosen = dev->towrite; 728 dev->towrite = NULL; 729 BUG_ON(dev->written); 730 wbi = dev->written = chosen; 731 spin_unlock(&sh->lock); 732 733 while (wbi && wbi->bi_sector < 734 dev->sector + STRIPE_SECTORS) { 735 tx = async_copy_data(1, wbi, dev->page, 736 dev->sector, tx); 737 wbi = r5_next_bio(wbi, dev->sector); 738 } 739 } 740 } 741 742 return tx; 743 } 744 745 static void ops_complete_postxor(void *stripe_head_ref) 746 { 747 struct stripe_head *sh = stripe_head_ref; 748 749 pr_debug("%s: stripe %llu\n", __func__, 750 (unsigned long long)sh->sector); 751 752 set_bit(STRIPE_OP_POSTXOR, &sh->ops.complete); 753 set_bit(STRIPE_HANDLE, &sh->state); 754 release_stripe(sh); 755 } 756 757 static void ops_complete_write(void *stripe_head_ref) 758 { 759 struct stripe_head *sh = stripe_head_ref; 760 int disks = sh->disks, i, pd_idx = sh->pd_idx; 761 762 pr_debug("%s: stripe %llu\n", __func__, 763 (unsigned long long)sh->sector); 764 765 for (i = disks; i--; ) { 766 struct r5dev *dev = &sh->dev[i]; 767 if (dev->written || i == pd_idx) 768 set_bit(R5_UPTODATE, &dev->flags); 769 } 770 771 set_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete); 772 set_bit(STRIPE_OP_POSTXOR, &sh->ops.complete); 773 774 set_bit(STRIPE_HANDLE, &sh->state); 775 release_stripe(sh); 776 } 777 778 static void 779 ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx, 780 unsigned long pending) 781 { 782 /* kernel stack size limits the total number of disks */ 783 int disks = sh->disks; 784 struct page *xor_srcs[disks]; 785 786 int count = 0, pd_idx = sh->pd_idx, i; 787 struct page *xor_dest; 788 int prexor = test_bit(STRIPE_OP_PREXOR, &pending); 789 unsigned long flags; 790 dma_async_tx_callback callback; 791 792 pr_debug("%s: stripe %llu\n", __func__, 793 (unsigned long long)sh->sector); 794 795 /* check if prexor is active which means only process blocks 796 * that are part of a read-modify-write (written) 797 */ 798 if (prexor) { 799 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 800 for (i = disks; i--; ) { 801 struct r5dev *dev = &sh->dev[i]; 802 if (dev->written) 803 xor_srcs[count++] = dev->page; 804 } 805 } else { 806 xor_dest = sh->dev[pd_idx].page; 807 for (i = disks; i--; ) { 808 struct r5dev *dev = &sh->dev[i]; 809 if (i != pd_idx) 810 xor_srcs[count++] = dev->page; 811 } 812 } 813 814 /* check whether this postxor is part of a write */ 815 callback = test_bit(STRIPE_OP_BIODRAIN, &pending) ? 816 ops_complete_write : ops_complete_postxor; 817 818 /* 1/ if we prexor'd then the dest is reused as a source 819 * 2/ if we did not prexor then we are redoing the parity 820 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST 821 * for the synchronous xor case 822 */ 823 flags = ASYNC_TX_DEP_ACK | ASYNC_TX_ACK | 824 (prexor ? 
ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST); 825 826 atomic_inc(&sh->count); 827 828 if (unlikely(count == 1)) { 829 flags &= ~(ASYNC_TX_XOR_DROP_DST | ASYNC_TX_XOR_ZERO_DST); 830 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, 831 flags, tx, callback, sh); 832 } else 833 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 834 flags, tx, callback, sh); 835 } 836 837 static void ops_complete_check(void *stripe_head_ref) 838 { 839 struct stripe_head *sh = stripe_head_ref; 840 int pd_idx = sh->pd_idx; 841 842 pr_debug("%s: stripe %llu\n", __func__, 843 (unsigned long long)sh->sector); 844 845 if (test_and_clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending) && 846 sh->ops.zero_sum_result == 0) 847 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 848 849 set_bit(STRIPE_OP_CHECK, &sh->ops.complete); 850 set_bit(STRIPE_HANDLE, &sh->state); 851 release_stripe(sh); 852 } 853 854 static void ops_run_check(struct stripe_head *sh) 855 { 856 /* kernel stack size limits the total number of disks */ 857 int disks = sh->disks; 858 struct page *xor_srcs[disks]; 859 struct dma_async_tx_descriptor *tx; 860 861 int count = 0, pd_idx = sh->pd_idx, i; 862 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 863 864 pr_debug("%s: stripe %llu\n", __func__, 865 (unsigned long long)sh->sector); 866 867 for (i = disks; i--; ) { 868 struct r5dev *dev = &sh->dev[i]; 869 if (i != pd_idx) 870 xor_srcs[count++] = dev->page; 871 } 872 873 tx = async_xor_zero_sum(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 874 &sh->ops.zero_sum_result, 0, NULL, NULL, NULL); 875 876 if (tx) 877 set_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending); 878 else 879 clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending); 880 881 atomic_inc(&sh->count); 882 tx = async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx, 883 ops_complete_check, sh); 884 } 885 886 static void raid5_run_ops(struct stripe_head *sh, unsigned long pending) 887 { 888 int overlap_clear = 0, i, disks = sh->disks; 889 struct dma_async_tx_descriptor *tx = NULL; 890 891 if (test_bit(STRIPE_OP_BIOFILL, &pending)) { 892 ops_run_biofill(sh); 893 overlap_clear++; 894 } 895 896 if (test_bit(STRIPE_OP_COMPUTE_BLK, &pending)) 897 tx = ops_run_compute5(sh, pending); 898 899 if (test_bit(STRIPE_OP_PREXOR, &pending)) 900 tx = ops_run_prexor(sh, tx); 901 902 if (test_bit(STRIPE_OP_BIODRAIN, &pending)) { 903 tx = ops_run_biodrain(sh, tx, pending); 904 overlap_clear++; 905 } 906 907 if (test_bit(STRIPE_OP_POSTXOR, &pending)) 908 ops_run_postxor(sh, tx, pending); 909 910 if (test_bit(STRIPE_OP_CHECK, &pending)) 911 ops_run_check(sh); 912 913 if (test_bit(STRIPE_OP_IO, &pending)) 914 ops_run_io(sh); 915 916 if (overlap_clear) 917 for (i = disks; i--; ) { 918 struct r5dev *dev = &sh->dev[i]; 919 if (test_and_clear_bit(R5_Overlap, &dev->flags)) 920 wake_up(&sh->raid_conf->wait_for_overlap); 921 } 922 } 923 924 static int grow_one_stripe(raid5_conf_t *conf) 925 { 926 struct stripe_head *sh; 927 sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL); 928 if (!sh) 929 return 0; 930 memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev)); 931 sh->raid_conf = conf; 932 spin_lock_init(&sh->lock); 933 934 if (grow_buffers(sh, conf->raid_disks)) { 935 shrink_buffers(sh, conf->raid_disks); 936 kmem_cache_free(conf->slab_cache, sh); 937 return 0; 938 } 939 sh->disks = conf->raid_disks; 940 /* we just created an active stripe so... 
*/ 941 atomic_set(&sh->count, 1); 942 atomic_inc(&conf->active_stripes); 943 INIT_LIST_HEAD(&sh->lru); 944 release_stripe(sh); 945 return 1; 946 } 947 948 static int grow_stripes(raid5_conf_t *conf, int num) 949 { 950 struct kmem_cache *sc; 951 int devs = conf->raid_disks; 952 953 sprintf(conf->cache_name[0], "raid5-%s", mdname(conf->mddev)); 954 sprintf(conf->cache_name[1], "raid5-%s-alt", mdname(conf->mddev)); 955 conf->active_name = 0; 956 sc = kmem_cache_create(conf->cache_name[conf->active_name], 957 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev), 958 0, 0, NULL); 959 if (!sc) 960 return 1; 961 conf->slab_cache = sc; 962 conf->pool_size = devs; 963 while (num--) 964 if (!grow_one_stripe(conf)) 965 return 1; 966 return 0; 967 } 968 969 #ifdef CONFIG_MD_RAID5_RESHAPE 970 static int resize_stripes(raid5_conf_t *conf, int newsize) 971 { 972 /* Make all the stripes able to hold 'newsize' devices. 973 * New slots in each stripe get 'page' set to a new page. 974 * 975 * This happens in stages: 976 * 1/ create a new kmem_cache and allocate the required number of 977 * stripe_heads. 978 * 2/ gather all the old stripe_heads and tranfer the pages across 979 * to the new stripe_heads. This will have the side effect of 980 * freezing the array as once all stripe_heads have been collected, 981 * no IO will be possible. Old stripe heads are freed once their 982 * pages have been transferred over, and the old kmem_cache is 983 * freed when all stripes are done. 984 * 3/ reallocate conf->disks to be suitable bigger. If this fails, 985 * we simple return a failre status - no need to clean anything up. 986 * 4/ allocate new pages for the new slots in the new stripe_heads. 987 * If this fails, we don't bother trying the shrink the 988 * stripe_heads down again, we just leave them as they are. 989 * As each stripe_head is processed the new one is released into 990 * active service. 991 * 992 * Once step2 is started, we cannot afford to wait for a write, 993 * so we use GFP_NOIO allocations. 994 */ 995 struct stripe_head *osh, *nsh; 996 LIST_HEAD(newstripes); 997 struct disk_info *ndisks; 998 int err = 0; 999 struct kmem_cache *sc; 1000 int i; 1001 1002 if (newsize <= conf->pool_size) 1003 return 0; /* never bother to shrink */ 1004 1005 md_allow_write(conf->mddev); 1006 1007 /* Step 1 */ 1008 sc = kmem_cache_create(conf->cache_name[1-conf->active_name], 1009 sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev), 1010 0, 0, NULL); 1011 if (!sc) 1012 return -ENOMEM; 1013 1014 for (i = conf->max_nr_stripes; i; i--) { 1015 nsh = kmem_cache_alloc(sc, GFP_KERNEL); 1016 if (!nsh) 1017 break; 1018 1019 memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev)); 1020 1021 nsh->raid_conf = conf; 1022 spin_lock_init(&nsh->lock); 1023 1024 list_add(&nsh->lru, &newstripes); 1025 } 1026 if (i) { 1027 /* didn't get enough, give up */ 1028 while (!list_empty(&newstripes)) { 1029 nsh = list_entry(newstripes.next, struct stripe_head, lru); 1030 list_del(&nsh->lru); 1031 kmem_cache_free(sc, nsh); 1032 } 1033 kmem_cache_destroy(sc); 1034 return -ENOMEM; 1035 } 1036 /* Step 2 - Must use GFP_NOIO now. 
1037 * OK, we have enough stripes, start collecting inactive 1038 * stripes and copying them over 1039 */ 1040 list_for_each_entry(nsh, &newstripes, lru) { 1041 spin_lock_irq(&conf->device_lock); 1042 wait_event_lock_irq(conf->wait_for_stripe, 1043 !list_empty(&conf->inactive_list), 1044 conf->device_lock, 1045 unplug_slaves(conf->mddev) 1046 ); 1047 osh = get_free_stripe(conf); 1048 spin_unlock_irq(&conf->device_lock); 1049 atomic_set(&nsh->count, 1); 1050 for(i=0; i<conf->pool_size; i++) 1051 nsh->dev[i].page = osh->dev[i].page; 1052 for( ; i<newsize; i++) 1053 nsh->dev[i].page = NULL; 1054 kmem_cache_free(conf->slab_cache, osh); 1055 } 1056 kmem_cache_destroy(conf->slab_cache); 1057 1058 /* Step 3. 1059 * At this point, we are holding all the stripes so the array 1060 * is completely stalled, so now is a good time to resize 1061 * conf->disks. 1062 */ 1063 ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO); 1064 if (ndisks) { 1065 for (i=0; i<conf->raid_disks; i++) 1066 ndisks[i] = conf->disks[i]; 1067 kfree(conf->disks); 1068 conf->disks = ndisks; 1069 } else 1070 err = -ENOMEM; 1071 1072 /* Step 4, return new stripes to service */ 1073 while(!list_empty(&newstripes)) { 1074 nsh = list_entry(newstripes.next, struct stripe_head, lru); 1075 list_del_init(&nsh->lru); 1076 for (i=conf->raid_disks; i < newsize; i++) 1077 if (nsh->dev[i].page == NULL) { 1078 struct page *p = alloc_page(GFP_NOIO); 1079 nsh->dev[i].page = p; 1080 if (!p) 1081 err = -ENOMEM; 1082 } 1083 release_stripe(nsh); 1084 } 1085 /* critical section pass, GFP_NOIO no longer needed */ 1086 1087 conf->slab_cache = sc; 1088 conf->active_name = 1-conf->active_name; 1089 conf->pool_size = newsize; 1090 return err; 1091 } 1092 #endif 1093 1094 static int drop_one_stripe(raid5_conf_t *conf) 1095 { 1096 struct stripe_head *sh; 1097 1098 spin_lock_irq(&conf->device_lock); 1099 sh = get_free_stripe(conf); 1100 spin_unlock_irq(&conf->device_lock); 1101 if (!sh) 1102 return 0; 1103 BUG_ON(atomic_read(&sh->count)); 1104 shrink_buffers(sh, conf->pool_size); 1105 kmem_cache_free(conf->slab_cache, sh); 1106 atomic_dec(&conf->active_stripes); 1107 return 1; 1108 } 1109 1110 static void shrink_stripes(raid5_conf_t *conf) 1111 { 1112 while (drop_one_stripe(conf)) 1113 ; 1114 1115 if (conf->slab_cache) 1116 kmem_cache_destroy(conf->slab_cache); 1117 conf->slab_cache = NULL; 1118 } 1119 1120 static void raid5_end_read_request(struct bio * bi, int error) 1121 { 1122 struct stripe_head *sh = bi->bi_private; 1123 raid5_conf_t *conf = sh->raid_conf; 1124 int disks = sh->disks, i; 1125 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 1126 char b[BDEVNAME_SIZE]; 1127 mdk_rdev_t *rdev; 1128 1129 1130 for (i=0 ; i<disks; i++) 1131 if (bi == &sh->dev[i].req) 1132 break; 1133 1134 pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n", 1135 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 1136 uptodate); 1137 if (i == disks) { 1138 BUG(); 1139 return; 1140 } 1141 1142 if (uptodate) { 1143 set_bit(R5_UPTODATE, &sh->dev[i].flags); 1144 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 1145 rdev = conf->disks[i].rdev; 1146 printk(KERN_INFO "raid5:%s: read error corrected (%lu sectors at %llu on %s)\n", 1147 mdname(conf->mddev), STRIPE_SECTORS, 1148 (unsigned long long)(sh->sector + rdev->data_offset), 1149 bdevname(rdev->bdev, b)); 1150 clear_bit(R5_ReadError, &sh->dev[i].flags); 1151 clear_bit(R5_ReWrite, &sh->dev[i].flags); 1152 } 1153 if (atomic_read(&conf->disks[i].rdev->read_errors)) 1154 
atomic_set(&conf->disks[i].rdev->read_errors, 0); 1155 } else { 1156 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b); 1157 int retry = 0; 1158 rdev = conf->disks[i].rdev; 1159 1160 clear_bit(R5_UPTODATE, &sh->dev[i].flags); 1161 atomic_inc(&rdev->read_errors); 1162 if (conf->mddev->degraded) 1163 printk(KERN_WARNING "raid5:%s: read error not correctable (sector %llu on %s).\n", 1164 mdname(conf->mddev), 1165 (unsigned long long)(sh->sector + rdev->data_offset), 1166 bdn); 1167 else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) 1168 /* Oh, no!!! */ 1169 printk(KERN_WARNING "raid5:%s: read error NOT corrected!! (sector %llu on %s).\n", 1170 mdname(conf->mddev), 1171 (unsigned long long)(sh->sector + rdev->data_offset), 1172 bdn); 1173 else if (atomic_read(&rdev->read_errors) 1174 > conf->max_nr_stripes) 1175 printk(KERN_WARNING 1176 "raid5:%s: Too many read errors, failing device %s.\n", 1177 mdname(conf->mddev), bdn); 1178 else 1179 retry = 1; 1180 if (retry) 1181 set_bit(R5_ReadError, &sh->dev[i].flags); 1182 else { 1183 clear_bit(R5_ReadError, &sh->dev[i].flags); 1184 clear_bit(R5_ReWrite, &sh->dev[i].flags); 1185 md_error(conf->mddev, rdev); 1186 } 1187 } 1188 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); 1189 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1190 set_bit(STRIPE_HANDLE, &sh->state); 1191 release_stripe(sh); 1192 } 1193 1194 static void raid5_end_write_request (struct bio *bi, int error) 1195 { 1196 struct stripe_head *sh = bi->bi_private; 1197 raid5_conf_t *conf = sh->raid_conf; 1198 int disks = sh->disks, i; 1199 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 1200 1201 for (i=0 ; i<disks; i++) 1202 if (bi == &sh->dev[i].req) 1203 break; 1204 1205 pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n", 1206 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 1207 uptodate); 1208 if (i == disks) { 1209 BUG(); 1210 return; 1211 } 1212 1213 if (!uptodate) 1214 md_error(conf->mddev, conf->disks[i].rdev); 1215 1216 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); 1217 1218 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1219 set_bit(STRIPE_HANDLE, &sh->state); 1220 release_stripe(sh); 1221 } 1222 1223 1224 static sector_t compute_blocknr(struct stripe_head *sh, int i); 1225 1226 static void raid5_build_block (struct stripe_head *sh, int i) 1227 { 1228 struct r5dev *dev = &sh->dev[i]; 1229 1230 bio_init(&dev->req); 1231 dev->req.bi_io_vec = &dev->vec; 1232 dev->req.bi_vcnt++; 1233 dev->req.bi_max_vecs++; 1234 dev->vec.bv_page = dev->page; 1235 dev->vec.bv_len = STRIPE_SIZE; 1236 dev->vec.bv_offset = 0; 1237 1238 dev->req.bi_sector = sh->sector; 1239 dev->req.bi_private = sh; 1240 1241 dev->flags = 0; 1242 dev->sector = compute_blocknr(sh, i); 1243 } 1244 1245 static void error(mddev_t *mddev, mdk_rdev_t *rdev) 1246 { 1247 char b[BDEVNAME_SIZE]; 1248 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 1249 pr_debug("raid5: error called\n"); 1250 1251 if (!test_bit(Faulty, &rdev->flags)) { 1252 set_bit(MD_CHANGE_DEVS, &mddev->flags); 1253 if (test_and_clear_bit(In_sync, &rdev->flags)) { 1254 unsigned long flags; 1255 spin_lock_irqsave(&conf->device_lock, flags); 1256 mddev->degraded++; 1257 spin_unlock_irqrestore(&conf->device_lock, flags); 1258 /* 1259 * if recovery was running, make sure it aborts. 
1260 */ 1261 set_bit(MD_RECOVERY_ERR, &mddev->recovery); 1262 } 1263 set_bit(Faulty, &rdev->flags); 1264 printk (KERN_ALERT 1265 "raid5: Disk failure on %s, disabling device.\n" 1266 "raid5: Operation continuing on %d devices.\n", 1267 bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded); 1268 } 1269 } 1270 1271 /* 1272 * Input: a 'big' sector number, 1273 * Output: index of the data and parity disk, and the sector # in them. 1274 */ 1275 static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks, 1276 unsigned int data_disks, unsigned int * dd_idx, 1277 unsigned int * pd_idx, raid5_conf_t *conf) 1278 { 1279 long stripe; 1280 unsigned long chunk_number; 1281 unsigned int chunk_offset; 1282 sector_t new_sector; 1283 int sectors_per_chunk = conf->chunk_size >> 9; 1284 1285 /* First compute the information on this sector */ 1286 1287 /* 1288 * Compute the chunk number and the sector offset inside the chunk 1289 */ 1290 chunk_offset = sector_div(r_sector, sectors_per_chunk); 1291 chunk_number = r_sector; 1292 BUG_ON(r_sector != chunk_number); 1293 1294 /* 1295 * Compute the stripe number 1296 */ 1297 stripe = chunk_number / data_disks; 1298 1299 /* 1300 * Compute the data disk and parity disk indexes inside the stripe 1301 */ 1302 *dd_idx = chunk_number % data_disks; 1303 1304 /* 1305 * Select the parity disk based on the user selected algorithm. 1306 */ 1307 switch(conf->level) { 1308 case 4: 1309 *pd_idx = data_disks; 1310 break; 1311 case 5: 1312 switch (conf->algorithm) { 1313 case ALGORITHM_LEFT_ASYMMETRIC: 1314 *pd_idx = data_disks - stripe % raid_disks; 1315 if (*dd_idx >= *pd_idx) 1316 (*dd_idx)++; 1317 break; 1318 case ALGORITHM_RIGHT_ASYMMETRIC: 1319 *pd_idx = stripe % raid_disks; 1320 if (*dd_idx >= *pd_idx) 1321 (*dd_idx)++; 1322 break; 1323 case ALGORITHM_LEFT_SYMMETRIC: 1324 *pd_idx = data_disks - stripe % raid_disks; 1325 *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks; 1326 break; 1327 case ALGORITHM_RIGHT_SYMMETRIC: 1328 *pd_idx = stripe % raid_disks; 1329 *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks; 1330 break; 1331 default: 1332 printk(KERN_ERR "raid5: unsupported algorithm %d\n", 1333 conf->algorithm); 1334 } 1335 break; 1336 case 6: 1337 1338 /**** FIX THIS ****/ 1339 switch (conf->algorithm) { 1340 case ALGORITHM_LEFT_ASYMMETRIC: 1341 *pd_idx = raid_disks - 1 - (stripe % raid_disks); 1342 if (*pd_idx == raid_disks-1) 1343 (*dd_idx)++; /* Q D D D P */ 1344 else if (*dd_idx >= *pd_idx) 1345 (*dd_idx) += 2; /* D D P Q D */ 1346 break; 1347 case ALGORITHM_RIGHT_ASYMMETRIC: 1348 *pd_idx = stripe % raid_disks; 1349 if (*pd_idx == raid_disks-1) 1350 (*dd_idx)++; /* Q D D D P */ 1351 else if (*dd_idx >= *pd_idx) 1352 (*dd_idx) += 2; /* D D P Q D */ 1353 break; 1354 case ALGORITHM_LEFT_SYMMETRIC: 1355 *pd_idx = raid_disks - 1 - (stripe % raid_disks); 1356 *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks; 1357 break; 1358 case ALGORITHM_RIGHT_SYMMETRIC: 1359 *pd_idx = stripe % raid_disks; 1360 *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks; 1361 break; 1362 default: 1363 printk (KERN_CRIT "raid6: unsupported algorithm %d\n", 1364 conf->algorithm); 1365 } 1366 break; 1367 } 1368 1369 /* 1370 * Finally, compute the new sector number 1371 */ 1372 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset; 1373 return new_sector; 1374 } 1375 1376 1377 static sector_t compute_blocknr(struct stripe_head *sh, int i) 1378 { 1379 raid5_conf_t *conf = sh->raid_conf; 1380 int raid_disks = sh->disks; 1381 int data_disks = raid_disks - conf->max_degraded; 1382 
sector_t new_sector = sh->sector, check; 1383 int sectors_per_chunk = conf->chunk_size >> 9; 1384 sector_t stripe; 1385 int chunk_offset; 1386 int chunk_number, dummy1, dummy2, dd_idx = i; 1387 sector_t r_sector; 1388 1389 1390 chunk_offset = sector_div(new_sector, sectors_per_chunk); 1391 stripe = new_sector; 1392 BUG_ON(new_sector != stripe); 1393 1394 if (i == sh->pd_idx) 1395 return 0; 1396 switch(conf->level) { 1397 case 4: break; 1398 case 5: 1399 switch (conf->algorithm) { 1400 case ALGORITHM_LEFT_ASYMMETRIC: 1401 case ALGORITHM_RIGHT_ASYMMETRIC: 1402 if (i > sh->pd_idx) 1403 i--; 1404 break; 1405 case ALGORITHM_LEFT_SYMMETRIC: 1406 case ALGORITHM_RIGHT_SYMMETRIC: 1407 if (i < sh->pd_idx) 1408 i += raid_disks; 1409 i -= (sh->pd_idx + 1); 1410 break; 1411 default: 1412 printk(KERN_ERR "raid5: unsupported algorithm %d\n", 1413 conf->algorithm); 1414 } 1415 break; 1416 case 6: 1417 if (i == raid6_next_disk(sh->pd_idx, raid_disks)) 1418 return 0; /* It is the Q disk */ 1419 switch (conf->algorithm) { 1420 case ALGORITHM_LEFT_ASYMMETRIC: 1421 case ALGORITHM_RIGHT_ASYMMETRIC: 1422 if (sh->pd_idx == raid_disks-1) 1423 i--; /* Q D D D P */ 1424 else if (i > sh->pd_idx) 1425 i -= 2; /* D D P Q D */ 1426 break; 1427 case ALGORITHM_LEFT_SYMMETRIC: 1428 case ALGORITHM_RIGHT_SYMMETRIC: 1429 if (sh->pd_idx == raid_disks-1) 1430 i--; /* Q D D D P */ 1431 else { 1432 /* D D P Q D */ 1433 if (i < sh->pd_idx) 1434 i += raid_disks; 1435 i -= (sh->pd_idx + 2); 1436 } 1437 break; 1438 default: 1439 printk (KERN_CRIT "raid6: unsupported algorithm %d\n", 1440 conf->algorithm); 1441 } 1442 break; 1443 } 1444 1445 chunk_number = stripe * data_disks + i; 1446 r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset; 1447 1448 check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf); 1449 if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) { 1450 printk(KERN_ERR "compute_blocknr: map not correct\n"); 1451 return 0; 1452 } 1453 return r_sector; 1454 } 1455 1456 1457 1458 /* 1459 * Copy data between a page in the stripe cache, and one or more bion 1460 * The page could align with the middle of the bio, or there could be 1461 * several bion, each with several bio_vecs, which cover part of the page 1462 * Multiple bion are linked together on bi_next. There may be extras 1463 * at the end of this list. We ignore them. 
1464 */ 1465 static void copy_data(int frombio, struct bio *bio, 1466 struct page *page, 1467 sector_t sector) 1468 { 1469 char *pa = page_address(page); 1470 struct bio_vec *bvl; 1471 int i; 1472 int page_offset; 1473 1474 if (bio->bi_sector >= sector) 1475 page_offset = (signed)(bio->bi_sector - sector) * 512; 1476 else 1477 page_offset = (signed)(sector - bio->bi_sector) * -512; 1478 bio_for_each_segment(bvl, bio, i) { 1479 int len = bio_iovec_idx(bio,i)->bv_len; 1480 int clen; 1481 int b_offset = 0; 1482 1483 if (page_offset < 0) { 1484 b_offset = -page_offset; 1485 page_offset += b_offset; 1486 len -= b_offset; 1487 } 1488 1489 if (len > 0 && page_offset + len > STRIPE_SIZE) 1490 clen = STRIPE_SIZE - page_offset; 1491 else clen = len; 1492 1493 if (clen > 0) { 1494 char *ba = __bio_kmap_atomic(bio, i, KM_USER0); 1495 if (frombio) 1496 memcpy(pa+page_offset, ba+b_offset, clen); 1497 else 1498 memcpy(ba+b_offset, pa+page_offset, clen); 1499 __bio_kunmap_atomic(ba, KM_USER0); 1500 } 1501 if (clen < len) /* hit end of page */ 1502 break; 1503 page_offset += len; 1504 } 1505 } 1506 1507 #define check_xor() do { \ 1508 if (count == MAX_XOR_BLOCKS) { \ 1509 xor_blocks(count, STRIPE_SIZE, dest, ptr);\ 1510 count = 0; \ 1511 } \ 1512 } while(0) 1513 1514 static void compute_parity6(struct stripe_head *sh, int method) 1515 { 1516 raid6_conf_t *conf = sh->raid_conf; 1517 int i, pd_idx = sh->pd_idx, qd_idx, d0_idx, disks = sh->disks, count; 1518 struct bio *chosen; 1519 /**** FIX THIS: This could be very bad if disks is close to 256 ****/ 1520 void *ptrs[disks]; 1521 1522 qd_idx = raid6_next_disk(pd_idx, disks); 1523 d0_idx = raid6_next_disk(qd_idx, disks); 1524 1525 pr_debug("compute_parity, stripe %llu, method %d\n", 1526 (unsigned long long)sh->sector, method); 1527 1528 switch(method) { 1529 case READ_MODIFY_WRITE: 1530 BUG(); /* READ_MODIFY_WRITE N/A for RAID-6 */ 1531 case RECONSTRUCT_WRITE: 1532 for (i= disks; i-- ;) 1533 if ( i != pd_idx && i != qd_idx && sh->dev[i].towrite ) { 1534 chosen = sh->dev[i].towrite; 1535 sh->dev[i].towrite = NULL; 1536 1537 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 1538 wake_up(&conf->wait_for_overlap); 1539 1540 BUG_ON(sh->dev[i].written); 1541 sh->dev[i].written = chosen; 1542 } 1543 break; 1544 case CHECK_PARITY: 1545 BUG(); /* Not implemented yet */ 1546 } 1547 1548 for (i = disks; i--;) 1549 if (sh->dev[i].written) { 1550 sector_t sector = sh->dev[i].sector; 1551 struct bio *wbi = sh->dev[i].written; 1552 while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) { 1553 copy_data(1, wbi, sh->dev[i].page, sector); 1554 wbi = r5_next_bio(wbi, sector); 1555 } 1556 1557 set_bit(R5_LOCKED, &sh->dev[i].flags); 1558 set_bit(R5_UPTODATE, &sh->dev[i].flags); 1559 } 1560 1561 // switch(method) { 1562 // case RECONSTRUCT_WRITE: 1563 // case CHECK_PARITY: 1564 // case UPDATE_PARITY: 1565 /* Note that unlike RAID-5, the ordering of the disks matters greatly. */ 1566 /* FIX: Is this ordering of drives even remotely optimal? 
*/ 1567 count = 0; 1568 i = d0_idx; 1569 do { 1570 ptrs[count++] = page_address(sh->dev[i].page); 1571 if (count <= disks-2 && !test_bit(R5_UPTODATE, &sh->dev[i].flags)) 1572 printk("block %d/%d not uptodate on parity calc\n", i,count); 1573 i = raid6_next_disk(i, disks); 1574 } while ( i != d0_idx ); 1575 // break; 1576 // } 1577 1578 raid6_call.gen_syndrome(disks, STRIPE_SIZE, ptrs); 1579 1580 switch(method) { 1581 case RECONSTRUCT_WRITE: 1582 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 1583 set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags); 1584 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); 1585 set_bit(R5_LOCKED, &sh->dev[qd_idx].flags); 1586 break; 1587 case UPDATE_PARITY: 1588 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 1589 set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags); 1590 break; 1591 } 1592 } 1593 1594 1595 /* Compute one missing block */ 1596 static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero) 1597 { 1598 int i, count, disks = sh->disks; 1599 void *ptr[MAX_XOR_BLOCKS], *dest, *p; 1600 int pd_idx = sh->pd_idx; 1601 int qd_idx = raid6_next_disk(pd_idx, disks); 1602 1603 pr_debug("compute_block_1, stripe %llu, idx %d\n", 1604 (unsigned long long)sh->sector, dd_idx); 1605 1606 if ( dd_idx == qd_idx ) { 1607 /* We're actually computing the Q drive */ 1608 compute_parity6(sh, UPDATE_PARITY); 1609 } else { 1610 dest = page_address(sh->dev[dd_idx].page); 1611 if (!nozero) memset(dest, 0, STRIPE_SIZE); 1612 count = 0; 1613 for (i = disks ; i--; ) { 1614 if (i == dd_idx || i == qd_idx) 1615 continue; 1616 p = page_address(sh->dev[i].page); 1617 if (test_bit(R5_UPTODATE, &sh->dev[i].flags)) 1618 ptr[count++] = p; 1619 else 1620 printk("compute_block() %d, stripe %llu, %d" 1621 " not present\n", dd_idx, 1622 (unsigned long long)sh->sector, i); 1623 1624 check_xor(); 1625 } 1626 if (count) 1627 xor_blocks(count, STRIPE_SIZE, dest, ptr); 1628 if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); 1629 else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); 1630 } 1631 } 1632 1633 /* Compute two missing blocks */ 1634 static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2) 1635 { 1636 int i, count, disks = sh->disks; 1637 int pd_idx = sh->pd_idx; 1638 int qd_idx = raid6_next_disk(pd_idx, disks); 1639 int d0_idx = raid6_next_disk(qd_idx, disks); 1640 int faila, failb; 1641 1642 /* faila and failb are disk numbers relative to d0_idx */ 1643 /* pd_idx become disks-2 and qd_idx become disks-1 */ 1644 faila = (dd_idx1 < d0_idx) ? dd_idx1+(disks-d0_idx) : dd_idx1-d0_idx; 1645 failb = (dd_idx2 < d0_idx) ? dd_idx2+(disks-d0_idx) : dd_idx2-d0_idx; 1646 1647 BUG_ON(faila == failb); 1648 if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; } 1649 1650 pr_debug("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n", 1651 (unsigned long long)sh->sector, dd_idx1, dd_idx2, faila, failb); 1652 1653 if ( failb == disks-1 ) { 1654 /* Q disk is one of the missing disks */ 1655 if ( faila == disks-2 ) { 1656 /* Missing P+Q, just recompute */ 1657 compute_parity6(sh, UPDATE_PARITY); 1658 return; 1659 } else { 1660 /* We're missing D+Q; recompute D from P */ 1661 compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1, 0); 1662 compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? 
*/ 1663 return; 1664 } 1665 } 1666 1667 /* We're missing D+P or D+D; build pointer table */ 1668 { 1669 /**** FIX THIS: This could be very bad if disks is close to 256 ****/ 1670 void *ptrs[disks]; 1671 1672 count = 0; 1673 i = d0_idx; 1674 do { 1675 ptrs[count++] = page_address(sh->dev[i].page); 1676 i = raid6_next_disk(i, disks); 1677 if (i != dd_idx1 && i != dd_idx2 && 1678 !test_bit(R5_UPTODATE, &sh->dev[i].flags)) 1679 printk("compute_2 with missing block %d/%d\n", count, i); 1680 } while ( i != d0_idx ); 1681 1682 if ( failb == disks-2 ) { 1683 /* We're missing D+P. */ 1684 raid6_datap_recov(disks, STRIPE_SIZE, faila, ptrs); 1685 } else { 1686 /* We're missing D+D. */ 1687 raid6_2data_recov(disks, STRIPE_SIZE, faila, failb, ptrs); 1688 } 1689 1690 /* Both the above update both missing blocks */ 1691 set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags); 1692 set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags); 1693 } 1694 } 1695 1696 static int 1697 handle_write_operations5(struct stripe_head *sh, int rcw, int expand) 1698 { 1699 int i, pd_idx = sh->pd_idx, disks = sh->disks; 1700 int locked = 0; 1701 1702 if (rcw) { 1703 /* if we are not expanding this is a proper write request, and 1704 * there will be bios with new data to be drained into the 1705 * stripe cache 1706 */ 1707 if (!expand) { 1708 set_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending); 1709 sh->ops.count++; 1710 } 1711 1712 set_bit(STRIPE_OP_POSTXOR, &sh->ops.pending); 1713 sh->ops.count++; 1714 1715 for (i = disks; i--; ) { 1716 struct r5dev *dev = &sh->dev[i]; 1717 1718 if (dev->towrite) { 1719 set_bit(R5_LOCKED, &dev->flags); 1720 if (!expand) 1721 clear_bit(R5_UPTODATE, &dev->flags); 1722 locked++; 1723 } 1724 } 1725 if (locked + 1 == disks) 1726 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) 1727 atomic_inc(&sh->raid_conf->pending_full_writes); 1728 } else { 1729 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || 1730 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); 1731 1732 set_bit(STRIPE_OP_PREXOR, &sh->ops.pending); 1733 set_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending); 1734 set_bit(STRIPE_OP_POSTXOR, &sh->ops.pending); 1735 1736 sh->ops.count += 3; 1737 1738 for (i = disks; i--; ) { 1739 struct r5dev *dev = &sh->dev[i]; 1740 if (i == pd_idx) 1741 continue; 1742 1743 /* For a read-modify write there may be blocks that are 1744 * locked for reading while others are ready to be 1745 * written so we distinguish these blocks by the 1746 * R5_Wantprexor bit 1747 */ 1748 if (dev->towrite && 1749 (test_bit(R5_UPTODATE, &dev->flags) || 1750 test_bit(R5_Wantcompute, &dev->flags))) { 1751 set_bit(R5_Wantprexor, &dev->flags); 1752 set_bit(R5_LOCKED, &dev->flags); 1753 clear_bit(R5_UPTODATE, &dev->flags); 1754 locked++; 1755 } 1756 } 1757 } 1758 1759 /* keep the parity disk locked while asynchronous operations 1760 * are in flight 1761 */ 1762 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); 1763 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 1764 locked++; 1765 1766 pr_debug("%s: stripe %llu locked: %d pending: %lx\n", 1767 __func__, (unsigned long long)sh->sector, 1768 locked, sh->ops.pending); 1769 1770 return locked; 1771 } 1772 1773 /* 1774 * Each stripe/dev can have one or more bion attached. 1775 * toread/towrite point to the first in a chain. 1776 * The bi_next chain must be in order. 
1777 */ 1778 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite) 1779 { 1780 struct bio **bip; 1781 raid5_conf_t *conf = sh->raid_conf; 1782 int firstwrite=0; 1783 1784 pr_debug("adding bh b#%llu to stripe s#%llu\n", 1785 (unsigned long long)bi->bi_sector, 1786 (unsigned long long)sh->sector); 1787 1788 1789 spin_lock(&sh->lock); 1790 spin_lock_irq(&conf->device_lock); 1791 if (forwrite) { 1792 bip = &sh->dev[dd_idx].towrite; 1793 if (*bip == NULL && sh->dev[dd_idx].written == NULL) 1794 firstwrite = 1; 1795 } else 1796 bip = &sh->dev[dd_idx].toread; 1797 while (*bip && (*bip)->bi_sector < bi->bi_sector) { 1798 if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector) 1799 goto overlap; 1800 bip = & (*bip)->bi_next; 1801 } 1802 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9)) 1803 goto overlap; 1804 1805 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); 1806 if (*bip) 1807 bi->bi_next = *bip; 1808 *bip = bi; 1809 bi->bi_phys_segments ++; 1810 spin_unlock_irq(&conf->device_lock); 1811 spin_unlock(&sh->lock); 1812 1813 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", 1814 (unsigned long long)bi->bi_sector, 1815 (unsigned long long)sh->sector, dd_idx); 1816 1817 if (conf->mddev->bitmap && firstwrite) { 1818 bitmap_startwrite(conf->mddev->bitmap, sh->sector, 1819 STRIPE_SECTORS, 0); 1820 sh->bm_seq = conf->seq_flush+1; 1821 set_bit(STRIPE_BIT_DELAY, &sh->state); 1822 } 1823 1824 if (forwrite) { 1825 /* check if page is covered */ 1826 sector_t sector = sh->dev[dd_idx].sector; 1827 for (bi=sh->dev[dd_idx].towrite; 1828 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && 1829 bi && bi->bi_sector <= sector; 1830 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { 1831 if (bi->bi_sector + (bi->bi_size>>9) >= sector) 1832 sector = bi->bi_sector + (bi->bi_size>>9); 1833 } 1834 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) 1835 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags); 1836 } 1837 return 1; 1838 1839 overlap: 1840 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); 1841 spin_unlock_irq(&conf->device_lock); 1842 spin_unlock(&sh->lock); 1843 return 0; 1844 } 1845 1846 static void end_reshape(raid5_conf_t *conf); 1847 1848 static int page_is_zero(struct page *p) 1849 { 1850 char *a = page_address(p); 1851 return ((*(u32*)a) == 0 && 1852 memcmp(a, a+4, STRIPE_SIZE-4)==0); 1853 } 1854 1855 static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks) 1856 { 1857 int sectors_per_chunk = conf->chunk_size >> 9; 1858 int pd_idx, dd_idx; 1859 int chunk_offset = sector_div(stripe, sectors_per_chunk); 1860 1861 raid5_compute_sector(stripe * (disks - conf->max_degraded) 1862 *sectors_per_chunk + chunk_offset, 1863 disks, disks - conf->max_degraded, 1864 &dd_idx, &pd_idx, conf); 1865 return pd_idx; 1866 } 1867 1868 static void 1869 handle_requests_to_failed_array(raid5_conf_t *conf, struct stripe_head *sh, 1870 struct stripe_head_state *s, int disks, 1871 struct bio **return_bi) 1872 { 1873 int i; 1874 for (i = disks; i--; ) { 1875 struct bio *bi; 1876 int bitmap_end = 0; 1877 1878 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 1879 mdk_rdev_t *rdev; 1880 rcu_read_lock(); 1881 rdev = rcu_dereference(conf->disks[i].rdev); 1882 if (rdev && test_bit(In_sync, &rdev->flags)) 1883 /* multiple read failures in one stripe */ 1884 md_error(conf->mddev, rdev); 1885 rcu_read_unlock(); 1886 } 1887 spin_lock_irq(&conf->device_lock); 1888 /* fail all writes first */ 1889 bi = sh->dev[i].towrite; 1890 sh->dev[i].towrite = NULL; 1891 if 
(bi) { 1892 s->to_write--; 1893 bitmap_end = 1; 1894 } 1895 1896 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 1897 wake_up(&conf->wait_for_overlap); 1898 1899 while (bi && bi->bi_sector < 1900 sh->dev[i].sector + STRIPE_SECTORS) { 1901 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 1902 clear_bit(BIO_UPTODATE, &bi->bi_flags); 1903 if (--bi->bi_phys_segments == 0) { 1904 md_write_end(conf->mddev); 1905 bi->bi_next = *return_bi; 1906 *return_bi = bi; 1907 } 1908 bi = nextbi; 1909 } 1910 /* and fail all 'written' */ 1911 bi = sh->dev[i].written; 1912 sh->dev[i].written = NULL; 1913 if (bi) bitmap_end = 1; 1914 while (bi && bi->bi_sector < 1915 sh->dev[i].sector + STRIPE_SECTORS) { 1916 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 1917 clear_bit(BIO_UPTODATE, &bi->bi_flags); 1918 if (--bi->bi_phys_segments == 0) { 1919 md_write_end(conf->mddev); 1920 bi->bi_next = *return_bi; 1921 *return_bi = bi; 1922 } 1923 bi = bi2; 1924 } 1925 1926 /* fail any reads if this device is non-operational and 1927 * the data has not reached the cache yet. 1928 */ 1929 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && 1930 (!test_bit(R5_Insync, &sh->dev[i].flags) || 1931 test_bit(R5_ReadError, &sh->dev[i].flags))) { 1932 bi = sh->dev[i].toread; 1933 sh->dev[i].toread = NULL; 1934 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 1935 wake_up(&conf->wait_for_overlap); 1936 if (bi) s->to_read--; 1937 while (bi && bi->bi_sector < 1938 sh->dev[i].sector + STRIPE_SECTORS) { 1939 struct bio *nextbi = 1940 r5_next_bio(bi, sh->dev[i].sector); 1941 clear_bit(BIO_UPTODATE, &bi->bi_flags); 1942 if (--bi->bi_phys_segments == 0) { 1943 bi->bi_next = *return_bi; 1944 *return_bi = bi; 1945 } 1946 bi = nextbi; 1947 } 1948 } 1949 spin_unlock_irq(&conf->device_lock); 1950 if (bitmap_end) 1951 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 1952 STRIPE_SECTORS, 0, 0); 1953 } 1954 1955 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 1956 if (atomic_dec_and_test(&conf->pending_full_writes)) 1957 md_wakeup_thread(conf->mddev->thread); 1958 } 1959 1960 /* __handle_issuing_new_read_requests5 - returns 0 if there are no more disks 1961 * to process 1962 */ 1963 static int __handle_issuing_new_read_requests5(struct stripe_head *sh, 1964 struct stripe_head_state *s, int disk_idx, int disks) 1965 { 1966 struct r5dev *dev = &sh->dev[disk_idx]; 1967 struct r5dev *failed_dev = &sh->dev[s->failed_num]; 1968 1969 /* don't schedule compute operations or reads on the parity block while 1970 * a check is in flight 1971 */ 1972 if ((disk_idx == sh->pd_idx) && 1973 test_bit(STRIPE_OP_CHECK, &sh->ops.pending)) 1974 return ~0; 1975 1976 /* is the data in this block needed, and can we get it? */ 1977 if (!test_bit(R5_LOCKED, &dev->flags) && 1978 !test_bit(R5_UPTODATE, &dev->flags) && (dev->toread || 1979 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || 1980 s->syncing || s->expanding || (s->failed && 1981 (failed_dev->toread || (failed_dev->towrite && 1982 !test_bit(R5_OVERWRITE, &failed_dev->flags) 1983 ))))) { 1984 /* 1/ We would like to get this block, possibly by computing it, 1985 * but we might not be able to. 1986 * 1987 * 2/ Since parity check operations potentially make the parity 1988 * block !uptodate it will need to be refreshed before any 1989 * compute operations on data disks are scheduled. 1990 * 1991 * 3/ We hold off parity block re-reads until check operations 1992 * have quiesced. 
1993 */ 1994 if ((s->uptodate == disks - 1) && 1995 !test_bit(STRIPE_OP_CHECK, &sh->ops.pending)) { 1996 set_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending); 1997 set_bit(R5_Wantcompute, &dev->flags); 1998 sh->ops.target = disk_idx; 1999 s->req_compute = 1; 2000 sh->ops.count++; 2001 /* Careful: from this point on 'uptodate' is in the eye 2002 * of raid5_run_ops which services 'compute' operations 2003 * before writes. R5_Wantcompute flags a block that will 2004 * be R5_UPTODATE by the time it is needed for a 2005 * subsequent operation. 2006 */ 2007 s->uptodate++; 2008 return 0; /* uptodate + compute == disks */ 2009 } else if ((s->uptodate < disks - 1) && 2010 test_bit(R5_Insync, &dev->flags)) { 2011 /* Note: we hold off compute operations while checks are 2012 * in flight, but we still prefer 'compute' over 'read' 2013 * hence we only read if (uptodate < disks-1) 2014 */ 2015 set_bit(R5_LOCKED, &dev->flags); 2016 set_bit(R5_Wantread, &dev->flags); 2017 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending)) 2018 sh->ops.count++; 2019 s->locked++; 2020 pr_debug("Reading block %d (sync=%d)\n", disk_idx, 2021 s->syncing); 2022 } 2023 } 2024 2025 return ~0; 2026 } 2027 2028 static void handle_issuing_new_read_requests5(struct stripe_head *sh, 2029 struct stripe_head_state *s, int disks) 2030 { 2031 int i; 2032 2033 /* Clear completed compute operations. Parity recovery 2034 * (STRIPE_OP_MOD_REPAIR_PD) implies a write-back which is handled 2035 * later on in this routine 2036 */ 2037 if (test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete) && 2038 !test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) { 2039 clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete); 2040 clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.ack); 2041 clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending); 2042 } 2043 2044 /* look for blocks to read/compute, skip this if a compute 2045 * is already in flight, or if the stripe contents are in the 2046 * midst of changing due to a write 2047 */ 2048 if (!test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending) && 2049 !test_bit(STRIPE_OP_PREXOR, &sh->ops.pending) && 2050 !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) { 2051 for (i = disks; i--; ) 2052 if (__handle_issuing_new_read_requests5( 2053 sh, s, i, disks) == 0) 2054 break; 2055 } 2056 set_bit(STRIPE_HANDLE, &sh->state); 2057 } 2058 2059 static void handle_issuing_new_read_requests6(struct stripe_head *sh, 2060 struct stripe_head_state *s, struct r6_state *r6s, 2061 int disks) 2062 { 2063 int i; 2064 for (i = disks; i--; ) { 2065 struct r5dev *dev = &sh->dev[i]; 2066 if (!test_bit(R5_LOCKED, &dev->flags) && 2067 !test_bit(R5_UPTODATE, &dev->flags) && 2068 (dev->toread || (dev->towrite && 2069 !test_bit(R5_OVERWRITE, &dev->flags)) || 2070 s->syncing || s->expanding || 2071 (s->failed >= 1 && 2072 (sh->dev[r6s->failed_num[0]].toread || 2073 s->to_write)) || 2074 (s->failed >= 2 && 2075 (sh->dev[r6s->failed_num[1]].toread || 2076 s->to_write)))) { 2077 /* we would like to get this block, possibly 2078 * by computing it, but we might not be able to 2079 */ 2080 if (s->uptodate == disks-1) { 2081 pr_debug("Computing stripe %llu block %d\n", 2082 (unsigned long long)sh->sector, i); 2083 compute_block_1(sh, i, 0); 2084 s->uptodate++; 2085 } else if ( s->uptodate == disks-2 && s->failed >= 2 ) { 2086 /* Computing 2-failure is *very* expensive; only 2087 * do it if failed >= 2 2088 */ 2089 int other; 2090 for (other = disks; other--; ) { 2091 if (other == i) 2092 continue; 2093 if (!test_bit(R5_UPTODATE, 2094 &sh->dev[other].flags)) 2095 break;
2096 } 2097 BUG_ON(other < 0); 2098 pr_debug("Computing stripe %llu blocks %d,%d\n", 2099 (unsigned long long)sh->sector, 2100 i, other); 2101 compute_block_2(sh, i, other); 2102 s->uptodate += 2; 2103 } else if (test_bit(R5_Insync, &dev->flags)) { 2104 set_bit(R5_LOCKED, &dev->flags); 2105 set_bit(R5_Wantread, &dev->flags); 2106 s->locked++; 2107 pr_debug("Reading block %d (sync=%d)\n", 2108 i, s->syncing); 2109 } 2110 } 2111 } 2112 set_bit(STRIPE_HANDLE, &sh->state); 2113 } 2114 2115 2116 /* handle_completed_write_requests 2117 * any written block on an uptodate or failed drive can be returned. 2118 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but 2119 * never LOCKED, so we don't need to test 'failed' directly. 2120 */ 2121 static void handle_completed_write_requests(raid5_conf_t *conf, 2122 struct stripe_head *sh, int disks, struct bio **return_bi) 2123 { 2124 int i; 2125 struct r5dev *dev; 2126 2127 for (i = disks; i--; ) 2128 if (sh->dev[i].written) { 2129 dev = &sh->dev[i]; 2130 if (!test_bit(R5_LOCKED, &dev->flags) && 2131 test_bit(R5_UPTODATE, &dev->flags)) { 2132 /* We can return any write requests */ 2133 struct bio *wbi, *wbi2; 2134 int bitmap_end = 0; 2135 pr_debug("Return write for disc %d\n", i); 2136 spin_lock_irq(&conf->device_lock); 2137 wbi = dev->written; 2138 dev->written = NULL; 2139 while (wbi && wbi->bi_sector < 2140 dev->sector + STRIPE_SECTORS) { 2141 wbi2 = r5_next_bio(wbi, dev->sector); 2142 if (--wbi->bi_phys_segments == 0) { 2143 md_write_end(conf->mddev); 2144 wbi->bi_next = *return_bi; 2145 *return_bi = wbi; 2146 } 2147 wbi = wbi2; 2148 } 2149 if (dev->towrite == NULL) 2150 bitmap_end = 1; 2151 spin_unlock_irq(&conf->device_lock); 2152 if (bitmap_end) 2153 bitmap_endwrite(conf->mddev->bitmap, 2154 sh->sector, 2155 STRIPE_SECTORS, 2156 !test_bit(STRIPE_DEGRADED, &sh->state), 2157 0); 2158 } 2159 } 2160 2161 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 2162 if (atomic_dec_and_test(&conf->pending_full_writes)) 2163 md_wakeup_thread(conf->mddev->thread); 2164 } 2165 2166 static void handle_issuing_new_write_requests5(raid5_conf_t *conf, 2167 struct stripe_head *sh, struct stripe_head_state *s, int disks) 2168 { 2169 int rmw = 0, rcw = 0, i; 2170 for (i = disks; i--; ) { 2171 /* would I have to read this buffer for read_modify_write */ 2172 struct r5dev *dev = &sh->dev[i]; 2173 if ((dev->towrite || i == sh->pd_idx) && 2174 !test_bit(R5_LOCKED, &dev->flags) && 2175 !(test_bit(R5_UPTODATE, &dev->flags) || 2176 test_bit(R5_Wantcompute, &dev->flags))) { 2177 if (test_bit(R5_Insync, &dev->flags)) 2178 rmw++; 2179 else 2180 rmw += 2*disks; /* cannot read it */ 2181 } 2182 /* Would I have to read this buffer for reconstruct_write */ 2183 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx && 2184 !test_bit(R5_LOCKED, &dev->flags) && 2185 !(test_bit(R5_UPTODATE, &dev->flags) || 2186 test_bit(R5_Wantcompute, &dev->flags))) { 2187 if (test_bit(R5_Insync, &dev->flags)) rcw++; 2188 else 2189 rcw += 2*disks; 2190 } 2191 } 2192 pr_debug("for sector %llu, rmw=%d rcw=%d\n", 2193 (unsigned long long)sh->sector, rmw, rcw); 2194 set_bit(STRIPE_HANDLE, &sh->state); 2195 if (rmw < rcw && rmw > 0) 2196 /* prefer read-modify-write, but need to get some data */ 2197 for (i = disks; i--; ) { 2198 struct r5dev *dev = &sh->dev[i]; 2199 if ((dev->towrite || i == sh->pd_idx) && 2200 !test_bit(R5_LOCKED, &dev->flags) && 2201 !(test_bit(R5_UPTODATE, &dev->flags) || 2202 test_bit(R5_Wantcompute, &dev->flags)) && 2203 test_bit(R5_Insync, &dev->flags)) { 2204 
if ( 2205 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2206 pr_debug("Read_old block " 2207 "%d for r-m-w\n", i); 2208 set_bit(R5_LOCKED, &dev->flags); 2209 set_bit(R5_Wantread, &dev->flags); 2210 if (!test_and_set_bit( 2211 STRIPE_OP_IO, &sh->ops.pending)) 2212 sh->ops.count++; 2213 s->locked++; 2214 } else { 2215 set_bit(STRIPE_DELAYED, &sh->state); 2216 set_bit(STRIPE_HANDLE, &sh->state); 2217 } 2218 } 2219 } 2220 if (rcw <= rmw && rcw > 0) 2221 /* want reconstruct write, but need to get some data */ 2222 for (i = disks; i--; ) { 2223 struct r5dev *dev = &sh->dev[i]; 2224 if (!test_bit(R5_OVERWRITE, &dev->flags) && 2225 i != sh->pd_idx && 2226 !test_bit(R5_LOCKED, &dev->flags) && 2227 !(test_bit(R5_UPTODATE, &dev->flags) || 2228 test_bit(R5_Wantcompute, &dev->flags)) && 2229 test_bit(R5_Insync, &dev->flags)) { 2230 if ( 2231 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2232 pr_debug("Read_old block " 2233 "%d for Reconstruct\n", i); 2234 set_bit(R5_LOCKED, &dev->flags); 2235 set_bit(R5_Wantread, &dev->flags); 2236 if (!test_and_set_bit( 2237 STRIPE_OP_IO, &sh->ops.pending)) 2238 sh->ops.count++; 2239 s->locked++; 2240 } else { 2241 set_bit(STRIPE_DELAYED, &sh->state); 2242 set_bit(STRIPE_HANDLE, &sh->state); 2243 } 2244 } 2245 } 2246 /* now if nothing is locked, and if we have enough data, 2247 * we can start a write request 2248 */ 2249 /* since handle_stripe can be called at any time we need to handle the 2250 * case where a compute block operation has been submitted and then a 2251 * subsequent call wants to start a write request. raid5_run_ops only 2252 * handles the case where compute block and postxor are requested 2253 * simultaneously. If this is not the case then new writes need to be 2254 * held off until the compute completes. 2255 */ 2256 if ((s->req_compute || 2257 !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) && 2258 (s->locked == 0 && (rcw == 0 || rmw == 0) && 2259 !test_bit(STRIPE_BIT_DELAY, &sh->state))) 2260 s->locked += handle_write_operations5(sh, rcw == 0, 0); 2261 } 2262 2263 static void handle_issuing_new_write_requests6(raid5_conf_t *conf, 2264 struct stripe_head *sh, struct stripe_head_state *s, 2265 struct r6_state *r6s, int disks) 2266 { 2267 int rcw = 0, must_compute = 0, pd_idx = sh->pd_idx, i; 2268 int qd_idx = r6s->qd_idx; 2269 for (i = disks; i--; ) { 2270 struct r5dev *dev = &sh->dev[i]; 2271 /* Would I have to read this buffer for reconstruct_write */ 2272 if (!test_bit(R5_OVERWRITE, &dev->flags) 2273 && i != pd_idx && i != qd_idx 2274 && (!test_bit(R5_LOCKED, &dev->flags) 2275 ) && 2276 !test_bit(R5_UPTODATE, &dev->flags)) { 2277 if (test_bit(R5_Insync, &dev->flags)) rcw++; 2278 else { 2279 pr_debug("raid6: must_compute: " 2280 "disk %d flags=%#lx\n", i, dev->flags); 2281 must_compute++; 2282 } 2283 } 2284 } 2285 pr_debug("for sector %llu, rcw=%d, must_compute=%d\n", 2286 (unsigned long long)sh->sector, rcw, must_compute); 2287 set_bit(STRIPE_HANDLE, &sh->state); 2288 2289 if (rcw > 0) 2290 /* want reconstruct write, but need to get some data */ 2291 for (i = disks; i--; ) { 2292 struct r5dev *dev = &sh->dev[i]; 2293 if (!test_bit(R5_OVERWRITE, &dev->flags) 2294 && !(s->failed == 0 && (i == pd_idx || i == qd_idx)) 2295 && !test_bit(R5_LOCKED, &dev->flags) && 2296 !test_bit(R5_UPTODATE, &dev->flags) && 2297 test_bit(R5_Insync, &dev->flags)) { 2298 if ( 2299 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2300 pr_debug("Read_old stripe %llu " 2301 "block %d for Reconstruct\n", 2302 (unsigned long long)sh->sector, i); 2303 set_bit(R5_LOCKED, 
&dev->flags); 2304 set_bit(R5_Wantread, &dev->flags); 2305 s->locked++; 2306 } else { 2307 pr_debug("Request delayed stripe %llu " 2308 "block %d for Reconstruct\n", 2309 (unsigned long long)sh->sector, i); 2310 set_bit(STRIPE_DELAYED, &sh->state); 2311 set_bit(STRIPE_HANDLE, &sh->state); 2312 } 2313 } 2314 } 2315 /* now if nothing is locked, and if we have enough data, we can start a 2316 * write request 2317 */ 2318 if (s->locked == 0 && rcw == 0 && 2319 !test_bit(STRIPE_BIT_DELAY, &sh->state)) { 2320 if (must_compute > 0) { 2321 /* We have failed blocks and need to compute them */ 2322 switch (s->failed) { 2323 case 0: 2324 BUG(); 2325 case 1: 2326 compute_block_1(sh, r6s->failed_num[0], 0); 2327 break; 2328 case 2: 2329 compute_block_2(sh, r6s->failed_num[0], 2330 r6s->failed_num[1]); 2331 break; 2332 default: /* This request should have been failed? */ 2333 BUG(); 2334 } 2335 } 2336 2337 pr_debug("Computing parity for stripe %llu\n", 2338 (unsigned long long)sh->sector); 2339 compute_parity6(sh, RECONSTRUCT_WRITE); 2340 /* now every locked buffer is ready to be written */ 2341 for (i = disks; i--; ) 2342 if (test_bit(R5_LOCKED, &sh->dev[i].flags)) { 2343 pr_debug("Writing stripe %llu block %d\n", 2344 (unsigned long long)sh->sector, i); 2345 s->locked++; 2346 set_bit(R5_Wantwrite, &sh->dev[i].flags); 2347 } 2348 if (s->locked == disks) 2349 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) 2350 atomic_inc(&conf->pending_full_writes); 2351 /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */ 2352 set_bit(STRIPE_INSYNC, &sh->state); 2353 2354 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2355 atomic_dec(&conf->preread_active_stripes); 2356 if (atomic_read(&conf->preread_active_stripes) < 2357 IO_THRESHOLD) 2358 md_wakeup_thread(conf->mddev->thread); 2359 } 2360 } 2361 } 2362 2363 static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh, 2364 struct stripe_head_state *s, int disks) 2365 { 2366 int canceled_check = 0; 2367 2368 set_bit(STRIPE_HANDLE, &sh->state); 2369 2370 /* complete a check operation */ 2371 if (test_and_clear_bit(STRIPE_OP_CHECK, &sh->ops.complete)) { 2372 clear_bit(STRIPE_OP_CHECK, &sh->ops.ack); 2373 clear_bit(STRIPE_OP_CHECK, &sh->ops.pending); 2374 if (s->failed == 0) { 2375 if (sh->ops.zero_sum_result == 0) 2376 /* parity is correct (on disc, 2377 * not in buffer any more) 2378 */ 2379 set_bit(STRIPE_INSYNC, &sh->state); 2380 else { 2381 conf->mddev->resync_mismatches += 2382 STRIPE_SECTORS; 2383 if (test_bit( 2384 MD_RECOVERY_CHECK, &conf->mddev->recovery)) 2385 /* don't try to repair!! 
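 * (the mismatch has already been counted in resync_mismatches above;
 * a MD_RECOVERY_CHECK pass stops there and just marks the stripe
 * in-sync, while a 'repair' or normal resync falls through to the
 * compute path below, which rebuilds the parity block and leads to it
 * being written back)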
*/ 2386 set_bit(STRIPE_INSYNC, &sh->state); 2387 else { 2388 set_bit(STRIPE_OP_COMPUTE_BLK, 2389 &sh->ops.pending); 2390 set_bit(STRIPE_OP_MOD_REPAIR_PD, 2391 &sh->ops.pending); 2392 set_bit(R5_Wantcompute, 2393 &sh->dev[sh->pd_idx].flags); 2394 sh->ops.target = sh->pd_idx; 2395 sh->ops.count++; 2396 s->uptodate++; 2397 } 2398 } 2399 } else 2400 canceled_check = 1; /* STRIPE_INSYNC is not set */ 2401 } 2402 2403 /* check if we can clear a parity disk reconstruct */ 2404 if (test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete) && 2405 test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) { 2406 2407 clear_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending); 2408 clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete); 2409 clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.ack); 2410 clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending); 2411 } 2412 2413 /* start a new check operation if there are no failures, the stripe is 2414 * not insync, and a repair is not in flight 2415 */ 2416 if (s->failed == 0 && 2417 !test_bit(STRIPE_INSYNC, &sh->state) && 2418 !test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) { 2419 if (!test_and_set_bit(STRIPE_OP_CHECK, &sh->ops.pending)) { 2420 BUG_ON(s->uptodate != disks); 2421 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); 2422 sh->ops.count++; 2423 s->uptodate--; 2424 } 2425 } 2426 2427 /* Wait for check parity and compute block operations to complete 2428 * before write-back. If a failure occurred while the check operation 2429 * was in flight we need to cycle this stripe through handle_stripe 2430 * since the parity block may not be uptodate 2431 */ 2432 if (!canceled_check && !test_bit(STRIPE_INSYNC, &sh->state) && 2433 !test_bit(STRIPE_OP_CHECK, &sh->ops.pending) && 2434 !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) { 2435 struct r5dev *dev; 2436 /* either failed parity check, or recovery is happening */ 2437 if (s->failed == 0) 2438 s->failed_num = sh->pd_idx; 2439 dev = &sh->dev[s->failed_num]; 2440 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); 2441 BUG_ON(s->uptodate != disks); 2442 2443 set_bit(R5_LOCKED, &dev->flags); 2444 set_bit(R5_Wantwrite, &dev->flags); 2445 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending)) 2446 sh->ops.count++; 2447 2448 clear_bit(STRIPE_DEGRADED, &sh->state); 2449 s->locked++; 2450 set_bit(STRIPE_INSYNC, &sh->state); 2451 } 2452 } 2453 2454 2455 static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh, 2456 struct stripe_head_state *s, 2457 struct r6_state *r6s, struct page *tmp_page, 2458 int disks) 2459 { 2460 int update_p = 0, update_q = 0; 2461 struct r5dev *dev; 2462 int pd_idx = sh->pd_idx; 2463 int qd_idx = r6s->qd_idx; 2464 2465 set_bit(STRIPE_HANDLE, &sh->state); 2466 2467 BUG_ON(s->failed > 2); 2468 BUG_ON(s->uptodate < disks); 2469 /* Want to check and possibly repair P and Q. 2470 * However there could be one 'failed' device, in which 2471 * case we can only check one of them, possibly using the 2472 * other to generate missing data 2473 */ 2474 2475 /* If !tmp_page, we cannot do the calculations, 2476 * but as we have set STRIPE_HANDLE, we will soon be called 2477 * by stripe_handle with a tmp_page - just wait until then. 2478 */ 2479 if (tmp_page) { 2480 if (s->failed == r6s->q_failed) { 2481 /* The only possible failed device holds 'Q', so it 2482 * makes sense to check P (If anything else were failed, 2483 * we would have used P to recreate it). 
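 *
 * An illustrative way to read the "s->failed == r6s->q_failed" test
 * above (q_failed is 0 or 1, failed is a count):
 *	failed=0, q_failed=0  ->  nothing is missing, P can be checked
 *	failed=1, q_failed=1  ->  the one missing device is Q itself,
 *				  so P is still worth checking
 *	failed=1, q_failed=0  ->  a data or P device is missing and P
 *				  is needed to recreate it, so checking
 *				  P tells us nothing
 * (failed >= 2 never matches, and the Q check below is skipped in
 * that case as well)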
2484 */ 2485 compute_block_1(sh, pd_idx, 1); 2486 if (!page_is_zero(sh->dev[pd_idx].page)) { 2487 compute_block_1(sh, pd_idx, 0); 2488 update_p = 1; 2489 } 2490 } 2491 if (!r6s->q_failed && s->failed < 2) { 2492 /* q is not failed, and we didn't use it to generate 2493 * anything, so it makes sense to check it 2494 */ 2495 memcpy(page_address(tmp_page), 2496 page_address(sh->dev[qd_idx].page), 2497 STRIPE_SIZE); 2498 compute_parity6(sh, UPDATE_PARITY); 2499 if (memcmp(page_address(tmp_page), 2500 page_address(sh->dev[qd_idx].page), 2501 STRIPE_SIZE) != 0) { 2502 clear_bit(STRIPE_INSYNC, &sh->state); 2503 update_q = 1; 2504 } 2505 } 2506 if (update_p || update_q) { 2507 conf->mddev->resync_mismatches += STRIPE_SECTORS; 2508 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 2509 /* don't try to repair!! */ 2510 update_p = update_q = 0; 2511 } 2512 2513 /* now write out any block on a failed drive, 2514 * or P or Q if they need it 2515 */ 2516 2517 if (s->failed == 2) { 2518 dev = &sh->dev[r6s->failed_num[1]]; 2519 s->locked++; 2520 set_bit(R5_LOCKED, &dev->flags); 2521 set_bit(R5_Wantwrite, &dev->flags); 2522 } 2523 if (s->failed >= 1) { 2524 dev = &sh->dev[r6s->failed_num[0]]; 2525 s->locked++; 2526 set_bit(R5_LOCKED, &dev->flags); 2527 set_bit(R5_Wantwrite, &dev->flags); 2528 } 2529 2530 if (update_p) { 2531 dev = &sh->dev[pd_idx]; 2532 s->locked++; 2533 set_bit(R5_LOCKED, &dev->flags); 2534 set_bit(R5_Wantwrite, &dev->flags); 2535 } 2536 if (update_q) { 2537 dev = &sh->dev[qd_idx]; 2538 s->locked++; 2539 set_bit(R5_LOCKED, &dev->flags); 2540 set_bit(R5_Wantwrite, &dev->flags); 2541 } 2542 clear_bit(STRIPE_DEGRADED, &sh->state); 2543 2544 set_bit(STRIPE_INSYNC, &sh->state); 2545 } 2546 } 2547 2548 static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh, 2549 struct r6_state *r6s) 2550 { 2551 int i; 2552 2553 /* We have read all the blocks in this stripe and now we need to 2554 * copy some of them into a target stripe for expand. 2555 */ 2556 struct dma_async_tx_descriptor *tx = NULL; 2557 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); 2558 for (i = 0; i < sh->disks; i++) 2559 if (i != sh->pd_idx && (!r6s || i != r6s->qd_idx)) { 2560 int dd_idx, pd_idx, j; 2561 struct stripe_head *sh2; 2562 2563 sector_t bn = compute_blocknr(sh, i); 2564 sector_t s = raid5_compute_sector(bn, conf->raid_disks, 2565 conf->raid_disks - 2566 conf->max_degraded, &dd_idx, 2567 &pd_idx, conf); 2568 sh2 = get_active_stripe(conf, s, conf->raid_disks, 2569 pd_idx, 1); 2570 if (sh2 == NULL) 2571 /* so far only the early blocks of this stripe 2572 * have been requested. 
When later blocks 2573 * get requested, we will try again 2574 */ 2575 continue; 2576 if (!test_bit(STRIPE_EXPANDING, &sh2->state) || 2577 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) { 2578 /* must have already done this block */ 2579 release_stripe(sh2); 2580 continue; 2581 } 2582 2583 /* place all the copies on one channel */ 2584 tx = async_memcpy(sh2->dev[dd_idx].page, 2585 sh->dev[i].page, 0, 0, STRIPE_SIZE, 2586 ASYNC_TX_DEP_ACK, tx, NULL, NULL); 2587 2588 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); 2589 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); 2590 for (j = 0; j < conf->raid_disks; j++) 2591 if (j != sh2->pd_idx && 2592 (!r6s || j != raid6_next_disk(sh2->pd_idx, 2593 sh2->disks)) && 2594 !test_bit(R5_Expanded, &sh2->dev[j].flags)) 2595 break; 2596 if (j == conf->raid_disks) { 2597 set_bit(STRIPE_EXPAND_READY, &sh2->state); 2598 set_bit(STRIPE_HANDLE, &sh2->state); 2599 } 2600 release_stripe(sh2); 2601 2602 } 2603 /* done submitting copies, wait for them to complete */ 2604 if (tx) { 2605 async_tx_ack(tx); 2606 dma_wait_for_async_tx(tx); 2607 } 2608 } 2609 2610 2611 /* 2612 * handle_stripe - do things to a stripe. 2613 * 2614 * We lock the stripe and then examine the state of various bits 2615 * to see what needs to be done. 2616 * Possible results: 2617 * return some read request which now have data 2618 * return some write requests which are safely on disc 2619 * schedule a read on some buffers 2620 * schedule a write of some buffers 2621 * return confirmation of parity correctness 2622 * 2623 * buffers are taken off read_list or write_list, and bh_cache buffers 2624 * get BH_Lock set before the stripe lock is released. 2625 * 2626 */ 2627 2628 static void handle_stripe5(struct stripe_head *sh) 2629 { 2630 raid5_conf_t *conf = sh->raid_conf; 2631 int disks = sh->disks, i; 2632 struct bio *return_bi = NULL; 2633 struct stripe_head_state s; 2634 struct r5dev *dev; 2635 unsigned long pending = 0; 2636 mdk_rdev_t *blocked_rdev = NULL; 2637 2638 memset(&s, 0, sizeof(s)); 2639 pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d " 2640 "ops=%lx:%lx:%lx\n", (unsigned long long)sh->sector, sh->state, 2641 atomic_read(&sh->count), sh->pd_idx, 2642 sh->ops.pending, sh->ops.ack, sh->ops.complete); 2643 2644 spin_lock(&sh->lock); 2645 clear_bit(STRIPE_HANDLE, &sh->state); 2646 clear_bit(STRIPE_DELAYED, &sh->state); 2647 2648 s.syncing = test_bit(STRIPE_SYNCING, &sh->state); 2649 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); 2650 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); 2651 /* Now to look around and see what can be done */ 2652 2653 /* clean-up completed biofill operations */ 2654 if (test_bit(STRIPE_OP_BIOFILL, &sh->ops.complete)) { 2655 clear_bit(STRIPE_OP_BIOFILL, &sh->ops.pending); 2656 clear_bit(STRIPE_OP_BIOFILL, &sh->ops.ack); 2657 clear_bit(STRIPE_OP_BIOFILL, &sh->ops.complete); 2658 } 2659 2660 rcu_read_lock(); 2661 for (i=disks; i--; ) { 2662 mdk_rdev_t *rdev; 2663 struct r5dev *dev = &sh->dev[i]; 2664 clear_bit(R5_Insync, &dev->flags); 2665 2666 pr_debug("check %d: state 0x%lx toread %p read %p write %p " 2667 "written %p\n", i, dev->flags, dev->toread, dev->read, 2668 dev->towrite, dev->written); 2669 2670 /* maybe we can request a biofill operation 2671 * 2672 * new wantfill requests are only permitted while 2673 * STRIPE_OP_BIOFILL is clear 2674 */ 2675 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread && 2676 !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)) 2677 set_bit(R5_Wantfill, &dev->flags); 2678 2679 /* now count some 
things */ 2680 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++; 2681 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++; 2682 if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++; 2683 2684 if (test_bit(R5_Wantfill, &dev->flags)) 2685 s.to_fill++; 2686 else if (dev->toread) 2687 s.to_read++; 2688 if (dev->towrite) { 2689 s.to_write++; 2690 if (!test_bit(R5_OVERWRITE, &dev->flags)) 2691 s.non_overwrite++; 2692 } 2693 if (dev->written) 2694 s.written++; 2695 rdev = rcu_dereference(conf->disks[i].rdev); 2696 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { 2697 blocked_rdev = rdev; 2698 atomic_inc(&rdev->nr_pending); 2699 break; 2700 } 2701 if (!rdev || !test_bit(In_sync, &rdev->flags)) { 2702 /* The ReadError flag will just be confusing now */ 2703 clear_bit(R5_ReadError, &dev->flags); 2704 clear_bit(R5_ReWrite, &dev->flags); 2705 } 2706 if (!rdev || !test_bit(In_sync, &rdev->flags) 2707 || test_bit(R5_ReadError, &dev->flags)) { 2708 s.failed++; 2709 s.failed_num = i; 2710 } else 2711 set_bit(R5_Insync, &dev->flags); 2712 } 2713 rcu_read_unlock(); 2714 2715 if (unlikely(blocked_rdev)) { 2716 set_bit(STRIPE_HANDLE, &sh->state); 2717 goto unlock; 2718 } 2719 2720 if (s.to_fill && !test_and_set_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)) 2721 sh->ops.count++; 2722 2723 pr_debug("locked=%d uptodate=%d to_read=%d" 2724 " to_write=%d failed=%d failed_num=%d\n", 2725 s.locked, s.uptodate, s.to_read, s.to_write, 2726 s.failed, s.failed_num); 2727 /* check if the array has lost two devices and, if so, some requests might 2728 * need to be failed 2729 */ 2730 if (s.failed > 1 && s.to_read+s.to_write+s.written) 2731 handle_requests_to_failed_array(conf, sh, &s, disks, 2732 &return_bi); 2733 if (s.failed > 1 && s.syncing) { 2734 md_done_sync(conf->mddev, STRIPE_SECTORS,0); 2735 clear_bit(STRIPE_SYNCING, &sh->state); 2736 s.syncing = 0; 2737 } 2738 2739 /* might be able to return some write requests if the parity block 2740 * is safe, or on a failed drive 2741 */ 2742 dev = &sh->dev[sh->pd_idx]; 2743 if ( s.written && 2744 ((test_bit(R5_Insync, &dev->flags) && 2745 !test_bit(R5_LOCKED, &dev->flags) && 2746 test_bit(R5_UPTODATE, &dev->flags)) || 2747 (s.failed == 1 && s.failed_num == sh->pd_idx))) 2748 handle_completed_write_requests(conf, sh, disks, &return_bi); 2749 2750 /* Now we might consider reading some blocks, either to check/generate 2751 * parity, or to satisfy requests 2752 * or to load a block that is being partially written. 
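 * (a partly-written block shows up in the counts as 'non_overwrite';
 * its old contents must be read in before the read-modify-write or
 * reconstruct-write paths can produce valid parity)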
2753 */ 2754 if (s.to_read || s.non_overwrite || 2755 (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding || 2756 test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) 2757 handle_issuing_new_read_requests5(sh, &s, disks); 2758 2759 /* Now we check to see if any write operations have recently 2760 * completed 2761 */ 2762 2763 /* leave prexor set until postxor is done, allows us to distinguish 2764 * a rmw from a rcw during biodrain 2765 */ 2766 if (test_bit(STRIPE_OP_PREXOR, &sh->ops.complete) && 2767 test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete)) { 2768 2769 clear_bit(STRIPE_OP_PREXOR, &sh->ops.complete); 2770 clear_bit(STRIPE_OP_PREXOR, &sh->ops.ack); 2771 clear_bit(STRIPE_OP_PREXOR, &sh->ops.pending); 2772 2773 for (i = disks; i--; ) 2774 clear_bit(R5_Wantprexor, &sh->dev[i].flags); 2775 } 2776 2777 /* if only POSTXOR is set then this is an 'expand' postxor */ 2778 if (test_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete) && 2779 test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete)) { 2780 2781 clear_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete); 2782 clear_bit(STRIPE_OP_BIODRAIN, &sh->ops.ack); 2783 clear_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending); 2784 2785 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.complete); 2786 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.ack); 2787 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.pending); 2788 2789 /* All the 'written' buffers and the parity block are ready to 2790 * be written back to disk 2791 */ 2792 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags)); 2793 for (i = disks; i--; ) { 2794 dev = &sh->dev[i]; 2795 if (test_bit(R5_LOCKED, &dev->flags) && 2796 (i == sh->pd_idx || dev->written)) { 2797 pr_debug("Writing block %d\n", i); 2798 set_bit(R5_Wantwrite, &dev->flags); 2799 if (!test_and_set_bit( 2800 STRIPE_OP_IO, &sh->ops.pending)) 2801 sh->ops.count++; 2802 if (!test_bit(R5_Insync, &dev->flags) || 2803 (i == sh->pd_idx && s.failed == 0)) 2804 set_bit(STRIPE_INSYNC, &sh->state); 2805 } 2806 } 2807 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2808 atomic_dec(&conf->preread_active_stripes); 2809 if (atomic_read(&conf->preread_active_stripes) < 2810 IO_THRESHOLD) 2811 md_wakeup_thread(conf->mddev->thread); 2812 } 2813 } 2814 2815 /* Now to consider new write requests and what else, if anything 2816 * should be read. We do not handle new writes when: 2817 * 1/ A 'write' operation (copy+xor) is already in flight. 2818 * 2/ A 'check' operation is in flight, as it may clobber the parity 2819 * block. 2820 */ 2821 if (s.to_write && !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending) && 2822 !test_bit(STRIPE_OP_CHECK, &sh->ops.pending)) 2823 handle_issuing_new_write_requests5(conf, sh, &s, disks); 2824 2825 /* maybe we need to check and possibly fix the parity for this stripe 2826 * Any reads will already have been scheduled, so we just see if enough 2827 * data is available. The parity check is held off while parity 2828 * dependent operations are in flight. 
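 * ("parity dependent" here means a pending STRIPE_OP_COMPUTE_BLK; a
 * pending check or repair likewise sends us straight back into
 * handle_parity_checks5 below so that it can be finished first)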
2829 */ 2830 if ((s.syncing && s.locked == 0 && 2831 !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending) && 2832 !test_bit(STRIPE_INSYNC, &sh->state)) || 2833 test_bit(STRIPE_OP_CHECK, &sh->ops.pending) || 2834 test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) 2835 handle_parity_checks5(conf, sh, &s, disks); 2836 2837 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { 2838 md_done_sync(conf->mddev, STRIPE_SECTORS,1); 2839 clear_bit(STRIPE_SYNCING, &sh->state); 2840 } 2841 2842 /* If the failed drive is just a ReadError, then we might need to progress 2843 * the repair/check process 2844 */ 2845 if (s.failed == 1 && !conf->mddev->ro && 2846 test_bit(R5_ReadError, &sh->dev[s.failed_num].flags) 2847 && !test_bit(R5_LOCKED, &sh->dev[s.failed_num].flags) 2848 && test_bit(R5_UPTODATE, &sh->dev[s.failed_num].flags) 2849 ) { 2850 dev = &sh->dev[s.failed_num]; 2851 if (!test_bit(R5_ReWrite, &dev->flags)) { 2852 set_bit(R5_Wantwrite, &dev->flags); 2853 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending)) 2854 sh->ops.count++; 2855 set_bit(R5_ReWrite, &dev->flags); 2856 set_bit(R5_LOCKED, &dev->flags); 2857 s.locked++; 2858 } else { 2859 /* let's read it back */ 2860 set_bit(R5_Wantread, &dev->flags); 2861 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending)) 2862 sh->ops.count++; 2863 set_bit(R5_LOCKED, &dev->flags); 2864 s.locked++; 2865 } 2866 } 2867 2868 /* Finish postxor operations initiated by the expansion 2869 * process 2870 */ 2871 if (test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete) && 2872 !test_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending)) { 2873 2874 clear_bit(STRIPE_EXPANDING, &sh->state); 2875 2876 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.pending); 2877 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.ack); 2878 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.complete); 2879 2880 for (i = conf->raid_disks; i--; ) { 2881 set_bit(R5_Wantwrite, &sh->dev[i].flags); 2882 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending)) 2883 sh->ops.count++; 2884 } 2885 } 2886 2887 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && 2888 !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) { 2889 /* Need to write out all blocks after computing parity */ 2890 sh->disks = conf->raid_disks; 2891 sh->pd_idx = stripe_to_pdidx(sh->sector, conf, 2892 conf->raid_disks); 2893 s.locked += handle_write_operations5(sh, 1, 1); 2894 } else if (s.expanded && 2895 !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) { 2896 clear_bit(STRIPE_EXPAND_READY, &sh->state); 2897 atomic_dec(&conf->reshape_stripes); 2898 wake_up(&conf->wait_for_overlap); 2899 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 2900 } 2901 2902 if (s.expanding && s.locked == 0 && 2903 !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) 2904 handle_stripe_expansion(conf, sh, NULL); 2905 2906 if (sh->ops.count) 2907 pending = get_stripe_work(sh); 2908 2909 unlock: 2910 spin_unlock(&sh->lock); 2911 2912 /* wait for this device to become unblocked */ 2913 if (unlikely(blocked_rdev)) 2914 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); 2915 2916 if (pending) 2917 raid5_run_ops(sh, pending); 2918 2919 return_io(return_bi); 2920 2921 } 2922 2923 static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page) 2924 { 2925 raid6_conf_t *conf = sh->raid_conf; 2926 int disks = sh->disks; 2927 struct bio *return_bi = NULL; 2928 int i, pd_idx = sh->pd_idx; 2929 struct stripe_head_state s; 2930 struct r6_state r6s; 2931 struct r5dev *dev, *pdev, *qdev; 2932 mdk_rdev_t *blocked_rdev = NULL; 2933 2934 r6s.qd_idx = raid6_next_disk(pd_idx, disks); 2935 
pr_debug("handling stripe %llu, state=%#lx cnt=%d, " 2936 "pd_idx=%d, qd_idx=%d\n", 2937 (unsigned long long)sh->sector, sh->state, 2938 atomic_read(&sh->count), pd_idx, r6s.qd_idx); 2939 memset(&s, 0, sizeof(s)); 2940 2941 spin_lock(&sh->lock); 2942 clear_bit(STRIPE_HANDLE, &sh->state); 2943 clear_bit(STRIPE_DELAYED, &sh->state); 2944 2945 s.syncing = test_bit(STRIPE_SYNCING, &sh->state); 2946 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); 2947 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); 2948 /* Now to look around and see what can be done */ 2949 2950 rcu_read_lock(); 2951 for (i=disks; i--; ) { 2952 mdk_rdev_t *rdev; 2953 dev = &sh->dev[i]; 2954 clear_bit(R5_Insync, &dev->flags); 2955 2956 pr_debug("check %d: state 0x%lx read %p write %p written %p\n", 2957 i, dev->flags, dev->toread, dev->towrite, dev->written); 2958 /* maybe we can reply to a read */ 2959 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) { 2960 struct bio *rbi, *rbi2; 2961 pr_debug("Return read for disc %d\n", i); 2962 spin_lock_irq(&conf->device_lock); 2963 rbi = dev->toread; 2964 dev->toread = NULL; 2965 if (test_and_clear_bit(R5_Overlap, &dev->flags)) 2966 wake_up(&conf->wait_for_overlap); 2967 spin_unlock_irq(&conf->device_lock); 2968 while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) { 2969 copy_data(0, rbi, dev->page, dev->sector); 2970 rbi2 = r5_next_bio(rbi, dev->sector); 2971 spin_lock_irq(&conf->device_lock); 2972 if (--rbi->bi_phys_segments == 0) { 2973 rbi->bi_next = return_bi; 2974 return_bi = rbi; 2975 } 2976 spin_unlock_irq(&conf->device_lock); 2977 rbi = rbi2; 2978 } 2979 } 2980 2981 /* now count some things */ 2982 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++; 2983 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++; 2984 2985 2986 if (dev->toread) 2987 s.to_read++; 2988 if (dev->towrite) { 2989 s.to_write++; 2990 if (!test_bit(R5_OVERWRITE, &dev->flags)) 2991 s.non_overwrite++; 2992 } 2993 if (dev->written) 2994 s.written++; 2995 rdev = rcu_dereference(conf->disks[i].rdev); 2996 if (rdev && unlikely(test_bit(Blocked, &rdev->flags))) { 2997 blocked_rdev = rdev; 2998 atomic_inc(&rdev->nr_pending); 2999 break; 3000 } 3001 if (!rdev || !test_bit(In_sync, &rdev->flags)) { 3002 /* The ReadError flag will just be confusing now */ 3003 clear_bit(R5_ReadError, &dev->flags); 3004 clear_bit(R5_ReWrite, &dev->flags); 3005 } 3006 if (!rdev || !test_bit(In_sync, &rdev->flags) 3007 || test_bit(R5_ReadError, &dev->flags)) { 3008 if (s.failed < 2) 3009 r6s.failed_num[s.failed] = i; 3010 s.failed++; 3011 } else 3012 set_bit(R5_Insync, &dev->flags); 3013 } 3014 rcu_read_unlock(); 3015 3016 if (unlikely(blocked_rdev)) { 3017 set_bit(STRIPE_HANDLE, &sh->state); 3018 goto unlock; 3019 } 3020 pr_debug("locked=%d uptodate=%d to_read=%d" 3021 " to_write=%d failed=%d failed_num=%d,%d\n", 3022 s.locked, s.uptodate, s.to_read, s.to_write, s.failed, 3023 r6s.failed_num[0], r6s.failed_num[1]); 3024 /* check if the array has lost >2 devices and, if so, some requests 3025 * might need to be failed 3026 */ 3027 if (s.failed > 2 && s.to_read+s.to_write+s.written) 3028 handle_requests_to_failed_array(conf, sh, &s, disks, 3029 &return_bi); 3030 if (s.failed > 2 && s.syncing) { 3031 md_done_sync(conf->mddev, STRIPE_SECTORS,0); 3032 clear_bit(STRIPE_SYNCING, &sh->state); 3033 s.syncing = 0; 3034 } 3035 3036 /* 3037 * might be able to return some write requests if the parity blocks 3038 * are safe, or on a failed drive 3039 */ 3040 pdev = &sh->dev[pd_idx]; 3041 r6s.p_failed = (s.failed >= 1 && 
r6s.failed_num[0] == pd_idx) 3042 || (s.failed >= 2 && r6s.failed_num[1] == pd_idx); 3043 qdev = &sh->dev[r6s.qd_idx]; 3044 r6s.q_failed = (s.failed >= 1 && r6s.failed_num[0] == r6s.qd_idx) 3045 || (s.failed >= 2 && r6s.failed_num[1] == r6s.qd_idx); 3046 3047 if ( s.written && 3048 ( r6s.p_failed || ((test_bit(R5_Insync, &pdev->flags) 3049 && !test_bit(R5_LOCKED, &pdev->flags) 3050 && test_bit(R5_UPTODATE, &pdev->flags)))) && 3051 ( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags) 3052 && !test_bit(R5_LOCKED, &qdev->flags) 3053 && test_bit(R5_UPTODATE, &qdev->flags))))) 3054 handle_completed_write_requests(conf, sh, disks, &return_bi); 3055 3056 /* Now we might consider reading some blocks, either to check/generate 3057 * parity, or to satisfy requests 3058 * or to load a block that is being partially written. 3059 */ 3060 if (s.to_read || s.non_overwrite || (s.to_write && s.failed) || 3061 (s.syncing && (s.uptodate < disks)) || s.expanding) 3062 handle_issuing_new_read_requests6(sh, &s, &r6s, disks); 3063 3064 /* now to consider writing and what else, if anything should be read */ 3065 if (s.to_write) 3066 handle_issuing_new_write_requests6(conf, sh, &s, &r6s, disks); 3067 3068 /* maybe we need to check and possibly fix the parity for this stripe 3069 * Any reads will already have been scheduled, so we just see if enough 3070 * data is available 3071 */ 3072 if (s.syncing && s.locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state)) 3073 handle_parity_checks6(conf, sh, &s, &r6s, tmp_page, disks); 3074 3075 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { 3076 md_done_sync(conf->mddev, STRIPE_SECTORS,1); 3077 clear_bit(STRIPE_SYNCING, &sh->state); 3078 } 3079 3080 /* If the failed drives are just a ReadError, then we might need 3081 * to progress the repair/check process 3082 */ 3083 if (s.failed <= 2 && !conf->mddev->ro) 3084 for (i = 0; i < s.failed; i++) { 3085 dev = &sh->dev[r6s.failed_num[i]]; 3086 if (test_bit(R5_ReadError, &dev->flags) 3087 && !test_bit(R5_LOCKED, &dev->flags) 3088 && test_bit(R5_UPTODATE, &dev->flags) 3089 ) { 3090 if (!test_bit(R5_ReWrite, &dev->flags)) { 3091 set_bit(R5_Wantwrite, &dev->flags); 3092 set_bit(R5_ReWrite, &dev->flags); 3093 set_bit(R5_LOCKED, &dev->flags); 3094 } else { 3095 /* let's read it back */ 3096 set_bit(R5_Wantread, &dev->flags); 3097 set_bit(R5_LOCKED, &dev->flags); 3098 } 3099 } 3100 } 3101 3102 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) { 3103 /* Need to write out all blocks after computing P&Q */ 3104 sh->disks = conf->raid_disks; 3105 sh->pd_idx = stripe_to_pdidx(sh->sector, conf, 3106 conf->raid_disks); 3107 compute_parity6(sh, RECONSTRUCT_WRITE); 3108 for (i = conf->raid_disks ; i-- ; ) { 3109 set_bit(R5_LOCKED, &sh->dev[i].flags); 3110 s.locked++; 3111 set_bit(R5_Wantwrite, &sh->dev[i].flags); 3112 } 3113 clear_bit(STRIPE_EXPANDING, &sh->state); 3114 } else if (s.expanded) { 3115 clear_bit(STRIPE_EXPAND_READY, &sh->state); 3116 atomic_dec(&conf->reshape_stripes); 3117 wake_up(&conf->wait_for_overlap); 3118 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 3119 } 3120 3121 if (s.expanding && s.locked == 0 && 3122 !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) 3123 handle_stripe_expansion(conf, sh, &r6s); 3124 3125 unlock: 3126 spin_unlock(&sh->lock); 3127 3128 /* wait for this device to become unblocked */ 3129 if (unlikely(blocked_rdev)) 3130 md_wait_for_blocked_rdev(blocked_rdev, conf->mddev); 3131 3132 return_io(return_bi); 3133 3134 for (i=disks; i-- ;) { 3135 int rw; 3136 struct bio *bi; 
3137 mdk_rdev_t *rdev; 3138 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) 3139 rw = WRITE; 3140 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 3141 rw = READ; 3142 else 3143 continue; 3144 3145 set_bit(STRIPE_IO_STARTED, &sh->state); 3146 3147 bi = &sh->dev[i].req; 3148 3149 bi->bi_rw = rw; 3150 if (rw == WRITE) 3151 bi->bi_end_io = raid5_end_write_request; 3152 else 3153 bi->bi_end_io = raid5_end_read_request; 3154 3155 rcu_read_lock(); 3156 rdev = rcu_dereference(conf->disks[i].rdev); 3157 if (rdev && test_bit(Faulty, &rdev->flags)) 3158 rdev = NULL; 3159 if (rdev) 3160 atomic_inc(&rdev->nr_pending); 3161 rcu_read_unlock(); 3162 3163 if (rdev) { 3164 if (s.syncing || s.expanding || s.expanded) 3165 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 3166 3167 bi->bi_bdev = rdev->bdev; 3168 pr_debug("for %llu schedule op %ld on disc %d\n", 3169 (unsigned long long)sh->sector, bi->bi_rw, i); 3170 atomic_inc(&sh->count); 3171 bi->bi_sector = sh->sector + rdev->data_offset; 3172 bi->bi_flags = 1 << BIO_UPTODATE; 3173 bi->bi_vcnt = 1; 3174 bi->bi_max_vecs = 1; 3175 bi->bi_idx = 0; 3176 bi->bi_io_vec = &sh->dev[i].vec; 3177 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 3178 bi->bi_io_vec[0].bv_offset = 0; 3179 bi->bi_size = STRIPE_SIZE; 3180 bi->bi_next = NULL; 3181 if (rw == WRITE && 3182 test_bit(R5_ReWrite, &sh->dev[i].flags)) 3183 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); 3184 generic_make_request(bi); 3185 } else { 3186 if (rw == WRITE) 3187 set_bit(STRIPE_DEGRADED, &sh->state); 3188 pr_debug("skip op %ld on disc %d for sector %llu\n", 3189 bi->bi_rw, i, (unsigned long long)sh->sector); 3190 clear_bit(R5_LOCKED, &sh->dev[i].flags); 3191 set_bit(STRIPE_HANDLE, &sh->state); 3192 } 3193 } 3194 } 3195 3196 static void handle_stripe(struct stripe_head *sh, struct page *tmp_page) 3197 { 3198 if (sh->raid_conf->level == 6) 3199 handle_stripe6(sh, tmp_page); 3200 else 3201 handle_stripe5(sh); 3202 } 3203 3204 3205 3206 static void raid5_activate_delayed(raid5_conf_t *conf) 3207 { 3208 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { 3209 while (!list_empty(&conf->delayed_list)) { 3210 struct list_head *l = conf->delayed_list.next; 3211 struct stripe_head *sh; 3212 sh = list_entry(l, struct stripe_head, lru); 3213 list_del_init(l); 3214 clear_bit(STRIPE_DELAYED, &sh->state); 3215 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3216 atomic_inc(&conf->preread_active_stripes); 3217 list_add_tail(&sh->lru, &conf->hold_list); 3218 } 3219 } else 3220 blk_plug_device(conf->mddev->queue); 3221 } 3222 3223 static void activate_bit_delay(raid5_conf_t *conf) 3224 { 3225 /* device_lock is held */ 3226 struct list_head head; 3227 list_add(&head, &conf->bitmap_list); 3228 list_del_init(&conf->bitmap_list); 3229 while (!list_empty(&head)) { 3230 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); 3231 list_del_init(&sh->lru); 3232 atomic_inc(&sh->count); 3233 __release_stripe(conf, sh); 3234 } 3235 } 3236 3237 static void unplug_slaves(mddev_t *mddev) 3238 { 3239 raid5_conf_t *conf = mddev_to_conf(mddev); 3240 int i; 3241 3242 rcu_read_lock(); 3243 for (i=0; i<mddev->raid_disks; i++) { 3244 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); 3245 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { 3246 struct request_queue *r_queue = bdev_get_queue(rdev->bdev); 3247 3248 atomic_inc(&rdev->nr_pending); 3249 rcu_read_unlock(); 3250 3251 blk_unplug(r_queue); 3252 3253 rdev_dec_pending(rdev, mddev); 3254 
rcu_read_lock(); 3255 } 3256 } 3257 rcu_read_unlock(); 3258 } 3259 3260 static void raid5_unplug_device(struct request_queue *q) 3261 { 3262 mddev_t *mddev = q->queuedata; 3263 raid5_conf_t *conf = mddev_to_conf(mddev); 3264 unsigned long flags; 3265 3266 spin_lock_irqsave(&conf->device_lock, flags); 3267 3268 if (blk_remove_plug(q)) { 3269 conf->seq_flush++; 3270 raid5_activate_delayed(conf); 3271 } 3272 md_wakeup_thread(mddev->thread); 3273 3274 spin_unlock_irqrestore(&conf->device_lock, flags); 3275 3276 unplug_slaves(mddev); 3277 } 3278 3279 static int raid5_congested(void *data, int bits) 3280 { 3281 mddev_t *mddev = data; 3282 raid5_conf_t *conf = mddev_to_conf(mddev); 3283 3284 /* No difference between reads and writes. Just check 3285 * how busy the stripe_cache is 3286 */ 3287 if (conf->inactive_blocked) 3288 return 1; 3289 if (conf->quiesce) 3290 return 1; 3291 if (list_empty_careful(&conf->inactive_list)) 3292 return 1; 3293 3294 return 0; 3295 } 3296 3297 /* We want read requests to align with chunks where possible, 3298 * but write requests don't need to. 3299 */ 3300 static int raid5_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec) 3301 { 3302 mddev_t *mddev = q->queuedata; 3303 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); 3304 int max; 3305 unsigned int chunk_sectors = mddev->chunk_size >> 9; 3306 unsigned int bio_sectors = bio->bi_size >> 9; 3307 3308 if (bio_data_dir(bio) == WRITE) 3309 return biovec->bv_len; /* always allow writes to be mergeable */ 3310 3311 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; 3312 if (max < 0) max = 0; 3313 if (max <= biovec->bv_len && bio_sectors == 0) 3314 return biovec->bv_len; 3315 else 3316 return max; 3317 } 3318 3319 3320 static int in_chunk_boundary(mddev_t *mddev, struct bio *bio) 3321 { 3322 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); 3323 unsigned int chunk_sectors = mddev->chunk_size >> 9; 3324 unsigned int bio_sectors = bio->bi_size >> 9; 3325 3326 return chunk_sectors >= 3327 ((sector & (chunk_sectors - 1)) + bio_sectors); 3328 } 3329 3330 /* 3331 * add bio to the retry LIFO ( in O(1) ... we are in interrupt ) 3332 * later sampled by raid5d. 3333 */ 3334 static void add_bio_to_retry(struct bio *bi,raid5_conf_t *conf) 3335 { 3336 unsigned long flags; 3337 3338 spin_lock_irqsave(&conf->device_lock, flags); 3339 3340 bi->bi_next = conf->retry_read_aligned_list; 3341 conf->retry_read_aligned_list = bi; 3342 3343 spin_unlock_irqrestore(&conf->device_lock, flags); 3344 md_wakeup_thread(conf->mddev->thread); 3345 } 3346 3347 3348 static struct bio *remove_bio_from_retry(raid5_conf_t *conf) 3349 { 3350 struct bio *bi; 3351 3352 bi = conf->retry_read_aligned; 3353 if (bi) { 3354 conf->retry_read_aligned = NULL; 3355 return bi; 3356 } 3357 bi = conf->retry_read_aligned_list; 3358 if(bi) { 3359 conf->retry_read_aligned_list = bi->bi_next; 3360 bi->bi_next = NULL; 3361 bi->bi_phys_segments = 1; /* biased count of active stripes */ 3362 bi->bi_hw_segments = 0; /* count of processed stripes */ 3363 } 3364 3365 return bi; 3366 } 3367 3368 3369 /* 3370 * The "raid5_align_endio" should check if the read succeeded and if it 3371 * did, call bio_endio on the original bio (having bio_put the new bio 3372 * first). 3373 * If the read failed.. 
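 * the bio is queued via add_bio_to_retry() so that raid5d can pick it
 * up and resubmit it through the normal stripe cache path
 * (see retry_aligned_read below).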
3374 */ 3375 static void raid5_align_endio(struct bio *bi, int error) 3376 { 3377 struct bio* raid_bi = bi->bi_private; 3378 mddev_t *mddev; 3379 raid5_conf_t *conf; 3380 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 3381 mdk_rdev_t *rdev; 3382 3383 bio_put(bi); 3384 3385 mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata; 3386 conf = mddev_to_conf(mddev); 3387 rdev = (void*)raid_bi->bi_next; 3388 raid_bi->bi_next = NULL; 3389 3390 rdev_dec_pending(rdev, conf->mddev); 3391 3392 if (!error && uptodate) { 3393 bio_endio(raid_bi, 0); 3394 if (atomic_dec_and_test(&conf->active_aligned_reads)) 3395 wake_up(&conf->wait_for_stripe); 3396 return; 3397 } 3398 3399 3400 pr_debug("raid5_align_endio : io error...handing IO for a retry\n"); 3401 3402 add_bio_to_retry(raid_bi, conf); 3403 } 3404 3405 static int bio_fits_rdev(struct bio *bi) 3406 { 3407 struct request_queue *q = bdev_get_queue(bi->bi_bdev); 3408 3409 if ((bi->bi_size>>9) > q->max_sectors) 3410 return 0; 3411 blk_recount_segments(q, bi); 3412 if (bi->bi_phys_segments > q->max_phys_segments || 3413 bi->bi_hw_segments > q->max_hw_segments) 3414 return 0; 3415 3416 if (q->merge_bvec_fn) 3417 /* it's too hard to apply the merge_bvec_fn at this stage, 3418 * just give up 3419 */ 3420 return 0; 3421 3422 return 1; 3423 } 3424 3425 3426 static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio) 3427 { 3428 mddev_t *mddev = q->queuedata; 3429 raid5_conf_t *conf = mddev_to_conf(mddev); 3430 const unsigned int raid_disks = conf->raid_disks; 3431 const unsigned int data_disks = raid_disks - conf->max_degraded; 3432 unsigned int dd_idx, pd_idx; 3433 struct bio* align_bi; 3434 mdk_rdev_t *rdev; 3435 3436 if (!in_chunk_boundary(mddev, raid_bio)) { 3437 pr_debug("chunk_aligned_read : non aligned\n"); 3438 return 0; 3439 } 3440 /* 3441 * use bio_clone to make a copy of the bio 3442 */ 3443 align_bi = bio_clone(raid_bio, GFP_NOIO); 3444 if (!align_bi) 3445 return 0; 3446 /* 3447 * set bi_end_io to a new function, and set bi_private to the 3448 * original bio. 3449 */ 3450 align_bi->bi_end_io = raid5_align_endio; 3451 align_bi->bi_private = raid_bio; 3452 /* 3453 * compute position 3454 */ 3455 align_bi->bi_sector = raid5_compute_sector(raid_bio->bi_sector, 3456 raid_disks, 3457 data_disks, 3458 &dd_idx, 3459 &pd_idx, 3460 conf); 3461 3462 rcu_read_lock(); 3463 rdev = rcu_dereference(conf->disks[dd_idx].rdev); 3464 if (rdev && test_bit(In_sync, &rdev->flags)) { 3465 atomic_inc(&rdev->nr_pending); 3466 rcu_read_unlock(); 3467 raid_bio->bi_next = (void*)rdev; 3468 align_bi->bi_bdev = rdev->bdev; 3469 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID); 3470 align_bi->bi_sector += rdev->data_offset; 3471 3472 if (!bio_fits_rdev(align_bi)) { 3473 /* too big in some way */ 3474 bio_put(align_bi); 3475 rdev_dec_pending(rdev, mddev); 3476 return 0; 3477 } 3478 3479 spin_lock_irq(&conf->device_lock); 3480 wait_event_lock_irq(conf->wait_for_stripe, 3481 conf->quiesce == 0, 3482 conf->device_lock, /* nothing */); 3483 atomic_inc(&conf->active_aligned_reads); 3484 spin_unlock_irq(&conf->device_lock); 3485 3486 generic_make_request(align_bi); 3487 return 1; 3488 } else { 3489 rcu_read_unlock(); 3490 bio_put(align_bi); 3491 return 0; 3492 } 3493 } 3494 3495 /* __get_priority_stripe - get the next stripe to process 3496 * 3497 * Full stripe writes are allowed to pass preread active stripes up until 3498 * the bypass_threshold is exceeded.
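 * For example, with bypass_threshold == 1, once the same stripe has
 * been passed over at the head of the hold_list often enough for
 * bypass_count to exceed 1, the next pass that finds the handle_list
 * empty services the held stripe and pulls the count back down by the
 * threshold.  A condensed model of that "take the held stripe" test
 * (made-up helper name, purely illustrative; the real test is the
 * hold_list branch in the function body):
 *
 *	static int take_held_stripe(int handle_empty, int hold_empty,
 *				    int bypass_count, int threshold,
 *				    int pending_full_writes)
 *	{
 *		return handle_empty && !hold_empty &&
 *			((threshold && bypass_count > threshold) ||
 *			 pending_full_writes == 0);
 *	}
 *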
In general the bypass_count 3499 * increments when the handle_list is handled before the hold_list; however, it 3500 * will not be incremented when STRIPE_IO_STARTED is sampled as set, signifying a 3501 * stripe with in-flight i/o. The bypass_count will be reset when the 3502 * head of the hold_list has changed, i.e. the head was promoted to the 3503 * handle_list. 3504 */ 3505 static struct stripe_head *__get_priority_stripe(raid5_conf_t *conf) 3506 { 3507 struct stripe_head *sh; 3508 3509 pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n", 3510 __func__, 3511 list_empty(&conf->handle_list) ? "empty" : "busy", 3512 list_empty(&conf->hold_list) ? "empty" : "busy", 3513 atomic_read(&conf->pending_full_writes), conf->bypass_count); 3514 3515 if (!list_empty(&conf->handle_list)) { 3516 sh = list_entry(conf->handle_list.next, typeof(*sh), lru); 3517 3518 if (list_empty(&conf->hold_list)) 3519 conf->bypass_count = 0; 3520 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { 3521 if (conf->hold_list.next == conf->last_hold) 3522 conf->bypass_count++; 3523 else { 3524 conf->last_hold = conf->hold_list.next; 3525 conf->bypass_count -= conf->bypass_threshold; 3526 if (conf->bypass_count < 0) 3527 conf->bypass_count = 0; 3528 } 3529 } 3530 } else if (!list_empty(&conf->hold_list) && 3531 ((conf->bypass_threshold && 3532 conf->bypass_count > conf->bypass_threshold) || 3533 atomic_read(&conf->pending_full_writes) == 0)) { 3534 sh = list_entry(conf->hold_list.next, 3535 typeof(*sh), lru); 3536 conf->bypass_count -= conf->bypass_threshold; 3537 if (conf->bypass_count < 0) 3538 conf->bypass_count = 0; 3539 } else 3540 return NULL; 3541 3542 list_del_init(&sh->lru); 3543 atomic_inc(&sh->count); 3544 BUG_ON(atomic_read(&sh->count) != 1); 3545 return sh; 3546 } 3547 3548 static int make_request(struct request_queue *q, struct bio * bi) 3549 { 3550 mddev_t *mddev = q->queuedata; 3551 raid5_conf_t *conf = mddev_to_conf(mddev); 3552 unsigned int dd_idx, pd_idx; 3553 sector_t new_sector; 3554 sector_t logical_sector, last_sector; 3555 struct stripe_head *sh; 3556 const int rw = bio_data_dir(bi); 3557 int remaining; 3558 3559 if (unlikely(bio_barrier(bi))) { 3560 bio_endio(bi, -EOPNOTSUPP); 3561 return 0; 3562 } 3563 3564 md_write_start(mddev, bi); 3565 3566 disk_stat_inc(mddev->gendisk, ios[rw]); 3567 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi)); 3568 3569 if (rw == READ && 3570 mddev->reshape_position == MaxSector && 3571 chunk_aligned_read(q,bi)) 3572 return 0; 3573 3574 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 3575 last_sector = bi->bi_sector + (bi->bi_size>>9); 3576 bi->bi_next = NULL; 3577 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ 3578 3579 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { 3580 DEFINE_WAIT(w); 3581 int disks, data_disks; 3582 3583 retry: 3584 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); 3585 if (likely(conf->expand_progress == MaxSector)) 3586 disks = conf->raid_disks; 3587 else { 3588 /* spinlock is needed as expand_progress may be 3589 * 64bit on a 32bit platform, and so it might be 3590 * possible to see a half-updated value 3591 * Of course expand_progress could change after 3592 * the lock is dropped, so once we get a reference 3593 * to the stripe that we think it is, we will have 3594 * to check again.
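 * (on a 32bit machine a 64bit sector_t store is two word-sized
 * writes, so an unlocked reader could see, say, a new low word paired
 * with a stale high word; taking device_lock for the read avoids
 * acting on such a torn value)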
3595 */ 3596 spin_lock_irq(&conf->device_lock); 3597 disks = conf->raid_disks; 3598 if (logical_sector >= conf->expand_progress) 3599 disks = conf->previous_raid_disks; 3600 else { 3601 if (logical_sector >= conf->expand_lo) { 3602 spin_unlock_irq(&conf->device_lock); 3603 schedule(); 3604 goto retry; 3605 } 3606 } 3607 spin_unlock_irq(&conf->device_lock); 3608 } 3609 data_disks = disks - conf->max_degraded; 3610 3611 new_sector = raid5_compute_sector(logical_sector, disks, data_disks, 3612 &dd_idx, &pd_idx, conf); 3613 pr_debug("raid5: make_request, sector %llu logical %llu\n", 3614 (unsigned long long)new_sector, 3615 (unsigned long long)logical_sector); 3616 3617 sh = get_active_stripe(conf, new_sector, disks, pd_idx, (bi->bi_rw&RWA_MASK)); 3618 if (sh) { 3619 if (unlikely(conf->expand_progress != MaxSector)) { 3620 /* expansion might have moved on while waiting for a 3621 * stripe, so we must do the range check again. 3622 * Expansion could still move past after this 3623 * test, but as we are holding a reference to 3624 * 'sh', we know that if that happens, 3625 * STRIPE_EXPANDING will get set and the expansion 3626 * won't proceed until we finish with the stripe. 3627 */ 3628 int must_retry = 0; 3629 spin_lock_irq(&conf->device_lock); 3630 if (logical_sector < conf->expand_progress && 3631 disks == conf->previous_raid_disks) 3632 /* mismatch, need to try again */ 3633 must_retry = 1; 3634 spin_unlock_irq(&conf->device_lock); 3635 if (must_retry) { 3636 release_stripe(sh); 3637 goto retry; 3638 } 3639 } 3640 /* FIXME what if we get a false positive because these 3641 * are being updated. 3642 */ 3643 if (logical_sector >= mddev->suspend_lo && 3644 logical_sector < mddev->suspend_hi) { 3645 release_stripe(sh); 3646 schedule(); 3647 goto retry; 3648 } 3649 3650 if (test_bit(STRIPE_EXPANDING, &sh->state) || 3651 !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) { 3652 /* Stripe is busy expanding or 3653 * add failed due to overlap. Flush everything 3654 * and wait a while 3655 */ 3656 raid5_unplug_device(mddev->queue); 3657 release_stripe(sh); 3658 schedule(); 3659 goto retry; 3660 } 3661 finish_wait(&conf->wait_for_overlap, &w); 3662 set_bit(STRIPE_HANDLE, &sh->state); 3663 clear_bit(STRIPE_DELAYED, &sh->state); 3664 release_stripe(sh); 3665 } else { 3666 /* cannot get stripe for read-ahead, just give-up */ 3667 clear_bit(BIO_UPTODATE, &bi->bi_flags); 3668 finish_wait(&conf->wait_for_overlap, &w); 3669 break; 3670 } 3671 3672 } 3673 spin_lock_irq(&conf->device_lock); 3674 remaining = --bi->bi_phys_segments; 3675 spin_unlock_irq(&conf->device_lock); 3676 if (remaining == 0) { 3677 3678 if ( rw == WRITE ) 3679 md_write_end(mddev); 3680 3681 bi->bi_end_io(bi, 3682 test_bit(BIO_UPTODATE, &bi->bi_flags) 3683 ? 0 : -EIO); 3684 } 3685 return 0; 3686 } 3687 3688 static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped) 3689 { 3690 /* reshaping is quite different to recovery/resync so it is 3691 * handled quite separately ... here. 3692 * 3693 * On each call to sync_request, we gather one chunk worth of 3694 * destination stripes and flag them as expanding. 3695 * Then we find all the source stripes and request reads. 3696 * As the reads complete, handle_stripe will copy the data 3697 * into the destination stripe and release that stripe. 
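 *
 * A rough worked example (illustrative numbers only): growing a
 * raid5 array from 4 to 5 devices with a 64KiB chunk gives
 * new_data_disks = 4 and 128 sectors per chunk, so one call claims a
 * chunk's worth of destination stripes and moves expand_progress
 * forward by 128 * 4 = 512 sectors of array data.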
3698 */ 3699 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 3700 struct stripe_head *sh; 3701 int pd_idx; 3702 sector_t first_sector, last_sector; 3703 int raid_disks = conf->previous_raid_disks; 3704 int data_disks = raid_disks - conf->max_degraded; 3705 int new_data_disks = conf->raid_disks - conf->max_degraded; 3706 int i; 3707 int dd_idx; 3708 sector_t writepos, safepos, gap; 3709 3710 if (sector_nr == 0 && 3711 conf->expand_progress != 0) { 3712 /* restarting in the middle, skip the initial sectors */ 3713 sector_nr = conf->expand_progress; 3714 sector_div(sector_nr, new_data_disks); 3715 *skipped = 1; 3716 return sector_nr; 3717 } 3718 3719 /* we update the metadata when there is more than 3Meg 3720 * in the block range (that is rather arbitrary, should 3721 * probably be time-based) or when the data about to be 3722 * copied would over-write the source of the data at 3723 * the front of the range. 3724 * i.e. one new stripe forward from expand_progress maps (in the new 3725 * layout) to beyond where expand_lo maps to (in the old layout) 3726 */ 3727 writepos = conf->expand_progress + 3728 conf->chunk_size/512*(new_data_disks); 3729 sector_div(writepos, new_data_disks); 3730 safepos = conf->expand_lo; 3731 sector_div(safepos, data_disks); 3732 gap = conf->expand_progress - conf->expand_lo; 3733 3734 if (writepos >= safepos || 3735 gap > (new_data_disks)*3000*2 /*3Meg*/) { 3736 /* Cannot proceed until we've updated the superblock... */ 3737 wait_event(conf->wait_for_overlap, 3738 atomic_read(&conf->reshape_stripes)==0); 3739 mddev->reshape_position = conf->expand_progress; 3740 set_bit(MD_CHANGE_DEVS, &mddev->flags); 3741 md_wakeup_thread(mddev->thread); 3742 wait_event(mddev->sb_wait, mddev->flags == 0 || 3743 kthread_should_stop()); 3744 spin_lock_irq(&conf->device_lock); 3745 conf->expand_lo = mddev->reshape_position; 3746 spin_unlock_irq(&conf->device_lock); 3747 wake_up(&conf->wait_for_overlap); 3748 } 3749 3750 for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) { 3751 int j; 3752 int skipped = 0; 3753 pd_idx = stripe_to_pdidx(sector_nr+i, conf, conf->raid_disks); 3754 sh = get_active_stripe(conf, sector_nr+i, 3755 conf->raid_disks, pd_idx, 0); 3756 set_bit(STRIPE_EXPANDING, &sh->state); 3757 atomic_inc(&conf->reshape_stripes); 3758 /* If any of this stripe is beyond the end of the old 3759 * array, then we need to zero those blocks 3760 */ 3761 for (j=sh->disks; j--;) { 3762 sector_t s; 3763 if (j == sh->pd_idx) 3764 continue; 3765 if (conf->level == 6 && 3766 j == raid6_next_disk(sh->pd_idx, sh->disks)) 3767 continue; 3768 s = compute_blocknr(sh, j); 3769 if (s < (mddev->array_size<<1)) { 3770 skipped = 1; 3771 continue; 3772 } 3773 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); 3774 set_bit(R5_Expanded, &sh->dev[j].flags); 3775 set_bit(R5_UPTODATE, &sh->dev[j].flags); 3776 } 3777 if (!skipped) { 3778 set_bit(STRIPE_EXPAND_READY, &sh->state); 3779 set_bit(STRIPE_HANDLE, &sh->state); 3780 } 3781 release_stripe(sh); 3782 } 3783 spin_lock_irq(&conf->device_lock); 3784 conf->expand_progress = (sector_nr + i) * new_data_disks; 3785 spin_unlock_irq(&conf->device_lock); 3786 /* Ok, those stripes are ready. We can start scheduling 3787 * reads on the source stripes. 3788 * The source stripes are determined by mapping the first and last 3789 * block on the destination stripes.
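 * (i.e. array sectors sector_nr*new_data_disks up to
 * (sector_nr + chunk_size/512)*new_data_disks - 1 are pushed back
 * through the OLD geometry, previous_raid_disks, to find the range of
 * source stripes that have to be read)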
3790 */ 3791 first_sector = 3792 raid5_compute_sector(sector_nr*(new_data_disks), 3793 raid_disks, data_disks, 3794 &dd_idx, &pd_idx, conf); 3795 last_sector = 3796 raid5_compute_sector((sector_nr+conf->chunk_size/512) 3797 *(new_data_disks) -1, 3798 raid_disks, data_disks, 3799 &dd_idx, &pd_idx, conf); 3800 if (last_sector >= (mddev->size<<1)) 3801 last_sector = (mddev->size<<1)-1; 3802 while (first_sector <= last_sector) { 3803 pd_idx = stripe_to_pdidx(first_sector, conf, 3804 conf->previous_raid_disks); 3805 sh = get_active_stripe(conf, first_sector, 3806 conf->previous_raid_disks, pd_idx, 0); 3807 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); 3808 set_bit(STRIPE_HANDLE, &sh->state); 3809 release_stripe(sh); 3810 first_sector += STRIPE_SECTORS; 3811 } 3812 /* If this takes us to the resync_max point where we have to pause, 3813 * then we need to write out the superblock. 3814 */ 3815 sector_nr += conf->chunk_size>>9; 3816 if (sector_nr >= mddev->resync_max) { 3817 /* Cannot proceed until we've updated the superblock... */ 3818 wait_event(conf->wait_for_overlap, 3819 atomic_read(&conf->reshape_stripes) == 0); 3820 mddev->reshape_position = conf->expand_progress; 3821 set_bit(MD_CHANGE_DEVS, &mddev->flags); 3822 md_wakeup_thread(mddev->thread); 3823 wait_event(mddev->sb_wait, 3824 !test_bit(MD_CHANGE_DEVS, &mddev->flags) 3825 || kthread_should_stop()); 3826 spin_lock_irq(&conf->device_lock); 3827 conf->expand_lo = mddev->reshape_position; 3828 spin_unlock_irq(&conf->device_lock); 3829 wake_up(&conf->wait_for_overlap); 3830 } 3831 return conf->chunk_size>>9; 3832 } 3833 3834 /* FIXME go_faster isn't used */ 3835 static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) 3836 { 3837 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 3838 struct stripe_head *sh; 3839 int pd_idx; 3840 int raid_disks = conf->raid_disks; 3841 sector_t max_sector = mddev->size << 1; 3842 int sync_blocks; 3843 int still_degraded = 0; 3844 int i; 3845 3846 if (sector_nr >= max_sector) { 3847 /* just being told to finish up .. nothing much to do */ 3848 unplug_slaves(mddev); 3849 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 3850 end_reshape(conf); 3851 return 0; 3852 } 3853 3854 if (mddev->curr_resync < max_sector) /* aborted */ 3855 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 3856 &sync_blocks, 1); 3857 else /* completed sync */ 3858 conf->fullsync = 0; 3859 bitmap_close_sync(mddev->bitmap); 3860 3861 return 0; 3862 } 3863 3864 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 3865 return reshape_request(mddev, sector_nr, skipped); 3866 3867 /* No need to check resync_max as we never do more than one 3868 * stripe, and as resync_max will always be on a chunk boundary, 3869 * if the check in md_do_sync didn't fire, there is no chance 3870 * of overstepping resync_max here 3871 */ 3872 3873 /* if there are too many failed drives and we are trying 3874 * to resync, then assert that we are finished, because there is 3875 * nothing we can do.
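 * (max_degraded is 1 for raid4/5 and 2 for raid6, so this check fires as
 * soon as the array has lost all of its redundancy.)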
3876 */ 3877 if (mddev->degraded >= conf->max_degraded && 3878 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 3879 sector_t rv = (mddev->size << 1) - sector_nr; 3880 *skipped = 1; 3881 return rv; 3882 } 3883 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && 3884 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 3885 !conf->fullsync && sync_blocks >= STRIPE_SECTORS) { 3886 /* we can skip this block, and probably more */ 3887 sync_blocks /= STRIPE_SECTORS; 3888 *skipped = 1; 3889 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */ 3890 } 3891 3892 3893 bitmap_cond_end_sync(mddev->bitmap, sector_nr); 3894 3895 pd_idx = stripe_to_pdidx(sector_nr, conf, raid_disks); 3896 sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 1); 3897 if (sh == NULL) { 3898 sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 0); 3899 /* make sure we don't swamp the stripe cache if someone else 3900 * is trying to get access 3901 */ 3902 schedule_timeout_uninterruptible(1); 3903 } 3904 /* Need to check if the array will still be degraded after recovery/resync. 3905 * We don't need to check the 'failed' flag as when that gets set, 3906 * recovery aborts. 3907 */ 3908 for (i=0; i<mddev->raid_disks; i++) 3909 if (conf->disks[i].rdev == NULL) 3910 still_degraded = 1; 3911 3912 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); 3913 3914 spin_lock(&sh->lock); 3915 set_bit(STRIPE_SYNCING, &sh->state); 3916 clear_bit(STRIPE_INSYNC, &sh->state); 3917 spin_unlock(&sh->lock); 3918 3919 handle_stripe(sh, NULL); 3920 release_stripe(sh); 3921 3922 return STRIPE_SECTORS; 3923 } 3924 3925 static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio) 3926 { 3927 /* We may not be able to submit a whole bio at once as there 3928 * may not be enough stripe_heads available. 3929 * We cannot pre-allocate enough stripe_heads as we may need 3930 * more than exist in the cache (if we allow ever-larger chunks). 3931 * So we do one stripe head at a time and record in 3932 * ->bi_hw_segments how many have been done. 3933 * 3934 * We *know* that this entire raid_bio is in one chunk, so 3935 * it will use only one 'dd_idx' and need only one call to raid5_compute_sector.
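 *
 * ->bi_hw_segments serves purely as a resume cursor here: if
 * get_active_stripe() fails part-way through, we record how many stripes
 * were already handled and park the bio on conf->retry_read_aligned so that
 * raid5d can continue from that point later.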
3936 */ 3937 struct stripe_head *sh; 3938 int dd_idx, pd_idx; 3939 sector_t sector, logical_sector, last_sector; 3940 int scnt = 0; 3941 int remaining; 3942 int handled = 0; 3943 3944 logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 3945 sector = raid5_compute_sector( logical_sector, 3946 conf->raid_disks, 3947 conf->raid_disks - conf->max_degraded, 3948 &dd_idx, 3949 &pd_idx, 3950 conf); 3951 last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9); 3952 3953 for (; logical_sector < last_sector; 3954 logical_sector += STRIPE_SECTORS, 3955 sector += STRIPE_SECTORS, 3956 scnt++) { 3957 3958 if (scnt < raid_bio->bi_hw_segments) 3959 /* already done this stripe */ 3960 continue; 3961 3962 sh = get_active_stripe(conf, sector, conf->raid_disks, pd_idx, 1); 3963 3964 if (!sh) { 3965 /* failed to get a stripe - must wait */ 3966 raid_bio->bi_hw_segments = scnt; 3967 conf->retry_read_aligned = raid_bio; 3968 return handled; 3969 } 3970 3971 set_bit(R5_ReadError, &sh->dev[dd_idx].flags); 3972 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) { 3973 release_stripe(sh); 3974 raid_bio->bi_hw_segments = scnt; 3975 conf->retry_read_aligned = raid_bio; 3976 return handled; 3977 } 3978 3979 handle_stripe(sh, NULL); 3980 release_stripe(sh); 3981 handled++; 3982 } 3983 spin_lock_irq(&conf->device_lock); 3984 remaining = --raid_bio->bi_phys_segments; 3985 spin_unlock_irq(&conf->device_lock); 3986 if (remaining == 0) { 3987 3988 raid_bio->bi_end_io(raid_bio, 3989 test_bit(BIO_UPTODATE, &raid_bio->bi_flags) 3990 ? 0 : -EIO); 3991 } 3992 if (atomic_dec_and_test(&conf->active_aligned_reads)) 3993 wake_up(&conf->wait_for_stripe); 3994 return handled; 3995 } 3996 3997 3998 3999 /* 4000 * This is our raid5 kernel thread. 4001 * 4002 * We scan the hash table for stripes which can be handled now. 4003 * During the scan, completed stripes are saved for us by the interrupt 4004 * handler, so that they will not have to wait for our next wakeup. 
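 *
 * Each pass through the loop below also flushes any pending bitmap batch
 * (seq_flush vs seq_write) and retries aligned reads that previously failed
 * to get a stripe_head, before taking the next stripe from
 * __get_priority_stripe().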
4005 */ 4006 static void raid5d(mddev_t *mddev) 4007 { 4008 struct stripe_head *sh; 4009 raid5_conf_t *conf = mddev_to_conf(mddev); 4010 int handled; 4011 4012 pr_debug("+++ raid5d active\n"); 4013 4014 md_check_recovery(mddev); 4015 4016 handled = 0; 4017 spin_lock_irq(&conf->device_lock); 4018 while (1) { 4019 struct bio *bio; 4020 4021 if (conf->seq_flush != conf->seq_write) { 4022 int seq = conf->seq_flush; 4023 spin_unlock_irq(&conf->device_lock); 4024 bitmap_unplug(mddev->bitmap); 4025 spin_lock_irq(&conf->device_lock); 4026 conf->seq_write = seq; 4027 activate_bit_delay(conf); 4028 } 4029 4030 while ((bio = remove_bio_from_retry(conf))) { 4031 int ok; 4032 spin_unlock_irq(&conf->device_lock); 4033 ok = retry_aligned_read(conf, bio); 4034 spin_lock_irq(&conf->device_lock); 4035 if (!ok) 4036 break; 4037 handled++; 4038 } 4039 4040 sh = __get_priority_stripe(conf); 4041 4042 if (!sh) { 4043 async_tx_issue_pending_all(); 4044 break; 4045 } 4046 spin_unlock_irq(&conf->device_lock); 4047 4048 handled++; 4049 handle_stripe(sh, conf->spare_page); 4050 release_stripe(sh); 4051 4052 spin_lock_irq(&conf->device_lock); 4053 } 4054 pr_debug("%d stripes handled\n", handled); 4055 4056 spin_unlock_irq(&conf->device_lock); 4057 4058 unplug_slaves(mddev); 4059 4060 pr_debug("--- raid5d inactive\n"); 4061 } 4062 4063 static ssize_t 4064 raid5_show_stripe_cache_size(mddev_t *mddev, char *page) 4065 { 4066 raid5_conf_t *conf = mddev_to_conf(mddev); 4067 if (conf) 4068 return sprintf(page, "%d\n", conf->max_nr_stripes); 4069 else 4070 return 0; 4071 } 4072 4073 static ssize_t 4074 raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len) 4075 { 4076 raid5_conf_t *conf = mddev_to_conf(mddev); 4077 unsigned long new; 4078 if (len >= PAGE_SIZE) 4079 return -EINVAL; 4080 if (!conf) 4081 return -ENODEV; 4082 4083 if (strict_strtoul(page, 10, &new)) 4084 return -EINVAL; 4085 if (new <= 16 || new > 32768) 4086 return -EINVAL; 4087 while (new < conf->max_nr_stripes) { 4088 if (drop_one_stripe(conf)) 4089 conf->max_nr_stripes--; 4090 else 4091 break; 4092 } 4093 md_allow_write(mddev); 4094 while (new > conf->max_nr_stripes) { 4095 if (grow_one_stripe(conf)) 4096 conf->max_nr_stripes++; 4097 else break; 4098 } 4099 return len; 4100 } 4101 4102 static struct md_sysfs_entry 4103 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, 4104 raid5_show_stripe_cache_size, 4105 raid5_store_stripe_cache_size); 4106 4107 static ssize_t 4108 raid5_show_preread_threshold(mddev_t *mddev, char *page) 4109 { 4110 raid5_conf_t *conf = mddev_to_conf(mddev); 4111 if (conf) 4112 return sprintf(page, "%d\n", conf->bypass_threshold); 4113 else 4114 return 0; 4115 } 4116 4117 static ssize_t 4118 raid5_store_preread_threshold(mddev_t *mddev, const char *page, size_t len) 4119 { 4120 raid5_conf_t *conf = mddev_to_conf(mddev); 4121 unsigned long new; 4122 if (len >= PAGE_SIZE) 4123 return -EINVAL; 4124 if (!conf) 4125 return -ENODEV; 4126 4127 if (strict_strtoul(page, 10, &new)) 4128 return -EINVAL; 4129 if (new > conf->max_nr_stripes) 4130 return -EINVAL; 4131 conf->bypass_threshold = new; 4132 return len; 4133 } 4134 4135 static struct md_sysfs_entry 4136 raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold, 4137 S_IRUGO | S_IWUSR, 4138 raid5_show_preread_threshold, 4139 raid5_store_preread_threshold); 4140 4141 static ssize_t 4142 stripe_cache_active_show(mddev_t *mddev, char *page) 4143 { 4144 raid5_conf_t *conf = mddev_to_conf(mddev); 4145 if (conf) 4146 return sprintf(page, "%d\n", 
atomic_read(&conf->active_stripes)); 4147 else 4148 return 0; 4149 } 4150 4151 static struct md_sysfs_entry 4152 raid5_stripecache_active = __ATTR_RO(stripe_cache_active); 4153 4154 static struct attribute *raid5_attrs[] = { 4155 &raid5_stripecache_size.attr, 4156 &raid5_stripecache_active.attr, 4157 &raid5_preread_bypass_threshold.attr, 4158 NULL, 4159 }; 4160 static struct attribute_group raid5_attrs_group = { 4161 .name = NULL, 4162 .attrs = raid5_attrs, 4163 }; 4164 4165 static int run(mddev_t *mddev) 4166 { 4167 raid5_conf_t *conf; 4168 int raid_disk, memory; 4169 mdk_rdev_t *rdev; 4170 struct disk_info *disk; 4171 struct list_head *tmp; 4172 int working_disks = 0; 4173 4174 if (mddev->level != 5 && mddev->level != 4 && mddev->level != 6) { 4175 printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n", 4176 mdname(mddev), mddev->level); 4177 return -EIO; 4178 } 4179 4180 if (mddev->reshape_position != MaxSector) { 4181 /* Check that we can continue the reshape. 4182 * Currently only disks can change, it must 4183 * increase, and we must be past the point where 4184 * a stripe over-writes itself 4185 */ 4186 sector_t here_new, here_old; 4187 int old_disks; 4188 int max_degraded = (mddev->level == 5 ? 1 : 2); 4189 4190 if (mddev->new_level != mddev->level || 4191 mddev->new_layout != mddev->layout || 4192 mddev->new_chunk != mddev->chunk_size) { 4193 printk(KERN_ERR "raid5: %s: unsupported reshape " 4194 "required - aborting.\n", 4195 mdname(mddev)); 4196 return -EINVAL; 4197 } 4198 if (mddev->delta_disks <= 0) { 4199 printk(KERN_ERR "raid5: %s: unsupported reshape " 4200 "(reduce disks) required - aborting.\n", 4201 mdname(mddev)); 4202 return -EINVAL; 4203 } 4204 old_disks = mddev->raid_disks - mddev->delta_disks; 4205 /* reshape_position must be on a new-stripe boundary, and one 4206 * further up in new geometry must map after here in old 4207 * geometry. 
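 * For example (sizes purely illustrative): resuming a 4->5 drive RAID5
 * reshape with 64KiB chunks, a full stripe holds 4*64KiB of data in the new
 * geometry and 3*64KiB in the old one; here_new and here_old below are
 * reshape_position divided by those stripe sizes, and we only continue if
 * the stripe about to be written still lies strictly before the first
 * stripe that might need to be read.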
4208 */ 4209 here_new = mddev->reshape_position; 4210 if (sector_div(here_new, (mddev->chunk_size>>9)* 4211 (mddev->raid_disks - max_degraded))) { 4212 printk(KERN_ERR "raid5: reshape_position not " 4213 "on a stripe boundary\n"); 4214 return -EINVAL; 4215 } 4216 /* here_new is the stripe we will write to */ 4217 here_old = mddev->reshape_position; 4218 sector_div(here_old, (mddev->chunk_size>>9)* 4219 (old_disks-max_degraded)); 4220 /* here_old is the first stripe that we might need to read 4221 * from */ 4222 if (here_new >= here_old) { 4223 /* Reading from the same stripe as writing to - bad */ 4224 printk(KERN_ERR "raid5: reshape_position too early for " 4225 "auto-recovery - aborting.\n"); 4226 return -EINVAL; 4227 } 4228 printk(KERN_INFO "raid5: reshape will continue\n"); 4229 /* OK, we should be able to continue; */ 4230 } 4231 4232 4233 mddev->private = kzalloc(sizeof (raid5_conf_t), GFP_KERNEL); 4234 if ((conf = mddev->private) == NULL) 4235 goto abort; 4236 if (mddev->reshape_position == MaxSector) { 4237 conf->previous_raid_disks = conf->raid_disks = mddev->raid_disks; 4238 } else { 4239 conf->raid_disks = mddev->raid_disks; 4240 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; 4241 } 4242 4243 conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info), 4244 GFP_KERNEL); 4245 if (!conf->disks) 4246 goto abort; 4247 4248 conf->mddev = mddev; 4249 4250 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) 4251 goto abort; 4252 4253 if (mddev->level == 6) { 4254 conf->spare_page = alloc_page(GFP_KERNEL); 4255 if (!conf->spare_page) 4256 goto abort; 4257 } 4258 spin_lock_init(&conf->device_lock); 4259 init_waitqueue_head(&conf->wait_for_stripe); 4260 init_waitqueue_head(&conf->wait_for_overlap); 4261 INIT_LIST_HEAD(&conf->handle_list); 4262 INIT_LIST_HEAD(&conf->hold_list); 4263 INIT_LIST_HEAD(&conf->delayed_list); 4264 INIT_LIST_HEAD(&conf->bitmap_list); 4265 INIT_LIST_HEAD(&conf->inactive_list); 4266 atomic_set(&conf->active_stripes, 0); 4267 atomic_set(&conf->preread_active_stripes, 0); 4268 atomic_set(&conf->active_aligned_reads, 0); 4269 conf->bypass_threshold = BYPASS_THRESHOLD; 4270 4271 pr_debug("raid5: run(%s) called.\n", mdname(mddev)); 4272 4273 rdev_for_each(rdev, tmp, mddev) { 4274 raid_disk = rdev->raid_disk; 4275 if (raid_disk >= conf->raid_disks 4276 || raid_disk < 0) 4277 continue; 4278 disk = conf->disks + raid_disk; 4279 4280 disk->rdev = rdev; 4281 4282 if (test_bit(In_sync, &rdev->flags)) { 4283 char b[BDEVNAME_SIZE]; 4284 printk(KERN_INFO "raid5: device %s operational as raid" 4285 " disk %d\n", bdevname(rdev->bdev,b), 4286 raid_disk); 4287 working_disks++; 4288 } 4289 } 4290 4291 /* 4292 * 0 for a fully functional array, 1 or 2 for a degraded array. 
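 * (computed below as conf->raid_disks minus the working_disks counted
 * above; arrays missing more than conf->max_degraded devices are rejected
 * further down.)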
4293 */ 4294 mddev->degraded = conf->raid_disks - working_disks; 4295 conf->mddev = mddev; 4296 conf->chunk_size = mddev->chunk_size; 4297 conf->level = mddev->level; 4298 if (conf->level == 6) 4299 conf->max_degraded = 2; 4300 else 4301 conf->max_degraded = 1; 4302 conf->algorithm = mddev->layout; 4303 conf->max_nr_stripes = NR_STRIPES; 4304 conf->expand_progress = mddev->reshape_position; 4305 4306 /* device size must be a multiple of chunk size */ 4307 mddev->size &= ~(mddev->chunk_size/1024 -1); 4308 mddev->resync_max_sectors = mddev->size << 1; 4309 4310 if (conf->level == 6 && conf->raid_disks < 4) { 4311 printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n", 4312 mdname(mddev), conf->raid_disks); 4313 goto abort; 4314 } 4315 if (!conf->chunk_size || conf->chunk_size % 4) { 4316 printk(KERN_ERR "raid5: invalid chunk size %d for %s\n", 4317 conf->chunk_size, mdname(mddev)); 4318 goto abort; 4319 } 4320 if (conf->algorithm > ALGORITHM_RIGHT_SYMMETRIC) { 4321 printk(KERN_ERR 4322 "raid5: unsupported parity algorithm %d for %s\n", 4323 conf->algorithm, mdname(mddev)); 4324 goto abort; 4325 } 4326 if (mddev->degraded > conf->max_degraded) { 4327 printk(KERN_ERR "raid5: not enough operational devices for %s" 4328 " (%d/%d failed)\n", 4329 mdname(mddev), mddev->degraded, conf->raid_disks); 4330 goto abort; 4331 } 4332 4333 if (mddev->degraded > 0 && 4334 mddev->recovery_cp != MaxSector) { 4335 if (mddev->ok_start_degraded) 4336 printk(KERN_WARNING 4337 "raid5: starting dirty degraded array: %s" 4338 "- data corruption possible.\n", 4339 mdname(mddev)); 4340 else { 4341 printk(KERN_ERR 4342 "raid5: cannot start dirty degraded array for %s\n", 4343 mdname(mddev)); 4344 goto abort; 4345 } 4346 } 4347 4348 { 4349 mddev->thread = md_register_thread(raid5d, mddev, "%s_raid5"); 4350 if (!mddev->thread) { 4351 printk(KERN_ERR 4352 "raid5: couldn't allocate thread for %s\n", 4353 mdname(mddev)); 4354 goto abort; 4355 } 4356 } 4357 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + 4358 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; 4359 if (grow_stripes(conf, conf->max_nr_stripes)) { 4360 printk(KERN_ERR 4361 "raid5: couldn't allocate %dkB for buffers\n", memory); 4362 shrink_stripes(conf); 4363 md_unregister_thread(mddev->thread); 4364 goto abort; 4365 } else 4366 printk(KERN_INFO "raid5: allocated %dkB for %s\n", 4367 memory, mdname(mddev)); 4368 4369 if (mddev->degraded == 0) 4370 printk("raid5: raid level %d set %s active with %d out of %d" 4371 " devices, algorithm %d\n", conf->level, mdname(mddev), 4372 mddev->raid_disks-mddev->degraded, mddev->raid_disks, 4373 conf->algorithm); 4374 else 4375 printk(KERN_ALERT "raid5: raid level %d set %s active with %d" 4376 " out of %d devices, algorithm %d\n", conf->level, 4377 mdname(mddev), mddev->raid_disks - mddev->degraded, 4378 mddev->raid_disks, conf->algorithm); 4379 4380 print_raid5_conf(conf); 4381 4382 if (conf->expand_progress != MaxSector) { 4383 printk("...ok start reshape thread\n"); 4384 conf->expand_lo = conf->expand_progress; 4385 atomic_set(&conf->reshape_stripes, 0); 4386 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4387 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4388 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 4389 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 4390 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 4391 "%s_reshape"); 4392 } 4393 4394 /* read-ahead size must cover two whole stripes, which is 4395 * 2 * (datadisks) * chunksize where 'n' is the 
number of raid devices 4396 */ 4397 { 4398 int data_disks = conf->previous_raid_disks - conf->max_degraded; 4399 int stripe = data_disks * 4400 (mddev->chunk_size / PAGE_SIZE); 4401 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 4402 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 4403 } 4404 4405 /* Ok, everything is just fine now */ 4406 if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) 4407 printk(KERN_WARNING 4408 "raid5: failed to create sysfs attributes for %s\n", 4409 mdname(mddev)); 4410 4411 mddev->queue->unplug_fn = raid5_unplug_device; 4412 mddev->queue->backing_dev_info.congested_data = mddev; 4413 mddev->queue->backing_dev_info.congested_fn = raid5_congested; 4414 4415 mddev->array_size = mddev->size * (conf->previous_raid_disks - 4416 conf->max_degraded); 4417 4418 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec); 4419 4420 return 0; 4421 abort: 4422 if (conf) { 4423 print_raid5_conf(conf); 4424 safe_put_page(conf->spare_page); 4425 kfree(conf->disks); 4426 kfree(conf->stripe_hashtbl); 4427 kfree(conf); 4428 } 4429 mddev->private = NULL; 4430 printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev)); 4431 return -EIO; 4432 } 4433 4434 4435 4436 static int stop(mddev_t *mddev) 4437 { 4438 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 4439 4440 md_unregister_thread(mddev->thread); 4441 mddev->thread = NULL; 4442 shrink_stripes(conf); 4443 kfree(conf->stripe_hashtbl); 4444 mddev->queue->backing_dev_info.congested_fn = NULL; 4445 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 4446 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); 4447 kfree(conf->disks); 4448 kfree(conf); 4449 mddev->private = NULL; 4450 return 0; 4451 } 4452 4453 #ifdef DEBUG 4454 static void print_sh (struct seq_file *seq, struct stripe_head *sh) 4455 { 4456 int i; 4457 4458 seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n", 4459 (unsigned long long)sh->sector, sh->pd_idx, sh->state); 4460 seq_printf(seq, "sh %llu, count %d.\n", 4461 (unsigned long long)sh->sector, atomic_read(&sh->count)); 4462 seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector); 4463 for (i = 0; i < sh->disks; i++) { 4464 seq_printf(seq, "(cache%d: %p %ld) ", 4465 i, sh->dev[i].page, sh->dev[i].flags); 4466 } 4467 seq_printf(seq, "\n"); 4468 } 4469 4470 static void printall (struct seq_file *seq, raid5_conf_t *conf) 4471 { 4472 struct stripe_head *sh; 4473 struct hlist_node *hn; 4474 int i; 4475 4476 spin_lock_irq(&conf->device_lock); 4477 for (i = 0; i < NR_HASH; i++) { 4478 hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) { 4479 if (sh->raid_conf != conf) 4480 continue; 4481 print_sh(seq, sh); 4482 } 4483 } 4484 spin_unlock_irq(&conf->device_lock); 4485 } 4486 #endif 4487 4488 static void status (struct seq_file *seq, mddev_t *mddev) 4489 { 4490 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 4491 int i; 4492 4493 seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout); 4494 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); 4495 for (i = 0; i < conf->raid_disks; i++) 4496 seq_printf (seq, "%s", 4497 conf->disks[i].rdev && 4498 test_bit(In_sync, &conf->disks[i].rdev->flags) ? 
"U" : "_"); 4499 seq_printf (seq, "]"); 4500 #ifdef DEBUG 4501 seq_printf (seq, "\n"); 4502 printall(seq, conf); 4503 #endif 4504 } 4505 4506 static void print_raid5_conf (raid5_conf_t *conf) 4507 { 4508 int i; 4509 struct disk_info *tmp; 4510 4511 printk("RAID5 conf printout:\n"); 4512 if (!conf) { 4513 printk("(conf==NULL)\n"); 4514 return; 4515 } 4516 printk(" --- rd:%d wd:%d\n", conf->raid_disks, 4517 conf->raid_disks - conf->mddev->degraded); 4518 4519 for (i = 0; i < conf->raid_disks; i++) { 4520 char b[BDEVNAME_SIZE]; 4521 tmp = conf->disks + i; 4522 if (tmp->rdev) 4523 printk(" disk %d, o:%d, dev:%s\n", 4524 i, !test_bit(Faulty, &tmp->rdev->flags), 4525 bdevname(tmp->rdev->bdev,b)); 4526 } 4527 } 4528 4529 static int raid5_spare_active(mddev_t *mddev) 4530 { 4531 int i; 4532 raid5_conf_t *conf = mddev->private; 4533 struct disk_info *tmp; 4534 4535 for (i = 0; i < conf->raid_disks; i++) { 4536 tmp = conf->disks + i; 4537 if (tmp->rdev 4538 && !test_bit(Faulty, &tmp->rdev->flags) 4539 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 4540 unsigned long flags; 4541 spin_lock_irqsave(&conf->device_lock, flags); 4542 mddev->degraded--; 4543 spin_unlock_irqrestore(&conf->device_lock, flags); 4544 } 4545 } 4546 print_raid5_conf(conf); 4547 return 0; 4548 } 4549 4550 static int raid5_remove_disk(mddev_t *mddev, int number) 4551 { 4552 raid5_conf_t *conf = mddev->private; 4553 int err = 0; 4554 mdk_rdev_t *rdev; 4555 struct disk_info *p = conf->disks + number; 4556 4557 print_raid5_conf(conf); 4558 rdev = p->rdev; 4559 if (rdev) { 4560 if (test_bit(In_sync, &rdev->flags) || 4561 atomic_read(&rdev->nr_pending)) { 4562 err = -EBUSY; 4563 goto abort; 4564 } 4565 p->rdev = NULL; 4566 synchronize_rcu(); 4567 if (atomic_read(&rdev->nr_pending)) { 4568 /* lost the race, try later */ 4569 err = -EBUSY; 4570 p->rdev = rdev; 4571 } 4572 } 4573 abort: 4574 4575 print_raid5_conf(conf); 4576 return err; 4577 } 4578 4579 static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) 4580 { 4581 raid5_conf_t *conf = mddev->private; 4582 int found = 0; 4583 int disk; 4584 struct disk_info *p; 4585 4586 if (mddev->degraded > conf->max_degraded) 4587 /* no point adding a device */ 4588 return 0; 4589 4590 /* 4591 * find the disk ... but prefer rdev->saved_raid_disk 4592 * if possible. 4593 */ 4594 if (rdev->saved_raid_disk >= 0 && 4595 conf->disks[rdev->saved_raid_disk].rdev == NULL) 4596 disk = rdev->saved_raid_disk; 4597 else 4598 disk = 0; 4599 for ( ; disk < conf->raid_disks; disk++) 4600 if ((p=conf->disks + disk)->rdev == NULL) { 4601 clear_bit(In_sync, &rdev->flags); 4602 rdev->raid_disk = disk; 4603 found = 1; 4604 if (rdev->saved_raid_disk != disk) 4605 conf->fullsync = 1; 4606 rcu_assign_pointer(p->rdev, rdev); 4607 break; 4608 } 4609 print_raid5_conf(conf); 4610 return found; 4611 } 4612 4613 static int raid5_resize(mddev_t *mddev, sector_t sectors) 4614 { 4615 /* no resync is happening, and there is enough space 4616 * on all devices, so we can resize. 4617 * We need to make sure resync covers any new space. 4618 * If the array is shrinking we should possibly wait until 4619 * any io in the removed space completes, but it hardly seems 4620 * worth it. 
4621 */ 4622 raid5_conf_t *conf = mddev_to_conf(mddev); 4623 4624 sectors &= ~((sector_t)mddev->chunk_size/512 - 1); 4625 mddev->array_size = (sectors * (mddev->raid_disks-conf->max_degraded))>>1; 4626 set_capacity(mddev->gendisk, mddev->array_size << 1); 4627 mddev->changed = 1; 4628 if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) { 4629 mddev->recovery_cp = mddev->size << 1; 4630 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4631 } 4632 mddev->size = sectors /2; 4633 mddev->resync_max_sectors = sectors; 4634 return 0; 4635 } 4636 4637 #ifdef CONFIG_MD_RAID5_RESHAPE 4638 static int raid5_check_reshape(mddev_t *mddev) 4639 { 4640 raid5_conf_t *conf = mddev_to_conf(mddev); 4641 int err; 4642 4643 if (mddev->delta_disks < 0 || 4644 mddev->new_level != mddev->level) 4645 return -EINVAL; /* Cannot shrink array or change level yet */ 4646 if (mddev->delta_disks == 0) 4647 return 0; /* nothing to do */ 4648 4649 /* Can only proceed if there are plenty of stripe_heads. 4650 * We need a minimum of one full stripe, and for sensible progress 4651 * it is best to have about 4 times that. 4652 * If we require 4 times, then the default 256 4K stripe_heads will 4653 * allow for chunk sizes up to 256K, which is probably OK. 4654 * If the chunk size is greater, user-space should request more 4655 * stripe_heads first. 4656 */ 4657 if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes || 4658 (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) { 4659 printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n", 4660 (mddev->chunk_size / STRIPE_SIZE)*4); 4661 return -ENOSPC; 4662 } 4663 4664 err = resize_stripes(conf, conf->raid_disks + mddev->delta_disks); 4665 if (err) 4666 return err; 4667 4668 if (mddev->degraded > conf->max_degraded) 4669 return -EINVAL; 4670 /* looks like we might be able to manage this */ 4671 return 0; 4672 } 4673 4674 static int raid5_start_reshape(mddev_t *mddev) 4675 { 4676 raid5_conf_t *conf = mddev_to_conf(mddev); 4677 mdk_rdev_t *rdev; 4678 struct list_head *rtmp; 4679 int spares = 0; 4680 int added_devices = 0; 4681 unsigned long flags; 4682 4683 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4684 return -EBUSY; 4685 4686 rdev_for_each(rdev, rtmp, mddev) 4687 if (rdev->raid_disk < 0 && 4688 !test_bit(Faulty, &rdev->flags)) 4689 spares++; 4690 4691 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) 4692 /* Not enough devices even to make a degraded array 4693 * of that size 4694 */ 4695 return -EINVAL; 4696 4697 atomic_set(&conf->reshape_stripes, 0); 4698 spin_lock_irq(&conf->device_lock); 4699 conf->previous_raid_disks = conf->raid_disks; 4700 conf->raid_disks += mddev->delta_disks; 4701 conf->expand_progress = 0; 4702 conf->expand_lo = 0; 4703 spin_unlock_irq(&conf->device_lock); 4704 4705 /* Add some new drives, as many as will fit. 4706 * We know there are enough to make the newly sized array work.
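 * Each drive we manage to add is marked In_sync and given an rd%d sysfs
 * link; mddev->degraded is then recomputed as delta_disks minus the number
 * of devices actually added.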
4707 */ 4708 rdev_for_each(rdev, rtmp, mddev) 4709 if (rdev->raid_disk < 0 && 4710 !test_bit(Faulty, &rdev->flags)) { 4711 if (raid5_add_disk(mddev, rdev)) { 4712 char nm[20]; 4713 set_bit(In_sync, &rdev->flags); 4714 added_devices++; 4715 rdev->recovery_offset = 0; 4716 sprintf(nm, "rd%d", rdev->raid_disk); 4717 if (sysfs_create_link(&mddev->kobj, 4718 &rdev->kobj, nm)) 4719 printk(KERN_WARNING 4720 "raid5: failed to create" 4721 " link %s for %s\n", 4722 nm, mdname(mddev)); 4723 } else 4724 break; 4725 } 4726 4727 spin_lock_irqsave(&conf->device_lock, flags); 4728 mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) - added_devices; 4729 spin_unlock_irqrestore(&conf->device_lock, flags); 4730 mddev->raid_disks = conf->raid_disks; 4731 mddev->reshape_position = 0; 4732 set_bit(MD_CHANGE_DEVS, &mddev->flags); 4733 4734 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4735 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4736 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 4737 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 4738 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 4739 "%s_reshape"); 4740 if (!mddev->sync_thread) { 4741 mddev->recovery = 0; 4742 spin_lock_irq(&conf->device_lock); 4743 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; 4744 conf->expand_progress = MaxSector; 4745 spin_unlock_irq(&conf->device_lock); 4746 return -EAGAIN; 4747 } 4748 md_wakeup_thread(mddev->sync_thread); 4749 md_new_event(mddev); 4750 return 0; 4751 } 4752 #endif 4753 4754 static void end_reshape(raid5_conf_t *conf) 4755 { 4756 struct block_device *bdev; 4757 4758 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { 4759 conf->mddev->array_size = conf->mddev->size * 4760 (conf->raid_disks - conf->max_degraded); 4761 set_capacity(conf->mddev->gendisk, conf->mddev->array_size << 1); 4762 conf->mddev->changed = 1; 4763 4764 bdev = bdget_disk(conf->mddev->gendisk, 0); 4765 if (bdev) { 4766 mutex_lock(&bdev->bd_inode->i_mutex); 4767 i_size_write(bdev->bd_inode, (loff_t)conf->mddev->array_size << 10); 4768 mutex_unlock(&bdev->bd_inode->i_mutex); 4769 bdput(bdev); 4770 } 4771 spin_lock_irq(&conf->device_lock); 4772 conf->expand_progress = MaxSector; 4773 spin_unlock_irq(&conf->device_lock); 4774 conf->mddev->reshape_position = MaxSector; 4775 4776 /* read-ahead size must cover two whole stripes, which is 4777 * 2 * (datadisks) * chunksize, where 'datadisks' is the number of data (non-parity) devices 4778 */ 4779 { 4780 int data_disks = conf->previous_raid_disks - conf->max_degraded; 4781 int stripe = data_disks * 4782 (conf->mddev->chunk_size / PAGE_SIZE); 4783 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 4784 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 4785 } 4786 } 4787 } 4788 4789 static void raid5_quiesce(mddev_t *mddev, int state) 4790 { 4791 raid5_conf_t *conf = mddev_to_conf(mddev); 4792 4793 switch(state) { 4794 case 2: /* resume for a suspend */ 4795 wake_up(&conf->wait_for_overlap); 4796 break; 4797 4798 case 1: /* stop all writes */ 4799 spin_lock_irq(&conf->device_lock); 4800 conf->quiesce = 1; 4801 wait_event_lock_irq(conf->wait_for_stripe, 4802 atomic_read(&conf->active_stripes) == 0 && 4803 atomic_read(&conf->active_aligned_reads) == 0, 4804 conf->device_lock, /* nothing */); 4805 spin_unlock_irq(&conf->device_lock); 4806 break; 4807 4808 case 0: /* re-enable writes */ 4809 spin_lock_irq(&conf->device_lock); 4810 conf->quiesce = 0; 4811 wake_up(&conf->wait_for_stripe); 4812 wake_up(&conf->wait_for_overlap); 4813
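/* both waitqueues can hold tasks that blocked while quiesce was set:
 * stripe allocations sleeping on wait_for_stripe and overlapping-request
 * waiters on wait_for_overlap */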
spin_unlock_irq(&conf->device_lock); 4814 break; 4815 } 4816 } 4817 4818 static struct mdk_personality raid6_personality = 4819 { 4820 .name = "raid6", 4821 .level = 6, 4822 .owner = THIS_MODULE, 4823 .make_request = make_request, 4824 .run = run, 4825 .stop = stop, 4826 .status = status, 4827 .error_handler = error, 4828 .hot_add_disk = raid5_add_disk, 4829 .hot_remove_disk= raid5_remove_disk, 4830 .spare_active = raid5_spare_active, 4831 .sync_request = sync_request, 4832 .resize = raid5_resize, 4833 #ifdef CONFIG_MD_RAID5_RESHAPE 4834 .check_reshape = raid5_check_reshape, 4835 .start_reshape = raid5_start_reshape, 4836 #endif 4837 .quiesce = raid5_quiesce, 4838 }; 4839 static struct mdk_personality raid5_personality = 4840 { 4841 .name = "raid5", 4842 .level = 5, 4843 .owner = THIS_MODULE, 4844 .make_request = make_request, 4845 .run = run, 4846 .stop = stop, 4847 .status = status, 4848 .error_handler = error, 4849 .hot_add_disk = raid5_add_disk, 4850 .hot_remove_disk= raid5_remove_disk, 4851 .spare_active = raid5_spare_active, 4852 .sync_request = sync_request, 4853 .resize = raid5_resize, 4854 #ifdef CONFIG_MD_RAID5_RESHAPE 4855 .check_reshape = raid5_check_reshape, 4856 .start_reshape = raid5_start_reshape, 4857 #endif 4858 .quiesce = raid5_quiesce, 4859 }; 4860 4861 static struct mdk_personality raid4_personality = 4862 { 4863 .name = "raid4", 4864 .level = 4, 4865 .owner = THIS_MODULE, 4866 .make_request = make_request, 4867 .run = run, 4868 .stop = stop, 4869 .status = status, 4870 .error_handler = error, 4871 .hot_add_disk = raid5_add_disk, 4872 .hot_remove_disk= raid5_remove_disk, 4873 .spare_active = raid5_spare_active, 4874 .sync_request = sync_request, 4875 .resize = raid5_resize, 4876 #ifdef CONFIG_MD_RAID5_RESHAPE 4877 .check_reshape = raid5_check_reshape, 4878 .start_reshape = raid5_start_reshape, 4879 #endif 4880 .quiesce = raid5_quiesce, 4881 }; 4882 4883 static int __init raid5_init(void) 4884 { 4885 int e; 4886 4887 e = raid6_select_algo(); 4888 if ( e ) 4889 return e; 4890 register_md_personality(&raid6_personality); 4891 register_md_personality(&raid5_personality); 4892 register_md_personality(&raid4_personality); 4893 return 0; 4894 } 4895 4896 static void raid5_exit(void) 4897 { 4898 unregister_md_personality(&raid6_personality); 4899 unregister_md_personality(&raid5_personality); 4900 unregister_md_personality(&raid4_personality); 4901 } 4902 4903 module_init(raid5_init); 4904 module_exit(raid5_exit); 4905 MODULE_LICENSE("GPL"); 4906 MODULE_ALIAS("md-personality-4"); /* RAID5 */ 4907 MODULE_ALIAS("md-raid5"); 4908 MODULE_ALIAS("md-raid4"); 4909 MODULE_ALIAS("md-level-5"); 4910 MODULE_ALIAS("md-level-4"); 4911 MODULE_ALIAS("md-personality-8"); /* RAID6 */ 4912 MODULE_ALIAS("md-raid6"); 4913 MODULE_ALIAS("md-level-6"); 4914 4915 /* This used to be two separate modules, they were: */ 4916 MODULE_ALIAS("raid5"); 4917 MODULE_ALIAS("raid6"); 4918