1 /* 2 * raid5.c : Multiple Devices driver for Linux 3 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman 4 * Copyright (C) 1999, 2000 Ingo Molnar 5 * Copyright (C) 2002, 2003 H. Peter Anvin 6 * 7 * RAID-4/5/6 management functions. 8 * Thanks to Penguin Computing for making the RAID-6 development possible 9 * by donating a test server! 10 * 11 * This program is free software; you can redistribute it and/or modify 12 * it under the terms of the GNU General Public License as published by 13 * the Free Software Foundation; either version 2, or (at your option) 14 * any later version. 15 * 16 * You should have received a copy of the GNU General Public License 17 * (for example /usr/src/linux/COPYING); if not, write to the Free 18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 */ 20 21 /* 22 * BITMAP UNPLUGGING: 23 * 24 * The sequencing for updating the bitmap reliably is a little 25 * subtle (and I got it wrong the first time) so it deserves some 26 * explanation. 27 * 28 * We group bitmap updates into batches. Each batch has a number. 29 * We may write out several batches at once, but that isn't very important. 30 * conf->bm_write is the number of the last batch successfully written. 31 * conf->bm_flush is the number of the last batch that was closed to 32 * new additions. 33 * When we discover that we will need to write to any block in a stripe 34 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq 35 * the number of the batch it will be in. This is bm_flush+1. 36 * When we are ready to do a write, if that batch hasn't been written yet, 37 * we plug the array and queue the stripe for later. 38 * When an unplug happens, we increment bm_flush, thus closing the current 39 * batch. 40 * When we notice that bm_flush > bm_write, we write out all pending updates 41 * to the bitmap, and advance bm_write to where bm_flush was. 42 * This may occasionally write a bit out twice, but is sure never to 43 * miss any bits. 44 */ 45 46 #include <linux/module.h> 47 #include <linux/slab.h> 48 #include <linux/highmem.h> 49 #include <linux/bitops.h> 50 #include <linux/kthread.h> 51 #include <asm/atomic.h> 52 #include "raid6.h" 53 54 #include <linux/raid/bitmap.h> 55 #include <linux/async_tx.h> 56 57 /* 58 * Stripe cache 59 */ 60 61 #define NR_STRIPES 256 62 #define STRIPE_SIZE PAGE_SIZE 63 #define STRIPE_SHIFT (PAGE_SHIFT - 9) 64 #define STRIPE_SECTORS (STRIPE_SIZE>>9) 65 #define IO_THRESHOLD 1 66 #define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head)) 67 #define HASH_MASK (NR_HASH - 1) 68 69 #define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK])) 70 71 /* bio's attached to a stripe+device for I/O are linked together in bi_sector 72 * order without overlap. There may be several bio's per stripe+device, and 73 * a bio could span several devices. 74 * When walking this list for a particular stripe+device, we must never proceed 75 * beyond a bio that extends past this device, as the next bio might no longer 76 * be valid. 77 * This macro is used to determine the 'next' bio in the list, given the sector 78 * of the current stripe+device 79 */ 80 #define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? (bio)->bi_next : NULL) 81 /* 82 * The following can be used to debug the driver 83 */ 84 #define RAID5_PARANOIA 1 85 #if RAID5_PARANOIA && defined(CONFIG_SMP) 86 # define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock) 87 #else 88 # define CHECK_DEVLOCK() 89 #endif 90 91 #ifdef DEBUG 92 #define inline 93 #define __inline__ 94 #endif 95 96 #if !RAID6_USE_EMPTY_ZERO_PAGE 97 /* In .bss so it's zeroed */ 98 const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256))); 99 #endif 100 101 static inline int raid6_next_disk(int disk, int raid_disks) 102 { 103 disk++; 104 return (disk < raid_disks) ? disk : 0; 105 } 106 107 static void return_io(struct bio *return_bi) 108 { 109 struct bio *bi = return_bi; 110 while (bi) { 111 112 return_bi = bi->bi_next; 113 bi->bi_next = NULL; 114 bi->bi_size = 0; 115 bi->bi_end_io(bi, 116 test_bit(BIO_UPTODATE, &bi->bi_flags) 117 ? 0 : -EIO); 118 bi = return_bi; 119 } 120 } 121 122 static void print_raid5_conf (raid5_conf_t *conf); 123 124 static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh) 125 { 126 if (atomic_dec_and_test(&sh->count)) { 127 BUG_ON(!list_empty(&sh->lru)); 128 BUG_ON(atomic_read(&conf->active_stripes)==0); 129 if (test_bit(STRIPE_HANDLE, &sh->state)) { 130 if (test_bit(STRIPE_DELAYED, &sh->state)) { 131 list_add_tail(&sh->lru, &conf->delayed_list); 132 blk_plug_device(conf->mddev->queue); 133 } else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && 134 sh->bm_seq - conf->seq_write > 0) { 135 list_add_tail(&sh->lru, &conf->bitmap_list); 136 blk_plug_device(conf->mddev->queue); 137 } else { 138 clear_bit(STRIPE_BIT_DELAY, &sh->state); 139 list_add_tail(&sh->lru, &conf->handle_list); 140 } 141 md_wakeup_thread(conf->mddev->thread); 142 } else { 143 BUG_ON(sh->ops.pending); 144 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 145 atomic_dec(&conf->preread_active_stripes); 146 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) 147 md_wakeup_thread(conf->mddev->thread); 148 } 149 atomic_dec(&conf->active_stripes); 150 if (!test_bit(STRIPE_EXPANDING, &sh->state)) { 151 list_add_tail(&sh->lru, &conf->inactive_list); 152 wake_up(&conf->wait_for_stripe); 153 if (conf->retry_read_aligned) 154 md_wakeup_thread(conf->mddev->thread); 155 } 156 } 157 } 158 } 159 static void release_stripe(struct stripe_head *sh) 160 { 161 raid5_conf_t *conf = sh->raid_conf; 162 unsigned long flags; 163 164 spin_lock_irqsave(&conf->device_lock, flags); 165 __release_stripe(conf, sh); 166 spin_unlock_irqrestore(&conf->device_lock, flags); 167 } 168 169 static inline void remove_hash(struct stripe_head *sh) 170 { 171 pr_debug("remove_hash(), stripe %llu\n", 172 (unsigned long long)sh->sector); 173 174 hlist_del_init(&sh->hash); 175 } 176 177 static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh) 178 { 179 struct hlist_head *hp = stripe_hash(conf, sh->sector); 180 181 pr_debug("insert_hash(), stripe %llu\n", 182 (unsigned long long)sh->sector); 183 184 CHECK_DEVLOCK(); 185 hlist_add_head(&sh->hash, hp); 186 } 187 188 189 /* find an idle stripe, make sure it is unhashed, and return it. */ 190 static struct stripe_head *get_free_stripe(raid5_conf_t *conf) 191 { 192 struct stripe_head *sh = NULL; 193 struct list_head *first; 194 195 CHECK_DEVLOCK(); 196 if (list_empty(&conf->inactive_list)) 197 goto out; 198 first = conf->inactive_list.next; 199 sh = list_entry(first, struct stripe_head, lru); 200 list_del_init(first); 201 remove_hash(sh); 202 atomic_inc(&conf->active_stripes); 203 out: 204 return sh; 205 } 206 207 static void shrink_buffers(struct stripe_head *sh, int num) 208 { 209 struct page *p; 210 int i; 211 212 for (i=0; i<num ; i++) { 213 p = sh->dev[i].page; 214 if (!p) 215 continue; 216 sh->dev[i].page = NULL; 217 put_page(p); 218 } 219 } 220 221 static int grow_buffers(struct stripe_head *sh, int num) 222 { 223 int i; 224 225 for (i=0; i<num; i++) { 226 struct page *page; 227 228 if (!(page = alloc_page(GFP_KERNEL))) { 229 return 1; 230 } 231 sh->dev[i].page = page; 232 } 233 return 0; 234 } 235 236 static void raid5_build_block (struct stripe_head *sh, int i); 237 238 static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int disks) 239 { 240 raid5_conf_t *conf = sh->raid_conf; 241 int i; 242 243 BUG_ON(atomic_read(&sh->count) != 0); 244 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); 245 BUG_ON(sh->ops.pending || sh->ops.ack || sh->ops.complete); 246 247 CHECK_DEVLOCK(); 248 pr_debug("init_stripe called, stripe %llu\n", 249 (unsigned long long)sh->sector); 250 251 remove_hash(sh); 252 253 sh->sector = sector; 254 sh->pd_idx = pd_idx; 255 sh->state = 0; 256 257 sh->disks = disks; 258 259 for (i = sh->disks; i--; ) { 260 struct r5dev *dev = &sh->dev[i]; 261 262 if (dev->toread || dev->read || dev->towrite || dev->written || 263 test_bit(R5_LOCKED, &dev->flags)) { 264 printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n", 265 (unsigned long long)sh->sector, i, dev->toread, 266 dev->read, dev->towrite, dev->written, 267 test_bit(R5_LOCKED, &dev->flags)); 268 BUG(); 269 } 270 dev->flags = 0; 271 raid5_build_block(sh, i); 272 } 273 insert_hash(conf, sh); 274 } 275 276 static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, int disks) 277 { 278 struct stripe_head *sh; 279 struct hlist_node *hn; 280 281 CHECK_DEVLOCK(); 282 pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector); 283 hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash) 284 if (sh->sector == sector && sh->disks == disks) 285 return sh; 286 pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector); 287 return NULL; 288 } 289 290 static void unplug_slaves(mddev_t *mddev); 291 static void raid5_unplug_device(struct request_queue *q); 292 293 static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks, 294 int pd_idx, int noblock) 295 { 296 struct stripe_head *sh; 297 298 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); 299 300 spin_lock_irq(&conf->device_lock); 301 302 do { 303 wait_event_lock_irq(conf->wait_for_stripe, 304 conf->quiesce == 0, 305 conf->device_lock, /* nothing */); 306 sh = __find_stripe(conf, sector, disks); 307 if (!sh) { 308 if (!conf->inactive_blocked) 309 sh = get_free_stripe(conf); 310 if (noblock && sh == NULL) 311 break; 312 if (!sh) { 313 conf->inactive_blocked = 1; 314 wait_event_lock_irq(conf->wait_for_stripe, 315 !list_empty(&conf->inactive_list) && 316 (atomic_read(&conf->active_stripes) 317 < (conf->max_nr_stripes *3/4) 318 || !conf->inactive_blocked), 319 conf->device_lock, 320 raid5_unplug_device(conf->mddev->queue) 321 ); 322 conf->inactive_blocked = 0; 323 } else 324 init_stripe(sh, sector, pd_idx, disks); 325 } else { 326 if (atomic_read(&sh->count)) { 327 BUG_ON(!list_empty(&sh->lru)); 328 } else { 329 if (!test_bit(STRIPE_HANDLE, &sh->state)) 330 atomic_inc(&conf->active_stripes); 331 if (list_empty(&sh->lru) && 332 !test_bit(STRIPE_EXPANDING, &sh->state)) 333 BUG(); 334 list_del_init(&sh->lru); 335 } 336 } 337 } while (sh == NULL); 338 339 if (sh) 340 atomic_inc(&sh->count); 341 342 spin_unlock_irq(&conf->device_lock); 343 return sh; 344 } 345 346 /* test_and_ack_op() ensures that we only dequeue an operation once */ 347 #define test_and_ack_op(op, pend) \ 348 do { \ 349 if (test_bit(op, &sh->ops.pending) && \ 350 !test_bit(op, &sh->ops.complete)) { \ 351 if (test_and_set_bit(op, &sh->ops.ack)) \ 352 clear_bit(op, &pend); \ 353 else \ 354 ack++; \ 355 } else \ 356 clear_bit(op, &pend); \ 357 } while (0) 358 359 /* find new work to run, do not resubmit work that is already 360 * in flight 361 */ 362 static unsigned long get_stripe_work(struct stripe_head *sh) 363 { 364 unsigned long pending; 365 int ack = 0; 366 367 pending = sh->ops.pending; 368 369 test_and_ack_op(STRIPE_OP_BIOFILL, pending); 370 test_and_ack_op(STRIPE_OP_COMPUTE_BLK, pending); 371 test_and_ack_op(STRIPE_OP_PREXOR, pending); 372 test_and_ack_op(STRIPE_OP_BIODRAIN, pending); 373 test_and_ack_op(STRIPE_OP_POSTXOR, pending); 374 test_and_ack_op(STRIPE_OP_CHECK, pending); 375 if (test_and_clear_bit(STRIPE_OP_IO, &sh->ops.pending)) 376 ack++; 377 378 sh->ops.count -= ack; 379 BUG_ON(sh->ops.count < 0); 380 381 return pending; 382 } 383 384 static void 385 raid5_end_read_request(struct bio *bi, int error); 386 static void 387 raid5_end_write_request(struct bio *bi, int error); 388 389 static void ops_run_io(struct stripe_head *sh) 390 { 391 raid5_conf_t *conf = sh->raid_conf; 392 int i, disks = sh->disks; 393 394 might_sleep(); 395 396 for (i = disks; i--; ) { 397 int rw; 398 struct bio *bi; 399 mdk_rdev_t *rdev; 400 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) 401 rw = WRITE; 402 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 403 rw = READ; 404 else 405 continue; 406 407 bi = &sh->dev[i].req; 408 409 bi->bi_rw = rw; 410 if (rw == WRITE) 411 bi->bi_end_io = raid5_end_write_request; 412 else 413 bi->bi_end_io = raid5_end_read_request; 414 415 rcu_read_lock(); 416 rdev = rcu_dereference(conf->disks[i].rdev); 417 if (rdev && test_bit(Faulty, &rdev->flags)) 418 rdev = NULL; 419 if (rdev) 420 atomic_inc(&rdev->nr_pending); 421 rcu_read_unlock(); 422 423 if (rdev) { 424 if (test_bit(STRIPE_SYNCING, &sh->state) || 425 test_bit(STRIPE_EXPAND_SOURCE, &sh->state) || 426 test_bit(STRIPE_EXPAND_READY, &sh->state)) 427 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 428 429 bi->bi_bdev = rdev->bdev; 430 pr_debug("%s: for %llu schedule op %ld on disc %d\n", 431 __FUNCTION__, (unsigned long long)sh->sector, 432 bi->bi_rw, i); 433 atomic_inc(&sh->count); 434 bi->bi_sector = sh->sector + rdev->data_offset; 435 bi->bi_flags = 1 << BIO_UPTODATE; 436 bi->bi_vcnt = 1; 437 bi->bi_max_vecs = 1; 438 bi->bi_idx = 0; 439 bi->bi_io_vec = &sh->dev[i].vec; 440 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 441 bi->bi_io_vec[0].bv_offset = 0; 442 bi->bi_size = STRIPE_SIZE; 443 bi->bi_next = NULL; 444 if (rw == WRITE && 445 test_bit(R5_ReWrite, &sh->dev[i].flags)) 446 atomic_add(STRIPE_SECTORS, 447 &rdev->corrected_errors); 448 generic_make_request(bi); 449 } else { 450 if (rw == WRITE) 451 set_bit(STRIPE_DEGRADED, &sh->state); 452 pr_debug("skip op %ld on disc %d for sector %llu\n", 453 bi->bi_rw, i, (unsigned long long)sh->sector); 454 clear_bit(R5_LOCKED, &sh->dev[i].flags); 455 set_bit(STRIPE_HANDLE, &sh->state); 456 } 457 } 458 } 459 460 static struct dma_async_tx_descriptor * 461 async_copy_data(int frombio, struct bio *bio, struct page *page, 462 sector_t sector, struct dma_async_tx_descriptor *tx) 463 { 464 struct bio_vec *bvl; 465 struct page *bio_page; 466 int i; 467 int page_offset; 468 469 if (bio->bi_sector >= sector) 470 page_offset = (signed)(bio->bi_sector - sector) * 512; 471 else 472 page_offset = (signed)(sector - bio->bi_sector) * -512; 473 bio_for_each_segment(bvl, bio, i) { 474 int len = bio_iovec_idx(bio, i)->bv_len; 475 int clen; 476 int b_offset = 0; 477 478 if (page_offset < 0) { 479 b_offset = -page_offset; 480 page_offset += b_offset; 481 len -= b_offset; 482 } 483 484 if (len > 0 && page_offset + len > STRIPE_SIZE) 485 clen = STRIPE_SIZE - page_offset; 486 else 487 clen = len; 488 489 if (clen > 0) { 490 b_offset += bio_iovec_idx(bio, i)->bv_offset; 491 bio_page = bio_iovec_idx(bio, i)->bv_page; 492 if (frombio) 493 tx = async_memcpy(page, bio_page, page_offset, 494 b_offset, clen, 495 ASYNC_TX_DEP_ACK, 496 tx, NULL, NULL); 497 else 498 tx = async_memcpy(bio_page, page, b_offset, 499 page_offset, clen, 500 ASYNC_TX_DEP_ACK, 501 tx, NULL, NULL); 502 } 503 if (clen < len) /* hit end of page */ 504 break; 505 page_offset += len; 506 } 507 508 return tx; 509 } 510 511 static void ops_complete_biofill(void *stripe_head_ref) 512 { 513 struct stripe_head *sh = stripe_head_ref; 514 struct bio *return_bi = NULL; 515 raid5_conf_t *conf = sh->raid_conf; 516 int i; 517 518 pr_debug("%s: stripe %llu\n", __FUNCTION__, 519 (unsigned long long)sh->sector); 520 521 /* clear completed biofills */ 522 for (i = sh->disks; i--; ) { 523 struct r5dev *dev = &sh->dev[i]; 524 525 /* acknowledge completion of a biofill operation */ 526 /* and check if we need to reply to a read request, 527 * new R5_Wantfill requests are held off until 528 * !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending) 529 */ 530 if (test_and_clear_bit(R5_Wantfill, &dev->flags)) { 531 struct bio *rbi, *rbi2; 532 533 /* The access to dev->read is outside of the 534 * spin_lock_irq(&conf->device_lock), but is protected 535 * by the STRIPE_OP_BIOFILL pending bit 536 */ 537 BUG_ON(!dev->read); 538 rbi = dev->read; 539 dev->read = NULL; 540 while (rbi && rbi->bi_sector < 541 dev->sector + STRIPE_SECTORS) { 542 rbi2 = r5_next_bio(rbi, dev->sector); 543 spin_lock_irq(&conf->device_lock); 544 if (--rbi->bi_phys_segments == 0) { 545 rbi->bi_next = return_bi; 546 return_bi = rbi; 547 } 548 spin_unlock_irq(&conf->device_lock); 549 rbi = rbi2; 550 } 551 } 552 } 553 clear_bit(STRIPE_OP_BIOFILL, &sh->ops.ack); 554 clear_bit(STRIPE_OP_BIOFILL, &sh->ops.pending); 555 556 return_io(return_bi); 557 558 set_bit(STRIPE_HANDLE, &sh->state); 559 release_stripe(sh); 560 } 561 562 static void ops_run_biofill(struct stripe_head *sh) 563 { 564 struct dma_async_tx_descriptor *tx = NULL; 565 raid5_conf_t *conf = sh->raid_conf; 566 int i; 567 568 pr_debug("%s: stripe %llu\n", __FUNCTION__, 569 (unsigned long long)sh->sector); 570 571 for (i = sh->disks; i--; ) { 572 struct r5dev *dev = &sh->dev[i]; 573 if (test_bit(R5_Wantfill, &dev->flags)) { 574 struct bio *rbi; 575 spin_lock_irq(&conf->device_lock); 576 dev->read = rbi = dev->toread; 577 dev->toread = NULL; 578 spin_unlock_irq(&conf->device_lock); 579 while (rbi && rbi->bi_sector < 580 dev->sector + STRIPE_SECTORS) { 581 tx = async_copy_data(0, rbi, dev->page, 582 dev->sector, tx); 583 rbi = r5_next_bio(rbi, dev->sector); 584 } 585 } 586 } 587 588 atomic_inc(&sh->count); 589 async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx, 590 ops_complete_biofill, sh); 591 } 592 593 static void ops_complete_compute5(void *stripe_head_ref) 594 { 595 struct stripe_head *sh = stripe_head_ref; 596 int target = sh->ops.target; 597 struct r5dev *tgt = &sh->dev[target]; 598 599 pr_debug("%s: stripe %llu\n", __FUNCTION__, 600 (unsigned long long)sh->sector); 601 602 set_bit(R5_UPTODATE, &tgt->flags); 603 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 604 clear_bit(R5_Wantcompute, &tgt->flags); 605 set_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete); 606 set_bit(STRIPE_HANDLE, &sh->state); 607 release_stripe(sh); 608 } 609 610 static struct dma_async_tx_descriptor * 611 ops_run_compute5(struct stripe_head *sh, unsigned long pending) 612 { 613 /* kernel stack size limits the total number of disks */ 614 int disks = sh->disks; 615 struct page *xor_srcs[disks]; 616 int target = sh->ops.target; 617 struct r5dev *tgt = &sh->dev[target]; 618 struct page *xor_dest = tgt->page; 619 int count = 0; 620 struct dma_async_tx_descriptor *tx; 621 int i; 622 623 pr_debug("%s: stripe %llu block: %d\n", 624 __FUNCTION__, (unsigned long long)sh->sector, target); 625 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 626 627 for (i = disks; i--; ) 628 if (i != target) 629 xor_srcs[count++] = sh->dev[i].page; 630 631 atomic_inc(&sh->count); 632 633 if (unlikely(count == 1)) 634 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, 635 0, NULL, ops_complete_compute5, sh); 636 else 637 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 638 ASYNC_TX_XOR_ZERO_DST, NULL, 639 ops_complete_compute5, sh); 640 641 /* ack now if postxor is not set to be run */ 642 if (tx && !test_bit(STRIPE_OP_POSTXOR, &pending)) 643 async_tx_ack(tx); 644 645 return tx; 646 } 647 648 static void ops_complete_prexor(void *stripe_head_ref) 649 { 650 struct stripe_head *sh = stripe_head_ref; 651 652 pr_debug("%s: stripe %llu\n", __FUNCTION__, 653 (unsigned long long)sh->sector); 654 655 set_bit(STRIPE_OP_PREXOR, &sh->ops.complete); 656 } 657 658 static struct dma_async_tx_descriptor * 659 ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) 660 { 661 /* kernel stack size limits the total number of disks */ 662 int disks = sh->disks; 663 struct page *xor_srcs[disks]; 664 int count = 0, pd_idx = sh->pd_idx, i; 665 666 /* existing parity data subtracted */ 667 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 668 669 pr_debug("%s: stripe %llu\n", __FUNCTION__, 670 (unsigned long long)sh->sector); 671 672 for (i = disks; i--; ) { 673 struct r5dev *dev = &sh->dev[i]; 674 /* Only process blocks that are known to be uptodate */ 675 if (dev->towrite && test_bit(R5_Wantprexor, &dev->flags)) 676 xor_srcs[count++] = dev->page; 677 } 678 679 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 680 ASYNC_TX_DEP_ACK | ASYNC_TX_XOR_DROP_DST, tx, 681 ops_complete_prexor, sh); 682 683 return tx; 684 } 685 686 static struct dma_async_tx_descriptor * 687 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) 688 { 689 int disks = sh->disks; 690 int pd_idx = sh->pd_idx, i; 691 692 /* check if prexor is active which means only process blocks 693 * that are part of a read-modify-write (Wantprexor) 694 */ 695 int prexor = test_bit(STRIPE_OP_PREXOR, &sh->ops.pending); 696 697 pr_debug("%s: stripe %llu\n", __FUNCTION__, 698 (unsigned long long)sh->sector); 699 700 for (i = disks; i--; ) { 701 struct r5dev *dev = &sh->dev[i]; 702 struct bio *chosen; 703 int towrite; 704 705 towrite = 0; 706 if (prexor) { /* rmw */ 707 if (dev->towrite && 708 test_bit(R5_Wantprexor, &dev->flags)) 709 towrite = 1; 710 } else { /* rcw */ 711 if (i != pd_idx && dev->towrite && 712 test_bit(R5_LOCKED, &dev->flags)) 713 towrite = 1; 714 } 715 716 if (towrite) { 717 struct bio *wbi; 718 719 spin_lock(&sh->lock); 720 chosen = dev->towrite; 721 dev->towrite = NULL; 722 BUG_ON(dev->written); 723 wbi = dev->written = chosen; 724 spin_unlock(&sh->lock); 725 726 while (wbi && wbi->bi_sector < 727 dev->sector + STRIPE_SECTORS) { 728 tx = async_copy_data(1, wbi, dev->page, 729 dev->sector, tx); 730 wbi = r5_next_bio(wbi, dev->sector); 731 } 732 } 733 } 734 735 return tx; 736 } 737 738 static void ops_complete_postxor(void *stripe_head_ref) 739 { 740 struct stripe_head *sh = stripe_head_ref; 741 742 pr_debug("%s: stripe %llu\n", __FUNCTION__, 743 (unsigned long long)sh->sector); 744 745 set_bit(STRIPE_OP_POSTXOR, &sh->ops.complete); 746 set_bit(STRIPE_HANDLE, &sh->state); 747 release_stripe(sh); 748 } 749 750 static void ops_complete_write(void *stripe_head_ref) 751 { 752 struct stripe_head *sh = stripe_head_ref; 753 int disks = sh->disks, i, pd_idx = sh->pd_idx; 754 755 pr_debug("%s: stripe %llu\n", __FUNCTION__, 756 (unsigned long long)sh->sector); 757 758 for (i = disks; i--; ) { 759 struct r5dev *dev = &sh->dev[i]; 760 if (dev->written || i == pd_idx) 761 set_bit(R5_UPTODATE, &dev->flags); 762 } 763 764 set_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete); 765 set_bit(STRIPE_OP_POSTXOR, &sh->ops.complete); 766 767 set_bit(STRIPE_HANDLE, &sh->state); 768 release_stripe(sh); 769 } 770 771 static void 772 ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) 773 { 774 /* kernel stack size limits the total number of disks */ 775 int disks = sh->disks; 776 struct page *xor_srcs[disks]; 777 778 int count = 0, pd_idx = sh->pd_idx, i; 779 struct page *xor_dest; 780 int prexor = test_bit(STRIPE_OP_PREXOR, &sh->ops.pending); 781 unsigned long flags; 782 dma_async_tx_callback callback; 783 784 pr_debug("%s: stripe %llu\n", __FUNCTION__, 785 (unsigned long long)sh->sector); 786 787 /* check if prexor is active which means only process blocks 788 * that are part of a read-modify-write (written) 789 */ 790 if (prexor) { 791 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 792 for (i = disks; i--; ) { 793 struct r5dev *dev = &sh->dev[i]; 794 if (dev->written) 795 xor_srcs[count++] = dev->page; 796 } 797 } else { 798 xor_dest = sh->dev[pd_idx].page; 799 for (i = disks; i--; ) { 800 struct r5dev *dev = &sh->dev[i]; 801 if (i != pd_idx) 802 xor_srcs[count++] = dev->page; 803 } 804 } 805 806 /* check whether this postxor is part of a write */ 807 callback = test_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending) ? 808 ops_complete_write : ops_complete_postxor; 809 810 /* 1/ if we prexor'd then the dest is reused as a source 811 * 2/ if we did not prexor then we are redoing the parity 812 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST 813 * for the synchronous xor case 814 */ 815 flags = ASYNC_TX_DEP_ACK | ASYNC_TX_ACK | 816 (prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST); 817 818 atomic_inc(&sh->count); 819 820 if (unlikely(count == 1)) { 821 flags &= ~(ASYNC_TX_XOR_DROP_DST | ASYNC_TX_XOR_ZERO_DST); 822 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, 823 flags, tx, callback, sh); 824 } else 825 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 826 flags, tx, callback, sh); 827 } 828 829 static void ops_complete_check(void *stripe_head_ref) 830 { 831 struct stripe_head *sh = stripe_head_ref; 832 int pd_idx = sh->pd_idx; 833 834 pr_debug("%s: stripe %llu\n", __FUNCTION__, 835 (unsigned long long)sh->sector); 836 837 if (test_and_clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending) && 838 sh->ops.zero_sum_result == 0) 839 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 840 841 set_bit(STRIPE_OP_CHECK, &sh->ops.complete); 842 set_bit(STRIPE_HANDLE, &sh->state); 843 release_stripe(sh); 844 } 845 846 static void ops_run_check(struct stripe_head *sh) 847 { 848 /* kernel stack size limits the total number of disks */ 849 int disks = sh->disks; 850 struct page *xor_srcs[disks]; 851 struct dma_async_tx_descriptor *tx; 852 853 int count = 0, pd_idx = sh->pd_idx, i; 854 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 855 856 pr_debug("%s: stripe %llu\n", __FUNCTION__, 857 (unsigned long long)sh->sector); 858 859 for (i = disks; i--; ) { 860 struct r5dev *dev = &sh->dev[i]; 861 if (i != pd_idx) 862 xor_srcs[count++] = dev->page; 863 } 864 865 tx = async_xor_zero_sum(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 866 &sh->ops.zero_sum_result, 0, NULL, NULL, NULL); 867 868 if (tx) 869 set_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending); 870 else 871 clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending); 872 873 atomic_inc(&sh->count); 874 tx = async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx, 875 ops_complete_check, sh); 876 } 877 878 static void raid5_run_ops(struct stripe_head *sh, unsigned long pending) 879 { 880 int overlap_clear = 0, i, disks = sh->disks; 881 struct dma_async_tx_descriptor *tx = NULL; 882 883 if (test_bit(STRIPE_OP_BIOFILL, &pending)) { 884 ops_run_biofill(sh); 885 overlap_clear++; 886 } 887 888 if (test_bit(STRIPE_OP_COMPUTE_BLK, &pending)) 889 tx = ops_run_compute5(sh, pending); 890 891 if (test_bit(STRIPE_OP_PREXOR, &pending)) 892 tx = ops_run_prexor(sh, tx); 893 894 if (test_bit(STRIPE_OP_BIODRAIN, &pending)) { 895 tx = ops_run_biodrain(sh, tx); 896 overlap_clear++; 897 } 898 899 if (test_bit(STRIPE_OP_POSTXOR, &pending)) 900 ops_run_postxor(sh, tx); 901 902 if (test_bit(STRIPE_OP_CHECK, &pending)) 903 ops_run_check(sh); 904 905 if (test_bit(STRIPE_OP_IO, &pending)) 906 ops_run_io(sh); 907 908 if (overlap_clear) 909 for (i = disks; i--; ) { 910 struct r5dev *dev = &sh->dev[i]; 911 if (test_and_clear_bit(R5_Overlap, &dev->flags)) 912 wake_up(&sh->raid_conf->wait_for_overlap); 913 } 914 } 915 916 static int grow_one_stripe(raid5_conf_t *conf) 917 { 918 struct stripe_head *sh; 919 sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL); 920 if (!sh) 921 return 0; 922 memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev)); 923 sh->raid_conf = conf; 924 spin_lock_init(&sh->lock); 925 926 if (grow_buffers(sh, conf->raid_disks)) { 927 shrink_buffers(sh, conf->raid_disks); 928 kmem_cache_free(conf->slab_cache, sh); 929 return 0; 930 } 931 sh->disks = conf->raid_disks; 932 /* we just created an active stripe so... */ 933 atomic_set(&sh->count, 1); 934 atomic_inc(&conf->active_stripes); 935 INIT_LIST_HEAD(&sh->lru); 936 release_stripe(sh); 937 return 1; 938 } 939 940 static int grow_stripes(raid5_conf_t *conf, int num) 941 { 942 struct kmem_cache *sc; 943 int devs = conf->raid_disks; 944 945 sprintf(conf->cache_name[0], "raid5-%s", mdname(conf->mddev)); 946 sprintf(conf->cache_name[1], "raid5-%s-alt", mdname(conf->mddev)); 947 conf->active_name = 0; 948 sc = kmem_cache_create(conf->cache_name[conf->active_name], 949 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev), 950 0, 0, NULL); 951 if (!sc) 952 return 1; 953 conf->slab_cache = sc; 954 conf->pool_size = devs; 955 while (num--) 956 if (!grow_one_stripe(conf)) 957 return 1; 958 return 0; 959 } 960 961 #ifdef CONFIG_MD_RAID5_RESHAPE 962 static int resize_stripes(raid5_conf_t *conf, int newsize) 963 { 964 /* Make all the stripes able to hold 'newsize' devices. 965 * New slots in each stripe get 'page' set to a new page. 966 * 967 * This happens in stages: 968 * 1/ create a new kmem_cache and allocate the required number of 969 * stripe_heads. 970 * 2/ gather all the old stripe_heads and tranfer the pages across 971 * to the new stripe_heads. This will have the side effect of 972 * freezing the array as once all stripe_heads have been collected, 973 * no IO will be possible. Old stripe heads are freed once their 974 * pages have been transferred over, and the old kmem_cache is 975 * freed when all stripes are done. 976 * 3/ reallocate conf->disks to be suitable bigger. If this fails, 977 * we simple return a failre status - no need to clean anything up. 978 * 4/ allocate new pages for the new slots in the new stripe_heads. 979 * If this fails, we don't bother trying the shrink the 980 * stripe_heads down again, we just leave them as they are. 981 * As each stripe_head is processed the new one is released into 982 * active service. 983 * 984 * Once step2 is started, we cannot afford to wait for a write, 985 * so we use GFP_NOIO allocations. 986 */ 987 struct stripe_head *osh, *nsh; 988 LIST_HEAD(newstripes); 989 struct disk_info *ndisks; 990 int err = 0; 991 struct kmem_cache *sc; 992 int i; 993 994 if (newsize <= conf->pool_size) 995 return 0; /* never bother to shrink */ 996 997 md_allow_write(conf->mddev); 998 999 /* Step 1 */ 1000 sc = kmem_cache_create(conf->cache_name[1-conf->active_name], 1001 sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev), 1002 0, 0, NULL); 1003 if (!sc) 1004 return -ENOMEM; 1005 1006 for (i = conf->max_nr_stripes; i; i--) { 1007 nsh = kmem_cache_alloc(sc, GFP_KERNEL); 1008 if (!nsh) 1009 break; 1010 1011 memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev)); 1012 1013 nsh->raid_conf = conf; 1014 spin_lock_init(&nsh->lock); 1015 1016 list_add(&nsh->lru, &newstripes); 1017 } 1018 if (i) { 1019 /* didn't get enough, give up */ 1020 while (!list_empty(&newstripes)) { 1021 nsh = list_entry(newstripes.next, struct stripe_head, lru); 1022 list_del(&nsh->lru); 1023 kmem_cache_free(sc, nsh); 1024 } 1025 kmem_cache_destroy(sc); 1026 return -ENOMEM; 1027 } 1028 /* Step 2 - Must use GFP_NOIO now. 1029 * OK, we have enough stripes, start collecting inactive 1030 * stripes and copying them over 1031 */ 1032 list_for_each_entry(nsh, &newstripes, lru) { 1033 spin_lock_irq(&conf->device_lock); 1034 wait_event_lock_irq(conf->wait_for_stripe, 1035 !list_empty(&conf->inactive_list), 1036 conf->device_lock, 1037 unplug_slaves(conf->mddev) 1038 ); 1039 osh = get_free_stripe(conf); 1040 spin_unlock_irq(&conf->device_lock); 1041 atomic_set(&nsh->count, 1); 1042 for(i=0; i<conf->pool_size; i++) 1043 nsh->dev[i].page = osh->dev[i].page; 1044 for( ; i<newsize; i++) 1045 nsh->dev[i].page = NULL; 1046 kmem_cache_free(conf->slab_cache, osh); 1047 } 1048 kmem_cache_destroy(conf->slab_cache); 1049 1050 /* Step 3. 1051 * At this point, we are holding all the stripes so the array 1052 * is completely stalled, so now is a good time to resize 1053 * conf->disks. 1054 */ 1055 ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO); 1056 if (ndisks) { 1057 for (i=0; i<conf->raid_disks; i++) 1058 ndisks[i] = conf->disks[i]; 1059 kfree(conf->disks); 1060 conf->disks = ndisks; 1061 } else 1062 err = -ENOMEM; 1063 1064 /* Step 4, return new stripes to service */ 1065 while(!list_empty(&newstripes)) { 1066 nsh = list_entry(newstripes.next, struct stripe_head, lru); 1067 list_del_init(&nsh->lru); 1068 for (i=conf->raid_disks; i < newsize; i++) 1069 if (nsh->dev[i].page == NULL) { 1070 struct page *p = alloc_page(GFP_NOIO); 1071 nsh->dev[i].page = p; 1072 if (!p) 1073 err = -ENOMEM; 1074 } 1075 release_stripe(nsh); 1076 } 1077 /* critical section pass, GFP_NOIO no longer needed */ 1078 1079 conf->slab_cache = sc; 1080 conf->active_name = 1-conf->active_name; 1081 conf->pool_size = newsize; 1082 return err; 1083 } 1084 #endif 1085 1086 static int drop_one_stripe(raid5_conf_t *conf) 1087 { 1088 struct stripe_head *sh; 1089 1090 spin_lock_irq(&conf->device_lock); 1091 sh = get_free_stripe(conf); 1092 spin_unlock_irq(&conf->device_lock); 1093 if (!sh) 1094 return 0; 1095 BUG_ON(atomic_read(&sh->count)); 1096 shrink_buffers(sh, conf->pool_size); 1097 kmem_cache_free(conf->slab_cache, sh); 1098 atomic_dec(&conf->active_stripes); 1099 return 1; 1100 } 1101 1102 static void shrink_stripes(raid5_conf_t *conf) 1103 { 1104 while (drop_one_stripe(conf)) 1105 ; 1106 1107 if (conf->slab_cache) 1108 kmem_cache_destroy(conf->slab_cache); 1109 conf->slab_cache = NULL; 1110 } 1111 1112 static void raid5_end_read_request(struct bio * bi, int error) 1113 { 1114 struct stripe_head *sh = bi->bi_private; 1115 raid5_conf_t *conf = sh->raid_conf; 1116 int disks = sh->disks, i; 1117 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 1118 char b[BDEVNAME_SIZE]; 1119 mdk_rdev_t *rdev; 1120 1121 1122 for (i=0 ; i<disks; i++) 1123 if (bi == &sh->dev[i].req) 1124 break; 1125 1126 pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n", 1127 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 1128 uptodate); 1129 if (i == disks) { 1130 BUG(); 1131 return; 1132 } 1133 1134 if (uptodate) { 1135 set_bit(R5_UPTODATE, &sh->dev[i].flags); 1136 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 1137 rdev = conf->disks[i].rdev; 1138 printk(KERN_INFO "raid5:%s: read error corrected (%lu sectors at %llu on %s)\n", 1139 mdname(conf->mddev), STRIPE_SECTORS, 1140 (unsigned long long)sh->sector + rdev->data_offset, 1141 bdevname(rdev->bdev, b)); 1142 clear_bit(R5_ReadError, &sh->dev[i].flags); 1143 clear_bit(R5_ReWrite, &sh->dev[i].flags); 1144 } 1145 if (atomic_read(&conf->disks[i].rdev->read_errors)) 1146 atomic_set(&conf->disks[i].rdev->read_errors, 0); 1147 } else { 1148 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b); 1149 int retry = 0; 1150 rdev = conf->disks[i].rdev; 1151 1152 clear_bit(R5_UPTODATE, &sh->dev[i].flags); 1153 atomic_inc(&rdev->read_errors); 1154 if (conf->mddev->degraded) 1155 printk(KERN_WARNING "raid5:%s: read error not correctable (sector %llu on %s).\n", 1156 mdname(conf->mddev), 1157 (unsigned long long)sh->sector + rdev->data_offset, 1158 bdn); 1159 else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) 1160 /* Oh, no!!! */ 1161 printk(KERN_WARNING "raid5:%s: read error NOT corrected!! (sector %llu on %s).\n", 1162 mdname(conf->mddev), 1163 (unsigned long long)sh->sector + rdev->data_offset, 1164 bdn); 1165 else if (atomic_read(&rdev->read_errors) 1166 > conf->max_nr_stripes) 1167 printk(KERN_WARNING 1168 "raid5:%s: Too many read errors, failing device %s.\n", 1169 mdname(conf->mddev), bdn); 1170 else 1171 retry = 1; 1172 if (retry) 1173 set_bit(R5_ReadError, &sh->dev[i].flags); 1174 else { 1175 clear_bit(R5_ReadError, &sh->dev[i].flags); 1176 clear_bit(R5_ReWrite, &sh->dev[i].flags); 1177 md_error(conf->mddev, rdev); 1178 } 1179 } 1180 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); 1181 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1182 set_bit(STRIPE_HANDLE, &sh->state); 1183 release_stripe(sh); 1184 } 1185 1186 static void raid5_end_write_request (struct bio *bi, int error) 1187 { 1188 struct stripe_head *sh = bi->bi_private; 1189 raid5_conf_t *conf = sh->raid_conf; 1190 int disks = sh->disks, i; 1191 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 1192 1193 for (i=0 ; i<disks; i++) 1194 if (bi == &sh->dev[i].req) 1195 break; 1196 1197 pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n", 1198 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 1199 uptodate); 1200 if (i == disks) { 1201 BUG(); 1202 return; 1203 } 1204 1205 if (!uptodate) 1206 md_error(conf->mddev, conf->disks[i].rdev); 1207 1208 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); 1209 1210 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1211 set_bit(STRIPE_HANDLE, &sh->state); 1212 release_stripe(sh); 1213 } 1214 1215 1216 static sector_t compute_blocknr(struct stripe_head *sh, int i); 1217 1218 static void raid5_build_block (struct stripe_head *sh, int i) 1219 { 1220 struct r5dev *dev = &sh->dev[i]; 1221 1222 bio_init(&dev->req); 1223 dev->req.bi_io_vec = &dev->vec; 1224 dev->req.bi_vcnt++; 1225 dev->req.bi_max_vecs++; 1226 dev->vec.bv_page = dev->page; 1227 dev->vec.bv_len = STRIPE_SIZE; 1228 dev->vec.bv_offset = 0; 1229 1230 dev->req.bi_sector = sh->sector; 1231 dev->req.bi_private = sh; 1232 1233 dev->flags = 0; 1234 dev->sector = compute_blocknr(sh, i); 1235 } 1236 1237 static void error(mddev_t *mddev, mdk_rdev_t *rdev) 1238 { 1239 char b[BDEVNAME_SIZE]; 1240 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 1241 pr_debug("raid5: error called\n"); 1242 1243 if (!test_bit(Faulty, &rdev->flags)) { 1244 set_bit(MD_CHANGE_DEVS, &mddev->flags); 1245 if (test_and_clear_bit(In_sync, &rdev->flags)) { 1246 unsigned long flags; 1247 spin_lock_irqsave(&conf->device_lock, flags); 1248 mddev->degraded++; 1249 spin_unlock_irqrestore(&conf->device_lock, flags); 1250 /* 1251 * if recovery was running, make sure it aborts. 1252 */ 1253 set_bit(MD_RECOVERY_ERR, &mddev->recovery); 1254 } 1255 set_bit(Faulty, &rdev->flags); 1256 printk (KERN_ALERT 1257 "raid5: Disk failure on %s, disabling device." 1258 " Operation continuing on %d devices\n", 1259 bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded); 1260 } 1261 } 1262 1263 /* 1264 * Input: a 'big' sector number, 1265 * Output: index of the data and parity disk, and the sector # in them. 1266 */ 1267 static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks, 1268 unsigned int data_disks, unsigned int * dd_idx, 1269 unsigned int * pd_idx, raid5_conf_t *conf) 1270 { 1271 long stripe; 1272 unsigned long chunk_number; 1273 unsigned int chunk_offset; 1274 sector_t new_sector; 1275 int sectors_per_chunk = conf->chunk_size >> 9; 1276 1277 /* First compute the information on this sector */ 1278 1279 /* 1280 * Compute the chunk number and the sector offset inside the chunk 1281 */ 1282 chunk_offset = sector_div(r_sector, sectors_per_chunk); 1283 chunk_number = r_sector; 1284 BUG_ON(r_sector != chunk_number); 1285 1286 /* 1287 * Compute the stripe number 1288 */ 1289 stripe = chunk_number / data_disks; 1290 1291 /* 1292 * Compute the data disk and parity disk indexes inside the stripe 1293 */ 1294 *dd_idx = chunk_number % data_disks; 1295 1296 /* 1297 * Select the parity disk based on the user selected algorithm. 1298 */ 1299 switch(conf->level) { 1300 case 4: 1301 *pd_idx = data_disks; 1302 break; 1303 case 5: 1304 switch (conf->algorithm) { 1305 case ALGORITHM_LEFT_ASYMMETRIC: 1306 *pd_idx = data_disks - stripe % raid_disks; 1307 if (*dd_idx >= *pd_idx) 1308 (*dd_idx)++; 1309 break; 1310 case ALGORITHM_RIGHT_ASYMMETRIC: 1311 *pd_idx = stripe % raid_disks; 1312 if (*dd_idx >= *pd_idx) 1313 (*dd_idx)++; 1314 break; 1315 case ALGORITHM_LEFT_SYMMETRIC: 1316 *pd_idx = data_disks - stripe % raid_disks; 1317 *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks; 1318 break; 1319 case ALGORITHM_RIGHT_SYMMETRIC: 1320 *pd_idx = stripe % raid_disks; 1321 *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks; 1322 break; 1323 default: 1324 printk(KERN_ERR "raid5: unsupported algorithm %d\n", 1325 conf->algorithm); 1326 } 1327 break; 1328 case 6: 1329 1330 /**** FIX THIS ****/ 1331 switch (conf->algorithm) { 1332 case ALGORITHM_LEFT_ASYMMETRIC: 1333 *pd_idx = raid_disks - 1 - (stripe % raid_disks); 1334 if (*pd_idx == raid_disks-1) 1335 (*dd_idx)++; /* Q D D D P */ 1336 else if (*dd_idx >= *pd_idx) 1337 (*dd_idx) += 2; /* D D P Q D */ 1338 break; 1339 case ALGORITHM_RIGHT_ASYMMETRIC: 1340 *pd_idx = stripe % raid_disks; 1341 if (*pd_idx == raid_disks-1) 1342 (*dd_idx)++; /* Q D D D P */ 1343 else if (*dd_idx >= *pd_idx) 1344 (*dd_idx) += 2; /* D D P Q D */ 1345 break; 1346 case ALGORITHM_LEFT_SYMMETRIC: 1347 *pd_idx = raid_disks - 1 - (stripe % raid_disks); 1348 *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks; 1349 break; 1350 case ALGORITHM_RIGHT_SYMMETRIC: 1351 *pd_idx = stripe % raid_disks; 1352 *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks; 1353 break; 1354 default: 1355 printk (KERN_CRIT "raid6: unsupported algorithm %d\n", 1356 conf->algorithm); 1357 } 1358 break; 1359 } 1360 1361 /* 1362 * Finally, compute the new sector number 1363 */ 1364 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset; 1365 return new_sector; 1366 } 1367 1368 1369 static sector_t compute_blocknr(struct stripe_head *sh, int i) 1370 { 1371 raid5_conf_t *conf = sh->raid_conf; 1372 int raid_disks = sh->disks; 1373 int data_disks = raid_disks - conf->max_degraded; 1374 sector_t new_sector = sh->sector, check; 1375 int sectors_per_chunk = conf->chunk_size >> 9; 1376 sector_t stripe; 1377 int chunk_offset; 1378 int chunk_number, dummy1, dummy2, dd_idx = i; 1379 sector_t r_sector; 1380 1381 1382 chunk_offset = sector_div(new_sector, sectors_per_chunk); 1383 stripe = new_sector; 1384 BUG_ON(new_sector != stripe); 1385 1386 if (i == sh->pd_idx) 1387 return 0; 1388 switch(conf->level) { 1389 case 4: break; 1390 case 5: 1391 switch (conf->algorithm) { 1392 case ALGORITHM_LEFT_ASYMMETRIC: 1393 case ALGORITHM_RIGHT_ASYMMETRIC: 1394 if (i > sh->pd_idx) 1395 i--; 1396 break; 1397 case ALGORITHM_LEFT_SYMMETRIC: 1398 case ALGORITHM_RIGHT_SYMMETRIC: 1399 if (i < sh->pd_idx) 1400 i += raid_disks; 1401 i -= (sh->pd_idx + 1); 1402 break; 1403 default: 1404 printk(KERN_ERR "raid5: unsupported algorithm %d\n", 1405 conf->algorithm); 1406 } 1407 break; 1408 case 6: 1409 if (i == raid6_next_disk(sh->pd_idx, raid_disks)) 1410 return 0; /* It is the Q disk */ 1411 switch (conf->algorithm) { 1412 case ALGORITHM_LEFT_ASYMMETRIC: 1413 case ALGORITHM_RIGHT_ASYMMETRIC: 1414 if (sh->pd_idx == raid_disks-1) 1415 i--; /* Q D D D P */ 1416 else if (i > sh->pd_idx) 1417 i -= 2; /* D D P Q D */ 1418 break; 1419 case ALGORITHM_LEFT_SYMMETRIC: 1420 case ALGORITHM_RIGHT_SYMMETRIC: 1421 if (sh->pd_idx == raid_disks-1) 1422 i--; /* Q D D D P */ 1423 else { 1424 /* D D P Q D */ 1425 if (i < sh->pd_idx) 1426 i += raid_disks; 1427 i -= (sh->pd_idx + 2); 1428 } 1429 break; 1430 default: 1431 printk (KERN_CRIT "raid6: unsupported algorithm %d\n", 1432 conf->algorithm); 1433 } 1434 break; 1435 } 1436 1437 chunk_number = stripe * data_disks + i; 1438 r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset; 1439 1440 check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf); 1441 if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) { 1442 printk(KERN_ERR "compute_blocknr: map not correct\n"); 1443 return 0; 1444 } 1445 return r_sector; 1446 } 1447 1448 1449 1450 /* 1451 * Copy data between a page in the stripe cache, and one or more bion 1452 * The page could align with the middle of the bio, or there could be 1453 * several bion, each with several bio_vecs, which cover part of the page 1454 * Multiple bion are linked together on bi_next. There may be extras 1455 * at the end of this list. We ignore them. 1456 */ 1457 static void copy_data(int frombio, struct bio *bio, 1458 struct page *page, 1459 sector_t sector) 1460 { 1461 char *pa = page_address(page); 1462 struct bio_vec *bvl; 1463 int i; 1464 int page_offset; 1465 1466 if (bio->bi_sector >= sector) 1467 page_offset = (signed)(bio->bi_sector - sector) * 512; 1468 else 1469 page_offset = (signed)(sector - bio->bi_sector) * -512; 1470 bio_for_each_segment(bvl, bio, i) { 1471 int len = bio_iovec_idx(bio,i)->bv_len; 1472 int clen; 1473 int b_offset = 0; 1474 1475 if (page_offset < 0) { 1476 b_offset = -page_offset; 1477 page_offset += b_offset; 1478 len -= b_offset; 1479 } 1480 1481 if (len > 0 && page_offset + len > STRIPE_SIZE) 1482 clen = STRIPE_SIZE - page_offset; 1483 else clen = len; 1484 1485 if (clen > 0) { 1486 char *ba = __bio_kmap_atomic(bio, i, KM_USER0); 1487 if (frombio) 1488 memcpy(pa+page_offset, ba+b_offset, clen); 1489 else 1490 memcpy(ba+b_offset, pa+page_offset, clen); 1491 __bio_kunmap_atomic(ba, KM_USER0); 1492 } 1493 if (clen < len) /* hit end of page */ 1494 break; 1495 page_offset += len; 1496 } 1497 } 1498 1499 #define check_xor() do { \ 1500 if (count == MAX_XOR_BLOCKS) { \ 1501 xor_blocks(count, STRIPE_SIZE, dest, ptr);\ 1502 count = 0; \ 1503 } \ 1504 } while(0) 1505 1506 static void compute_parity6(struct stripe_head *sh, int method) 1507 { 1508 raid6_conf_t *conf = sh->raid_conf; 1509 int i, pd_idx = sh->pd_idx, qd_idx, d0_idx, disks = sh->disks, count; 1510 struct bio *chosen; 1511 /**** FIX THIS: This could be very bad if disks is close to 256 ****/ 1512 void *ptrs[disks]; 1513 1514 qd_idx = raid6_next_disk(pd_idx, disks); 1515 d0_idx = raid6_next_disk(qd_idx, disks); 1516 1517 pr_debug("compute_parity, stripe %llu, method %d\n", 1518 (unsigned long long)sh->sector, method); 1519 1520 switch(method) { 1521 case READ_MODIFY_WRITE: 1522 BUG(); /* READ_MODIFY_WRITE N/A for RAID-6 */ 1523 case RECONSTRUCT_WRITE: 1524 for (i= disks; i-- ;) 1525 if ( i != pd_idx && i != qd_idx && sh->dev[i].towrite ) { 1526 chosen = sh->dev[i].towrite; 1527 sh->dev[i].towrite = NULL; 1528 1529 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 1530 wake_up(&conf->wait_for_overlap); 1531 1532 BUG_ON(sh->dev[i].written); 1533 sh->dev[i].written = chosen; 1534 } 1535 break; 1536 case CHECK_PARITY: 1537 BUG(); /* Not implemented yet */ 1538 } 1539 1540 for (i = disks; i--;) 1541 if (sh->dev[i].written) { 1542 sector_t sector = sh->dev[i].sector; 1543 struct bio *wbi = sh->dev[i].written; 1544 while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) { 1545 copy_data(1, wbi, sh->dev[i].page, sector); 1546 wbi = r5_next_bio(wbi, sector); 1547 } 1548 1549 set_bit(R5_LOCKED, &sh->dev[i].flags); 1550 set_bit(R5_UPTODATE, &sh->dev[i].flags); 1551 } 1552 1553 // switch(method) { 1554 // case RECONSTRUCT_WRITE: 1555 // case CHECK_PARITY: 1556 // case UPDATE_PARITY: 1557 /* Note that unlike RAID-5, the ordering of the disks matters greatly. */ 1558 /* FIX: Is this ordering of drives even remotely optimal? */ 1559 count = 0; 1560 i = d0_idx; 1561 do { 1562 ptrs[count++] = page_address(sh->dev[i].page); 1563 if (count <= disks-2 && !test_bit(R5_UPTODATE, &sh->dev[i].flags)) 1564 printk("block %d/%d not uptodate on parity calc\n", i,count); 1565 i = raid6_next_disk(i, disks); 1566 } while ( i != d0_idx ); 1567 // break; 1568 // } 1569 1570 raid6_call.gen_syndrome(disks, STRIPE_SIZE, ptrs); 1571 1572 switch(method) { 1573 case RECONSTRUCT_WRITE: 1574 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 1575 set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags); 1576 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); 1577 set_bit(R5_LOCKED, &sh->dev[qd_idx].flags); 1578 break; 1579 case UPDATE_PARITY: 1580 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 1581 set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags); 1582 break; 1583 } 1584 } 1585 1586 1587 /* Compute one missing block */ 1588 static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero) 1589 { 1590 int i, count, disks = sh->disks; 1591 void *ptr[MAX_XOR_BLOCKS], *dest, *p; 1592 int pd_idx = sh->pd_idx; 1593 int qd_idx = raid6_next_disk(pd_idx, disks); 1594 1595 pr_debug("compute_block_1, stripe %llu, idx %d\n", 1596 (unsigned long long)sh->sector, dd_idx); 1597 1598 if ( dd_idx == qd_idx ) { 1599 /* We're actually computing the Q drive */ 1600 compute_parity6(sh, UPDATE_PARITY); 1601 } else { 1602 dest = page_address(sh->dev[dd_idx].page); 1603 if (!nozero) memset(dest, 0, STRIPE_SIZE); 1604 count = 0; 1605 for (i = disks ; i--; ) { 1606 if (i == dd_idx || i == qd_idx) 1607 continue; 1608 p = page_address(sh->dev[i].page); 1609 if (test_bit(R5_UPTODATE, &sh->dev[i].flags)) 1610 ptr[count++] = p; 1611 else 1612 printk("compute_block() %d, stripe %llu, %d" 1613 " not present\n", dd_idx, 1614 (unsigned long long)sh->sector, i); 1615 1616 check_xor(); 1617 } 1618 if (count) 1619 xor_blocks(count, STRIPE_SIZE, dest, ptr); 1620 if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); 1621 else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); 1622 } 1623 } 1624 1625 /* Compute two missing blocks */ 1626 static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2) 1627 { 1628 int i, count, disks = sh->disks; 1629 int pd_idx = sh->pd_idx; 1630 int qd_idx = raid6_next_disk(pd_idx, disks); 1631 int d0_idx = raid6_next_disk(qd_idx, disks); 1632 int faila, failb; 1633 1634 /* faila and failb are disk numbers relative to d0_idx */ 1635 /* pd_idx become disks-2 and qd_idx become disks-1 */ 1636 faila = (dd_idx1 < d0_idx) ? dd_idx1+(disks-d0_idx) : dd_idx1-d0_idx; 1637 failb = (dd_idx2 < d0_idx) ? dd_idx2+(disks-d0_idx) : dd_idx2-d0_idx; 1638 1639 BUG_ON(faila == failb); 1640 if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; } 1641 1642 pr_debug("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n", 1643 (unsigned long long)sh->sector, dd_idx1, dd_idx2, faila, failb); 1644 1645 if ( failb == disks-1 ) { 1646 /* Q disk is one of the missing disks */ 1647 if ( faila == disks-2 ) { 1648 /* Missing P+Q, just recompute */ 1649 compute_parity6(sh, UPDATE_PARITY); 1650 return; 1651 } else { 1652 /* We're missing D+Q; recompute D from P */ 1653 compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1, 0); 1654 compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? */ 1655 return; 1656 } 1657 } 1658 1659 /* We're missing D+P or D+D; build pointer table */ 1660 { 1661 /**** FIX THIS: This could be very bad if disks is close to 256 ****/ 1662 void *ptrs[disks]; 1663 1664 count = 0; 1665 i = d0_idx; 1666 do { 1667 ptrs[count++] = page_address(sh->dev[i].page); 1668 i = raid6_next_disk(i, disks); 1669 if (i != dd_idx1 && i != dd_idx2 && 1670 !test_bit(R5_UPTODATE, &sh->dev[i].flags)) 1671 printk("compute_2 with missing block %d/%d\n", count, i); 1672 } while ( i != d0_idx ); 1673 1674 if ( failb == disks-2 ) { 1675 /* We're missing D+P. */ 1676 raid6_datap_recov(disks, STRIPE_SIZE, faila, ptrs); 1677 } else { 1678 /* We're missing D+D. */ 1679 raid6_2data_recov(disks, STRIPE_SIZE, faila, failb, ptrs); 1680 } 1681 1682 /* Both the above update both missing blocks */ 1683 set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags); 1684 set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags); 1685 } 1686 } 1687 1688 static int 1689 handle_write_operations5(struct stripe_head *sh, int rcw, int expand) 1690 { 1691 int i, pd_idx = sh->pd_idx, disks = sh->disks; 1692 int locked = 0; 1693 1694 if (rcw) { 1695 /* if we are not expanding this is a proper write request, and 1696 * there will be bios with new data to be drained into the 1697 * stripe cache 1698 */ 1699 if (!expand) { 1700 set_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending); 1701 sh->ops.count++; 1702 } 1703 1704 set_bit(STRIPE_OP_POSTXOR, &sh->ops.pending); 1705 sh->ops.count++; 1706 1707 for (i = disks; i--; ) { 1708 struct r5dev *dev = &sh->dev[i]; 1709 1710 if (dev->towrite) { 1711 set_bit(R5_LOCKED, &dev->flags); 1712 if (!expand) 1713 clear_bit(R5_UPTODATE, &dev->flags); 1714 locked++; 1715 } 1716 } 1717 } else { 1718 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || 1719 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); 1720 1721 set_bit(STRIPE_OP_PREXOR, &sh->ops.pending); 1722 set_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending); 1723 set_bit(STRIPE_OP_POSTXOR, &sh->ops.pending); 1724 1725 sh->ops.count += 3; 1726 1727 for (i = disks; i--; ) { 1728 struct r5dev *dev = &sh->dev[i]; 1729 if (i == pd_idx) 1730 continue; 1731 1732 /* For a read-modify write there may be blocks that are 1733 * locked for reading while others are ready to be 1734 * written so we distinguish these blocks by the 1735 * R5_Wantprexor bit 1736 */ 1737 if (dev->towrite && 1738 (test_bit(R5_UPTODATE, &dev->flags) || 1739 test_bit(R5_Wantcompute, &dev->flags))) { 1740 set_bit(R5_Wantprexor, &dev->flags); 1741 set_bit(R5_LOCKED, &dev->flags); 1742 clear_bit(R5_UPTODATE, &dev->flags); 1743 locked++; 1744 } 1745 } 1746 } 1747 1748 /* keep the parity disk locked while asynchronous operations 1749 * are in flight 1750 */ 1751 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); 1752 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 1753 locked++; 1754 1755 pr_debug("%s: stripe %llu locked: %d pending: %lx\n", 1756 __FUNCTION__, (unsigned long long)sh->sector, 1757 locked, sh->ops.pending); 1758 1759 return locked; 1760 } 1761 1762 /* 1763 * Each stripe/dev can have one or more bion attached. 1764 * toread/towrite point to the first in a chain. 1765 * The bi_next chain must be in order. 1766 */ 1767 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite) 1768 { 1769 struct bio **bip; 1770 raid5_conf_t *conf = sh->raid_conf; 1771 int firstwrite=0; 1772 1773 pr_debug("adding bh b#%llu to stripe s#%llu\n", 1774 (unsigned long long)bi->bi_sector, 1775 (unsigned long long)sh->sector); 1776 1777 1778 spin_lock(&sh->lock); 1779 spin_lock_irq(&conf->device_lock); 1780 if (forwrite) { 1781 bip = &sh->dev[dd_idx].towrite; 1782 if (*bip == NULL && sh->dev[dd_idx].written == NULL) 1783 firstwrite = 1; 1784 } else 1785 bip = &sh->dev[dd_idx].toread; 1786 while (*bip && (*bip)->bi_sector < bi->bi_sector) { 1787 if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector) 1788 goto overlap; 1789 bip = & (*bip)->bi_next; 1790 } 1791 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9)) 1792 goto overlap; 1793 1794 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); 1795 if (*bip) 1796 bi->bi_next = *bip; 1797 *bip = bi; 1798 bi->bi_phys_segments ++; 1799 spin_unlock_irq(&conf->device_lock); 1800 spin_unlock(&sh->lock); 1801 1802 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", 1803 (unsigned long long)bi->bi_sector, 1804 (unsigned long long)sh->sector, dd_idx); 1805 1806 if (conf->mddev->bitmap && firstwrite) { 1807 bitmap_startwrite(conf->mddev->bitmap, sh->sector, 1808 STRIPE_SECTORS, 0); 1809 sh->bm_seq = conf->seq_flush+1; 1810 set_bit(STRIPE_BIT_DELAY, &sh->state); 1811 } 1812 1813 if (forwrite) { 1814 /* check if page is covered */ 1815 sector_t sector = sh->dev[dd_idx].sector; 1816 for (bi=sh->dev[dd_idx].towrite; 1817 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && 1818 bi && bi->bi_sector <= sector; 1819 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { 1820 if (bi->bi_sector + (bi->bi_size>>9) >= sector) 1821 sector = bi->bi_sector + (bi->bi_size>>9); 1822 } 1823 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) 1824 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags); 1825 } 1826 return 1; 1827 1828 overlap: 1829 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); 1830 spin_unlock_irq(&conf->device_lock); 1831 spin_unlock(&sh->lock); 1832 return 0; 1833 } 1834 1835 static void end_reshape(raid5_conf_t *conf); 1836 1837 static int page_is_zero(struct page *p) 1838 { 1839 char *a = page_address(p); 1840 return ((*(u32*)a) == 0 && 1841 memcmp(a, a+4, STRIPE_SIZE-4)==0); 1842 } 1843 1844 static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks) 1845 { 1846 int sectors_per_chunk = conf->chunk_size >> 9; 1847 int pd_idx, dd_idx; 1848 int chunk_offset = sector_div(stripe, sectors_per_chunk); 1849 1850 raid5_compute_sector(stripe * (disks - conf->max_degraded) 1851 *sectors_per_chunk + chunk_offset, 1852 disks, disks - conf->max_degraded, 1853 &dd_idx, &pd_idx, conf); 1854 return pd_idx; 1855 } 1856 1857 static void 1858 handle_requests_to_failed_array(raid5_conf_t *conf, struct stripe_head *sh, 1859 struct stripe_head_state *s, int disks, 1860 struct bio **return_bi) 1861 { 1862 int i; 1863 for (i = disks; i--; ) { 1864 struct bio *bi; 1865 int bitmap_end = 0; 1866 1867 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 1868 mdk_rdev_t *rdev; 1869 rcu_read_lock(); 1870 rdev = rcu_dereference(conf->disks[i].rdev); 1871 if (rdev && test_bit(In_sync, &rdev->flags)) 1872 /* multiple read failures in one stripe */ 1873 md_error(conf->mddev, rdev); 1874 rcu_read_unlock(); 1875 } 1876 spin_lock_irq(&conf->device_lock); 1877 /* fail all writes first */ 1878 bi = sh->dev[i].towrite; 1879 sh->dev[i].towrite = NULL; 1880 if (bi) { 1881 s->to_write--; 1882 bitmap_end = 1; 1883 } 1884 1885 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 1886 wake_up(&conf->wait_for_overlap); 1887 1888 while (bi && bi->bi_sector < 1889 sh->dev[i].sector + STRIPE_SECTORS) { 1890 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 1891 clear_bit(BIO_UPTODATE, &bi->bi_flags); 1892 if (--bi->bi_phys_segments == 0) { 1893 md_write_end(conf->mddev); 1894 bi->bi_next = *return_bi; 1895 *return_bi = bi; 1896 } 1897 bi = nextbi; 1898 } 1899 /* and fail all 'written' */ 1900 bi = sh->dev[i].written; 1901 sh->dev[i].written = NULL; 1902 if (bi) bitmap_end = 1; 1903 while (bi && bi->bi_sector < 1904 sh->dev[i].sector + STRIPE_SECTORS) { 1905 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 1906 clear_bit(BIO_UPTODATE, &bi->bi_flags); 1907 if (--bi->bi_phys_segments == 0) { 1908 md_write_end(conf->mddev); 1909 bi->bi_next = *return_bi; 1910 *return_bi = bi; 1911 } 1912 bi = bi2; 1913 } 1914 1915 /* fail any reads if this device is non-operational and 1916 * the data has not reached the cache yet. 1917 */ 1918 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && 1919 (!test_bit(R5_Insync, &sh->dev[i].flags) || 1920 test_bit(R5_ReadError, &sh->dev[i].flags))) { 1921 bi = sh->dev[i].toread; 1922 sh->dev[i].toread = NULL; 1923 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 1924 wake_up(&conf->wait_for_overlap); 1925 if (bi) s->to_read--; 1926 while (bi && bi->bi_sector < 1927 sh->dev[i].sector + STRIPE_SECTORS) { 1928 struct bio *nextbi = 1929 r5_next_bio(bi, sh->dev[i].sector); 1930 clear_bit(BIO_UPTODATE, &bi->bi_flags); 1931 if (--bi->bi_phys_segments == 0) { 1932 bi->bi_next = *return_bi; 1933 *return_bi = bi; 1934 } 1935 bi = nextbi; 1936 } 1937 } 1938 spin_unlock_irq(&conf->device_lock); 1939 if (bitmap_end) 1940 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 1941 STRIPE_SECTORS, 0, 0); 1942 } 1943 1944 } 1945 1946 /* __handle_issuing_new_read_requests5 - returns 0 if there are no more disks 1947 * to process 1948 */ 1949 static int __handle_issuing_new_read_requests5(struct stripe_head *sh, 1950 struct stripe_head_state *s, int disk_idx, int disks) 1951 { 1952 struct r5dev *dev = &sh->dev[disk_idx]; 1953 struct r5dev *failed_dev = &sh->dev[s->failed_num]; 1954 1955 /* don't schedule compute operations or reads on the parity block while 1956 * a check is in flight 1957 */ 1958 if ((disk_idx == sh->pd_idx) && 1959 test_bit(STRIPE_OP_CHECK, &sh->ops.pending)) 1960 return ~0; 1961 1962 /* is the data in this block needed, and can we get it? */ 1963 if (!test_bit(R5_LOCKED, &dev->flags) && 1964 !test_bit(R5_UPTODATE, &dev->flags) && (dev->toread || 1965 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || 1966 s->syncing || s->expanding || (s->failed && 1967 (failed_dev->toread || (failed_dev->towrite && 1968 !test_bit(R5_OVERWRITE, &failed_dev->flags) 1969 ))))) { 1970 /* 1/ We would like to get this block, possibly by computing it, 1971 * but we might not be able to. 1972 * 1973 * 2/ Since parity check operations potentially make the parity 1974 * block !uptodate it will need to be refreshed before any 1975 * compute operations on data disks are scheduled. 1976 * 1977 * 3/ We hold off parity block re-reads until check operations 1978 * have quiesced. 1979 */ 1980 if ((s->uptodate == disks - 1) && 1981 !test_bit(STRIPE_OP_CHECK, &sh->ops.pending)) { 1982 set_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending); 1983 set_bit(R5_Wantcompute, &dev->flags); 1984 sh->ops.target = disk_idx; 1985 s->req_compute = 1; 1986 sh->ops.count++; 1987 /* Careful: from this point on 'uptodate' is in the eye 1988 * of raid5_run_ops which services 'compute' operations 1989 * before writes. R5_Wantcompute flags a block that will 1990 * be R5_UPTODATE by the time it is needed for a 1991 * subsequent operation. 1992 */ 1993 s->uptodate++; 1994 return 0; /* uptodate + compute == disks */ 1995 } else if ((s->uptodate < disks - 1) && 1996 test_bit(R5_Insync, &dev->flags)) { 1997 /* Note: we hold off compute operations while checks are 1998 * in flight, but we still prefer 'compute' over 'read' 1999 * hence we only read if (uptodate < * disks-1) 2000 */ 2001 set_bit(R5_LOCKED, &dev->flags); 2002 set_bit(R5_Wantread, &dev->flags); 2003 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending)) 2004 sh->ops.count++; 2005 s->locked++; 2006 pr_debug("Reading block %d (sync=%d)\n", disk_idx, 2007 s->syncing); 2008 } 2009 } 2010 2011 return ~0; 2012 } 2013 2014 static void handle_issuing_new_read_requests5(struct stripe_head *sh, 2015 struct stripe_head_state *s, int disks) 2016 { 2017 int i; 2018 2019 /* Clear completed compute operations. Parity recovery 2020 * (STRIPE_OP_MOD_REPAIR_PD) implies a write-back which is handled 2021 * later on in this routine 2022 */ 2023 if (test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete) && 2024 !test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) { 2025 clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete); 2026 clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.ack); 2027 clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending); 2028 } 2029 2030 /* look for blocks to read/compute, skip this if a compute 2031 * is already in flight, or if the stripe contents are in the 2032 * midst of changing due to a write 2033 */ 2034 if (!test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending) && 2035 !test_bit(STRIPE_OP_PREXOR, &sh->ops.pending) && 2036 !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) { 2037 for (i = disks; i--; ) 2038 if (__handle_issuing_new_read_requests5( 2039 sh, s, i, disks) == 0) 2040 break; 2041 } 2042 set_bit(STRIPE_HANDLE, &sh->state); 2043 } 2044 2045 static void handle_issuing_new_read_requests6(struct stripe_head *sh, 2046 struct stripe_head_state *s, struct r6_state *r6s, 2047 int disks) 2048 { 2049 int i; 2050 for (i = disks; i--; ) { 2051 struct r5dev *dev = &sh->dev[i]; 2052 if (!test_bit(R5_LOCKED, &dev->flags) && 2053 !test_bit(R5_UPTODATE, &dev->flags) && 2054 (dev->toread || (dev->towrite && 2055 !test_bit(R5_OVERWRITE, &dev->flags)) || 2056 s->syncing || s->expanding || 2057 (s->failed >= 1 && 2058 (sh->dev[r6s->failed_num[0]].toread || 2059 s->to_write)) || 2060 (s->failed >= 2 && 2061 (sh->dev[r6s->failed_num[1]].toread || 2062 s->to_write)))) { 2063 /* we would like to get this block, possibly 2064 * by computing it, but we might not be able to 2065 */ 2066 if (s->uptodate == disks-1) { 2067 pr_debug("Computing stripe %llu block %d\n", 2068 (unsigned long long)sh->sector, i); 2069 compute_block_1(sh, i, 0); 2070 s->uptodate++; 2071 } else if ( s->uptodate == disks-2 && s->failed >= 2 ) { 2072 /* Computing 2-failure is *very* expensive; only 2073 * do it if failed >= 2 2074 */ 2075 int other; 2076 for (other = disks; other--; ) { 2077 if (other == i) 2078 continue; 2079 if (!test_bit(R5_UPTODATE, 2080 &sh->dev[other].flags)) 2081 break; 2082 } 2083 BUG_ON(other < 0); 2084 pr_debug("Computing stripe %llu blocks %d,%d\n", 2085 (unsigned long long)sh->sector, 2086 i, other); 2087 compute_block_2(sh, i, other); 2088 s->uptodate += 2; 2089 } else if (test_bit(R5_Insync, &dev->flags)) { 2090 set_bit(R5_LOCKED, &dev->flags); 2091 set_bit(R5_Wantread, &dev->flags); 2092 s->locked++; 2093 pr_debug("Reading block %d (sync=%d)\n", 2094 i, s->syncing); 2095 } 2096 } 2097 } 2098 set_bit(STRIPE_HANDLE, &sh->state); 2099 } 2100 2101 2102 /* handle_completed_write_requests 2103 * any written block on an uptodate or failed drive can be returned. 2104 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but 2105 * never LOCKED, so we don't need to test 'failed' directly. 2106 */ 2107 static void handle_completed_write_requests(raid5_conf_t *conf, 2108 struct stripe_head *sh, int disks, struct bio **return_bi) 2109 { 2110 int i; 2111 struct r5dev *dev; 2112 2113 for (i = disks; i--; ) 2114 if (sh->dev[i].written) { 2115 dev = &sh->dev[i]; 2116 if (!test_bit(R5_LOCKED, &dev->flags) && 2117 test_bit(R5_UPTODATE, &dev->flags)) { 2118 /* We can return any write requests */ 2119 struct bio *wbi, *wbi2; 2120 int bitmap_end = 0; 2121 pr_debug("Return write for disc %d\n", i); 2122 spin_lock_irq(&conf->device_lock); 2123 wbi = dev->written; 2124 dev->written = NULL; 2125 while (wbi && wbi->bi_sector < 2126 dev->sector + STRIPE_SECTORS) { 2127 wbi2 = r5_next_bio(wbi, dev->sector); 2128 if (--wbi->bi_phys_segments == 0) { 2129 md_write_end(conf->mddev); 2130 wbi->bi_next = *return_bi; 2131 *return_bi = wbi; 2132 } 2133 wbi = wbi2; 2134 } 2135 if (dev->towrite == NULL) 2136 bitmap_end = 1; 2137 spin_unlock_irq(&conf->device_lock); 2138 if (bitmap_end) 2139 bitmap_endwrite(conf->mddev->bitmap, 2140 sh->sector, 2141 STRIPE_SECTORS, 2142 !test_bit(STRIPE_DEGRADED, &sh->state), 2143 0); 2144 } 2145 } 2146 } 2147 2148 static void handle_issuing_new_write_requests5(raid5_conf_t *conf, 2149 struct stripe_head *sh, struct stripe_head_state *s, int disks) 2150 { 2151 int rmw = 0, rcw = 0, i; 2152 for (i = disks; i--; ) { 2153 /* would I have to read this buffer for read_modify_write */ 2154 struct r5dev *dev = &sh->dev[i]; 2155 if ((dev->towrite || i == sh->pd_idx) && 2156 !test_bit(R5_LOCKED, &dev->flags) && 2157 !(test_bit(R5_UPTODATE, &dev->flags) || 2158 test_bit(R5_Wantcompute, &dev->flags))) { 2159 if (test_bit(R5_Insync, &dev->flags)) 2160 rmw++; 2161 else 2162 rmw += 2*disks; /* cannot read it */ 2163 } 2164 /* Would I have to read this buffer for reconstruct_write */ 2165 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx && 2166 !test_bit(R5_LOCKED, &dev->flags) && 2167 !(test_bit(R5_UPTODATE, &dev->flags) || 2168 test_bit(R5_Wantcompute, &dev->flags))) { 2169 if (test_bit(R5_Insync, &dev->flags)) rcw++; 2170 else 2171 rcw += 2*disks; 2172 } 2173 } 2174 pr_debug("for sector %llu, rmw=%d rcw=%d\n", 2175 (unsigned long long)sh->sector, rmw, rcw); 2176 set_bit(STRIPE_HANDLE, &sh->state); 2177 if (rmw < rcw && rmw > 0) 2178 /* prefer read-modify-write, but need to get some data */ 2179 for (i = disks; i--; ) { 2180 struct r5dev *dev = &sh->dev[i]; 2181 if ((dev->towrite || i == sh->pd_idx) && 2182 !test_bit(R5_LOCKED, &dev->flags) && 2183 !(test_bit(R5_UPTODATE, &dev->flags) || 2184 test_bit(R5_Wantcompute, &dev->flags)) && 2185 test_bit(R5_Insync, &dev->flags)) { 2186 if ( 2187 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2188 pr_debug("Read_old block " 2189 "%d for r-m-w\n", i); 2190 set_bit(R5_LOCKED, &dev->flags); 2191 set_bit(R5_Wantread, &dev->flags); 2192 if (!test_and_set_bit( 2193 STRIPE_OP_IO, &sh->ops.pending)) 2194 sh->ops.count++; 2195 s->locked++; 2196 } else { 2197 set_bit(STRIPE_DELAYED, &sh->state); 2198 set_bit(STRIPE_HANDLE, &sh->state); 2199 } 2200 } 2201 } 2202 if (rcw <= rmw && rcw > 0) 2203 /* want reconstruct write, but need to get some data */ 2204 for (i = disks; i--; ) { 2205 struct r5dev *dev = &sh->dev[i]; 2206 if (!test_bit(R5_OVERWRITE, &dev->flags) && 2207 i != sh->pd_idx && 2208 !test_bit(R5_LOCKED, &dev->flags) && 2209 !(test_bit(R5_UPTODATE, &dev->flags) || 2210 test_bit(R5_Wantcompute, &dev->flags)) && 2211 test_bit(R5_Insync, &dev->flags)) { 2212 if ( 2213 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2214 pr_debug("Read_old block " 2215 "%d for Reconstruct\n", i); 2216 set_bit(R5_LOCKED, &dev->flags); 2217 set_bit(R5_Wantread, &dev->flags); 2218 if (!test_and_set_bit( 2219 STRIPE_OP_IO, &sh->ops.pending)) 2220 sh->ops.count++; 2221 s->locked++; 2222 } else { 2223 set_bit(STRIPE_DELAYED, &sh->state); 2224 set_bit(STRIPE_HANDLE, &sh->state); 2225 } 2226 } 2227 } 2228 /* now if nothing is locked, and if we have enough data, 2229 * we can start a write request 2230 */ 2231 /* since handle_stripe can be called at any time we need to handle the 2232 * case where a compute block operation has been submitted and then a 2233 * subsequent call wants to start a write request. raid5_run_ops only 2234 * handles the case where compute block and postxor are requested 2235 * simultaneously. If this is not the case then new writes need to be 2236 * held off until the compute completes. 2237 */ 2238 if ((s->req_compute || 2239 !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) && 2240 (s->locked == 0 && (rcw == 0 || rmw == 0) && 2241 !test_bit(STRIPE_BIT_DELAY, &sh->state))) 2242 s->locked += handle_write_operations5(sh, rcw == 0, 0); 2243 } 2244 2245 static void handle_issuing_new_write_requests6(raid5_conf_t *conf, 2246 struct stripe_head *sh, struct stripe_head_state *s, 2247 struct r6_state *r6s, int disks) 2248 { 2249 int rcw = 0, must_compute = 0, pd_idx = sh->pd_idx, i; 2250 int qd_idx = r6s->qd_idx; 2251 for (i = disks; i--; ) { 2252 struct r5dev *dev = &sh->dev[i]; 2253 /* Would I have to read this buffer for reconstruct_write */ 2254 if (!test_bit(R5_OVERWRITE, &dev->flags) 2255 && i != pd_idx && i != qd_idx 2256 && (!test_bit(R5_LOCKED, &dev->flags) 2257 ) && 2258 !test_bit(R5_UPTODATE, &dev->flags)) { 2259 if (test_bit(R5_Insync, &dev->flags)) rcw++; 2260 else { 2261 pr_debug("raid6: must_compute: " 2262 "disk %d flags=%#lx\n", i, dev->flags); 2263 must_compute++; 2264 } 2265 } 2266 } 2267 pr_debug("for sector %llu, rcw=%d, must_compute=%d\n", 2268 (unsigned long long)sh->sector, rcw, must_compute); 2269 set_bit(STRIPE_HANDLE, &sh->state); 2270 2271 if (rcw > 0) 2272 /* want reconstruct write, but need to get some data */ 2273 for (i = disks; i--; ) { 2274 struct r5dev *dev = &sh->dev[i]; 2275 if (!test_bit(R5_OVERWRITE, &dev->flags) 2276 && !(s->failed == 0 && (i == pd_idx || i == qd_idx)) 2277 && !test_bit(R5_LOCKED, &dev->flags) && 2278 !test_bit(R5_UPTODATE, &dev->flags) && 2279 test_bit(R5_Insync, &dev->flags)) { 2280 if ( 2281 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2282 pr_debug("Read_old stripe %llu " 2283 "block %d for Reconstruct\n", 2284 (unsigned long long)sh->sector, i); 2285 set_bit(R5_LOCKED, &dev->flags); 2286 set_bit(R5_Wantread, &dev->flags); 2287 s->locked++; 2288 } else { 2289 pr_debug("Request delayed stripe %llu " 2290 "block %d for Reconstruct\n", 2291 (unsigned long long)sh->sector, i); 2292 set_bit(STRIPE_DELAYED, &sh->state); 2293 set_bit(STRIPE_HANDLE, &sh->state); 2294 } 2295 } 2296 } 2297 /* now if nothing is locked, and if we have enough data, we can start a 2298 * write request 2299 */ 2300 if (s->locked == 0 && rcw == 0 && 2301 !test_bit(STRIPE_BIT_DELAY, &sh->state)) { 2302 if (must_compute > 0) { 2303 /* We have failed blocks and need to compute them */ 2304 switch (s->failed) { 2305 case 0: 2306 BUG(); 2307 case 1: 2308 compute_block_1(sh, r6s->failed_num[0], 0); 2309 break; 2310 case 2: 2311 compute_block_2(sh, r6s->failed_num[0], 2312 r6s->failed_num[1]); 2313 break; 2314 default: /* This request should have been failed? */ 2315 BUG(); 2316 } 2317 } 2318 2319 pr_debug("Computing parity for stripe %llu\n", 2320 (unsigned long long)sh->sector); 2321 compute_parity6(sh, RECONSTRUCT_WRITE); 2322 /* now every locked buffer is ready to be written */ 2323 for (i = disks; i--; ) 2324 if (test_bit(R5_LOCKED, &sh->dev[i].flags)) { 2325 pr_debug("Writing stripe %llu block %d\n", 2326 (unsigned long long)sh->sector, i); 2327 s->locked++; 2328 set_bit(R5_Wantwrite, &sh->dev[i].flags); 2329 } 2330 /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */ 2331 set_bit(STRIPE_INSYNC, &sh->state); 2332 2333 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2334 atomic_dec(&conf->preread_active_stripes); 2335 if (atomic_read(&conf->preread_active_stripes) < 2336 IO_THRESHOLD) 2337 md_wakeup_thread(conf->mddev->thread); 2338 } 2339 } 2340 } 2341 2342 static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh, 2343 struct stripe_head_state *s, int disks) 2344 { 2345 set_bit(STRIPE_HANDLE, &sh->state); 2346 /* Take one of the following actions: 2347 * 1/ start a check parity operation if (uptodate == disks) 2348 * 2/ finish a check parity operation and act on the result 2349 * 3/ skip to the writeback section if we previously 2350 * initiated a recovery operation 2351 */ 2352 if (s->failed == 0 && 2353 !test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) { 2354 if (!test_and_set_bit(STRIPE_OP_CHECK, &sh->ops.pending)) { 2355 BUG_ON(s->uptodate != disks); 2356 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); 2357 sh->ops.count++; 2358 s->uptodate--; 2359 } else if ( 2360 test_and_clear_bit(STRIPE_OP_CHECK, &sh->ops.complete)) { 2361 clear_bit(STRIPE_OP_CHECK, &sh->ops.ack); 2362 clear_bit(STRIPE_OP_CHECK, &sh->ops.pending); 2363 2364 if (sh->ops.zero_sum_result == 0) 2365 /* parity is correct (on disc, 2366 * not in buffer any more) 2367 */ 2368 set_bit(STRIPE_INSYNC, &sh->state); 2369 else { 2370 conf->mddev->resync_mismatches += 2371 STRIPE_SECTORS; 2372 if (test_bit( 2373 MD_RECOVERY_CHECK, &conf->mddev->recovery)) 2374 /* don't try to repair!! */ 2375 set_bit(STRIPE_INSYNC, &sh->state); 2376 else { 2377 set_bit(STRIPE_OP_COMPUTE_BLK, 2378 &sh->ops.pending); 2379 set_bit(STRIPE_OP_MOD_REPAIR_PD, 2380 &sh->ops.pending); 2381 set_bit(R5_Wantcompute, 2382 &sh->dev[sh->pd_idx].flags); 2383 sh->ops.target = sh->pd_idx; 2384 sh->ops.count++; 2385 s->uptodate++; 2386 } 2387 } 2388 } 2389 } 2390 2391 /* check if we can clear a parity disk reconstruct */ 2392 if (test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete) && 2393 test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) { 2394 2395 clear_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending); 2396 clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete); 2397 clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.ack); 2398 clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending); 2399 } 2400 2401 /* Wait for check parity and compute block operations to complete 2402 * before write-back 2403 */ 2404 if (!test_bit(STRIPE_INSYNC, &sh->state) && 2405 !test_bit(STRIPE_OP_CHECK, &sh->ops.pending) && 2406 !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) { 2407 struct r5dev *dev; 2408 /* either failed parity check, or recovery is happening */ 2409 if (s->failed == 0) 2410 s->failed_num = sh->pd_idx; 2411 dev = &sh->dev[s->failed_num]; 2412 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); 2413 BUG_ON(s->uptodate != disks); 2414 2415 set_bit(R5_LOCKED, &dev->flags); 2416 set_bit(R5_Wantwrite, &dev->flags); 2417 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending)) 2418 sh->ops.count++; 2419 2420 clear_bit(STRIPE_DEGRADED, &sh->state); 2421 s->locked++; 2422 set_bit(STRIPE_INSYNC, &sh->state); 2423 } 2424 } 2425 2426 2427 static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh, 2428 struct stripe_head_state *s, 2429 struct r6_state *r6s, struct page *tmp_page, 2430 int disks) 2431 { 2432 int update_p = 0, update_q = 0; 2433 struct r5dev *dev; 2434 int pd_idx = sh->pd_idx; 2435 int qd_idx = r6s->qd_idx; 2436 2437 set_bit(STRIPE_HANDLE, &sh->state); 2438 2439 BUG_ON(s->failed > 2); 2440 BUG_ON(s->uptodate < disks); 2441 /* Want to check and possibly repair P and Q. 2442 * However there could be one 'failed' device, in which 2443 * case we can only check one of them, possibly using the 2444 * other to generate missing data 2445 */ 2446 2447 /* If !tmp_page, we cannot do the calculations, 2448 * but as we have set STRIPE_HANDLE, we will soon be called 2449 * by stripe_handle with a tmp_page - just wait until then. 2450 */ 2451 if (tmp_page) { 2452 if (s->failed == r6s->q_failed) { 2453 /* The only possible failed device holds 'Q', so it 2454 * makes sense to check P (If anything else were failed, 2455 * we would have used P to recreate it). 2456 */ 2457 compute_block_1(sh, pd_idx, 1); 2458 if (!page_is_zero(sh->dev[pd_idx].page)) { 2459 compute_block_1(sh, pd_idx, 0); 2460 update_p = 1; 2461 } 2462 } 2463 if (!r6s->q_failed && s->failed < 2) { 2464 /* q is not failed, and we didn't use it to generate 2465 * anything, so it makes sense to check it 2466 */ 2467 memcpy(page_address(tmp_page), 2468 page_address(sh->dev[qd_idx].page), 2469 STRIPE_SIZE); 2470 compute_parity6(sh, UPDATE_PARITY); 2471 if (memcmp(page_address(tmp_page), 2472 page_address(sh->dev[qd_idx].page), 2473 STRIPE_SIZE) != 0) { 2474 clear_bit(STRIPE_INSYNC, &sh->state); 2475 update_q = 1; 2476 } 2477 } 2478 if (update_p || update_q) { 2479 conf->mddev->resync_mismatches += STRIPE_SECTORS; 2480 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 2481 /* don't try to repair!! */ 2482 update_p = update_q = 0; 2483 } 2484 2485 /* now write out any block on a failed drive, 2486 * or P or Q if they need it 2487 */ 2488 2489 if (s->failed == 2) { 2490 dev = &sh->dev[r6s->failed_num[1]]; 2491 s->locked++; 2492 set_bit(R5_LOCKED, &dev->flags); 2493 set_bit(R5_Wantwrite, &dev->flags); 2494 } 2495 if (s->failed >= 1) { 2496 dev = &sh->dev[r6s->failed_num[0]]; 2497 s->locked++; 2498 set_bit(R5_LOCKED, &dev->flags); 2499 set_bit(R5_Wantwrite, &dev->flags); 2500 } 2501 2502 if (update_p) { 2503 dev = &sh->dev[pd_idx]; 2504 s->locked++; 2505 set_bit(R5_LOCKED, &dev->flags); 2506 set_bit(R5_Wantwrite, &dev->flags); 2507 } 2508 if (update_q) { 2509 dev = &sh->dev[qd_idx]; 2510 s->locked++; 2511 set_bit(R5_LOCKED, &dev->flags); 2512 set_bit(R5_Wantwrite, &dev->flags); 2513 } 2514 clear_bit(STRIPE_DEGRADED, &sh->state); 2515 2516 set_bit(STRIPE_INSYNC, &sh->state); 2517 } 2518 } 2519 2520 static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh, 2521 struct r6_state *r6s) 2522 { 2523 int i; 2524 2525 /* We have read all the blocks in this stripe and now we need to 2526 * copy some of them into a target stripe for expand. 2527 */ 2528 struct dma_async_tx_descriptor *tx = NULL; 2529 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); 2530 for (i = 0; i < sh->disks; i++) 2531 if (i != sh->pd_idx && (!r6s || i != r6s->qd_idx)) { 2532 int dd_idx, pd_idx, j; 2533 struct stripe_head *sh2; 2534 2535 sector_t bn = compute_blocknr(sh, i); 2536 sector_t s = raid5_compute_sector(bn, conf->raid_disks, 2537 conf->raid_disks - 2538 conf->max_degraded, &dd_idx, 2539 &pd_idx, conf); 2540 sh2 = get_active_stripe(conf, s, conf->raid_disks, 2541 pd_idx, 1); 2542 if (sh2 == NULL) 2543 /* so far only the early blocks of this stripe 2544 * have been requested. When later blocks 2545 * get requested, we will try again 2546 */ 2547 continue; 2548 if (!test_bit(STRIPE_EXPANDING, &sh2->state) || 2549 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) { 2550 /* must have already done this block */ 2551 release_stripe(sh2); 2552 continue; 2553 } 2554 2555 /* place all the copies on one channel */ 2556 tx = async_memcpy(sh2->dev[dd_idx].page, 2557 sh->dev[i].page, 0, 0, STRIPE_SIZE, 2558 ASYNC_TX_DEP_ACK, tx, NULL, NULL); 2559 2560 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); 2561 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); 2562 for (j = 0; j < conf->raid_disks; j++) 2563 if (j != sh2->pd_idx && 2564 (!r6s || j != raid6_next_disk(sh2->pd_idx, 2565 sh2->disks)) && 2566 !test_bit(R5_Expanded, &sh2->dev[j].flags)) 2567 break; 2568 if (j == conf->raid_disks) { 2569 set_bit(STRIPE_EXPAND_READY, &sh2->state); 2570 set_bit(STRIPE_HANDLE, &sh2->state); 2571 } 2572 release_stripe(sh2); 2573 2574 } 2575 /* done submitting copies, wait for them to complete */ 2576 if (tx) { 2577 async_tx_ack(tx); 2578 dma_wait_for_async_tx(tx); 2579 } 2580 } 2581 2582 /* 2583 * handle_stripe - do things to a stripe. 2584 * 2585 * We lock the stripe and then examine the state of various bits 2586 * to see what needs to be done. 2587 * Possible results: 2588 * return some read request which now have data 2589 * return some write requests which are safely on disc 2590 * schedule a read on some buffers 2591 * schedule a write of some buffers 2592 * return confirmation of parity correctness 2593 * 2594 * buffers are taken off read_list or write_list, and bh_cache buffers 2595 * get BH_Lock set before the stripe lock is released. 2596 * 2597 */ 2598 2599 static void handle_stripe5(struct stripe_head *sh) 2600 { 2601 raid5_conf_t *conf = sh->raid_conf; 2602 int disks = sh->disks, i; 2603 struct bio *return_bi = NULL; 2604 struct stripe_head_state s; 2605 struct r5dev *dev; 2606 unsigned long pending = 0; 2607 2608 memset(&s, 0, sizeof(s)); 2609 pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d " 2610 "ops=%lx:%lx:%lx\n", (unsigned long long)sh->sector, sh->state, 2611 atomic_read(&sh->count), sh->pd_idx, 2612 sh->ops.pending, sh->ops.ack, sh->ops.complete); 2613 2614 spin_lock(&sh->lock); 2615 clear_bit(STRIPE_HANDLE, &sh->state); 2616 clear_bit(STRIPE_DELAYED, &sh->state); 2617 2618 s.syncing = test_bit(STRIPE_SYNCING, &sh->state); 2619 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); 2620 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); 2621 /* Now to look around and see what can be done */ 2622 2623 rcu_read_lock(); 2624 for (i=disks; i--; ) { 2625 mdk_rdev_t *rdev; 2626 struct r5dev *dev = &sh->dev[i]; 2627 clear_bit(R5_Insync, &dev->flags); 2628 2629 pr_debug("check %d: state 0x%lx toread %p read %p write %p " 2630 "written %p\n", i, dev->flags, dev->toread, dev->read, 2631 dev->towrite, dev->written); 2632 2633 /* maybe we can request a biofill operation 2634 * 2635 * new wantfill requests are only permitted while 2636 * STRIPE_OP_BIOFILL is clear 2637 */ 2638 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread && 2639 !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)) 2640 set_bit(R5_Wantfill, &dev->flags); 2641 2642 /* now count some things */ 2643 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++; 2644 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++; 2645 if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++; 2646 2647 if (test_bit(R5_Wantfill, &dev->flags)) 2648 s.to_fill++; 2649 else if (dev->toread) 2650 s.to_read++; 2651 if (dev->towrite) { 2652 s.to_write++; 2653 if (!test_bit(R5_OVERWRITE, &dev->flags)) 2654 s.non_overwrite++; 2655 } 2656 if (dev->written) 2657 s.written++; 2658 rdev = rcu_dereference(conf->disks[i].rdev); 2659 if (!rdev || !test_bit(In_sync, &rdev->flags)) { 2660 /* The ReadError flag will just be confusing now */ 2661 clear_bit(R5_ReadError, &dev->flags); 2662 clear_bit(R5_ReWrite, &dev->flags); 2663 } 2664 if (!rdev || !test_bit(In_sync, &rdev->flags) 2665 || test_bit(R5_ReadError, &dev->flags)) { 2666 s.failed++; 2667 s.failed_num = i; 2668 } else 2669 set_bit(R5_Insync, &dev->flags); 2670 } 2671 rcu_read_unlock(); 2672 2673 if (s.to_fill && !test_and_set_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)) 2674 sh->ops.count++; 2675 2676 pr_debug("locked=%d uptodate=%d to_read=%d" 2677 " to_write=%d failed=%d failed_num=%d\n", 2678 s.locked, s.uptodate, s.to_read, s.to_write, 2679 s.failed, s.failed_num); 2680 /* check if the array has lost two devices and, if so, some requests might 2681 * need to be failed 2682 */ 2683 if (s.failed > 1 && s.to_read+s.to_write+s.written) 2684 handle_requests_to_failed_array(conf, sh, &s, disks, 2685 &return_bi); 2686 if (s.failed > 1 && s.syncing) { 2687 md_done_sync(conf->mddev, STRIPE_SECTORS,0); 2688 clear_bit(STRIPE_SYNCING, &sh->state); 2689 s.syncing = 0; 2690 } 2691 2692 /* might be able to return some write requests if the parity block 2693 * is safe, or on a failed drive 2694 */ 2695 dev = &sh->dev[sh->pd_idx]; 2696 if ( s.written && 2697 ((test_bit(R5_Insync, &dev->flags) && 2698 !test_bit(R5_LOCKED, &dev->flags) && 2699 test_bit(R5_UPTODATE, &dev->flags)) || 2700 (s.failed == 1 && s.failed_num == sh->pd_idx))) 2701 handle_completed_write_requests(conf, sh, disks, &return_bi); 2702 2703 /* Now we might consider reading some blocks, either to check/generate 2704 * parity, or to satisfy requests 2705 * or to load a block that is being partially written. 2706 */ 2707 if (s.to_read || s.non_overwrite || 2708 (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding || 2709 test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) 2710 handle_issuing_new_read_requests5(sh, &s, disks); 2711 2712 /* Now we check to see if any write operations have recently 2713 * completed 2714 */ 2715 2716 /* leave prexor set until postxor is done, allows us to distinguish 2717 * a rmw from a rcw during biodrain 2718 */ 2719 if (test_bit(STRIPE_OP_PREXOR, &sh->ops.complete) && 2720 test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete)) { 2721 2722 clear_bit(STRIPE_OP_PREXOR, &sh->ops.complete); 2723 clear_bit(STRIPE_OP_PREXOR, &sh->ops.ack); 2724 clear_bit(STRIPE_OP_PREXOR, &sh->ops.pending); 2725 2726 for (i = disks; i--; ) 2727 clear_bit(R5_Wantprexor, &sh->dev[i].flags); 2728 } 2729 2730 /* if only POSTXOR is set then this is an 'expand' postxor */ 2731 if (test_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete) && 2732 test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete)) { 2733 2734 clear_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete); 2735 clear_bit(STRIPE_OP_BIODRAIN, &sh->ops.ack); 2736 clear_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending); 2737 2738 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.complete); 2739 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.ack); 2740 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.pending); 2741 2742 /* All the 'written' buffers and the parity block are ready to 2743 * be written back to disk 2744 */ 2745 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags)); 2746 for (i = disks; i--; ) { 2747 dev = &sh->dev[i]; 2748 if (test_bit(R5_LOCKED, &dev->flags) && 2749 (i == sh->pd_idx || dev->written)) { 2750 pr_debug("Writing block %d\n", i); 2751 set_bit(R5_Wantwrite, &dev->flags); 2752 if (!test_and_set_bit( 2753 STRIPE_OP_IO, &sh->ops.pending)) 2754 sh->ops.count++; 2755 if (!test_bit(R5_Insync, &dev->flags) || 2756 (i == sh->pd_idx && s.failed == 0)) 2757 set_bit(STRIPE_INSYNC, &sh->state); 2758 } 2759 } 2760 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2761 atomic_dec(&conf->preread_active_stripes); 2762 if (atomic_read(&conf->preread_active_stripes) < 2763 IO_THRESHOLD) 2764 md_wakeup_thread(conf->mddev->thread); 2765 } 2766 } 2767 2768 /* Now to consider new write requests and what else, if anything 2769 * should be read. We do not handle new writes when: 2770 * 1/ A 'write' operation (copy+xor) is already in flight. 2771 * 2/ A 'check' operation is in flight, as it may clobber the parity 2772 * block. 2773 */ 2774 if (s.to_write && !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending) && 2775 !test_bit(STRIPE_OP_CHECK, &sh->ops.pending)) 2776 handle_issuing_new_write_requests5(conf, sh, &s, disks); 2777 2778 /* maybe we need to check and possibly fix the parity for this stripe 2779 * Any reads will already have been scheduled, so we just see if enough 2780 * data is available. The parity check is held off while parity 2781 * dependent operations are in flight. 2782 */ 2783 if ((s.syncing && s.locked == 0 && 2784 !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending) && 2785 !test_bit(STRIPE_INSYNC, &sh->state)) || 2786 test_bit(STRIPE_OP_CHECK, &sh->ops.pending) || 2787 test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) 2788 handle_parity_checks5(conf, sh, &s, disks); 2789 2790 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { 2791 md_done_sync(conf->mddev, STRIPE_SECTORS,1); 2792 clear_bit(STRIPE_SYNCING, &sh->state); 2793 } 2794 2795 /* If the failed drive is just a ReadError, then we might need to progress 2796 * the repair/check process 2797 */ 2798 if (s.failed == 1 && !conf->mddev->ro && 2799 test_bit(R5_ReadError, &sh->dev[s.failed_num].flags) 2800 && !test_bit(R5_LOCKED, &sh->dev[s.failed_num].flags) 2801 && test_bit(R5_UPTODATE, &sh->dev[s.failed_num].flags) 2802 ) { 2803 dev = &sh->dev[s.failed_num]; 2804 if (!test_bit(R5_ReWrite, &dev->flags)) { 2805 set_bit(R5_Wantwrite, &dev->flags); 2806 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending)) 2807 sh->ops.count++; 2808 set_bit(R5_ReWrite, &dev->flags); 2809 set_bit(R5_LOCKED, &dev->flags); 2810 s.locked++; 2811 } else { 2812 /* let's read it back */ 2813 set_bit(R5_Wantread, &dev->flags); 2814 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending)) 2815 sh->ops.count++; 2816 set_bit(R5_LOCKED, &dev->flags); 2817 s.locked++; 2818 } 2819 } 2820 2821 /* Finish postxor operations initiated by the expansion 2822 * process 2823 */ 2824 if (test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete) && 2825 !test_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending)) { 2826 2827 clear_bit(STRIPE_EXPANDING, &sh->state); 2828 2829 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.pending); 2830 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.ack); 2831 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.complete); 2832 2833 for (i = conf->raid_disks; i--; ) { 2834 set_bit(R5_Wantwrite, &sh->dev[i].flags); 2835 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending)) 2836 sh->ops.count++; 2837 } 2838 } 2839 2840 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && 2841 !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) { 2842 /* Need to write out all blocks after computing parity */ 2843 sh->disks = conf->raid_disks; 2844 sh->pd_idx = stripe_to_pdidx(sh->sector, conf, 2845 conf->raid_disks); 2846 s.locked += handle_write_operations5(sh, 1, 1); 2847 } else if (s.expanded && 2848 !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) { 2849 clear_bit(STRIPE_EXPAND_READY, &sh->state); 2850 atomic_dec(&conf->reshape_stripes); 2851 wake_up(&conf->wait_for_overlap); 2852 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 2853 } 2854 2855 if (s.expanding && s.locked == 0) 2856 handle_stripe_expansion(conf, sh, NULL); 2857 2858 if (sh->ops.count) 2859 pending = get_stripe_work(sh); 2860 2861 spin_unlock(&sh->lock); 2862 2863 if (pending) 2864 raid5_run_ops(sh, pending); 2865 2866 return_io(return_bi); 2867 2868 } 2869 2870 static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page) 2871 { 2872 raid6_conf_t *conf = sh->raid_conf; 2873 int disks = sh->disks; 2874 struct bio *return_bi = NULL; 2875 int i, pd_idx = sh->pd_idx; 2876 struct stripe_head_state s; 2877 struct r6_state r6s; 2878 struct r5dev *dev, *pdev, *qdev; 2879 2880 r6s.qd_idx = raid6_next_disk(pd_idx, disks); 2881 pr_debug("handling stripe %llu, state=%#lx cnt=%d, " 2882 "pd_idx=%d, qd_idx=%d\n", 2883 (unsigned long long)sh->sector, sh->state, 2884 atomic_read(&sh->count), pd_idx, r6s.qd_idx); 2885 memset(&s, 0, sizeof(s)); 2886 2887 spin_lock(&sh->lock); 2888 clear_bit(STRIPE_HANDLE, &sh->state); 2889 clear_bit(STRIPE_DELAYED, &sh->state); 2890 2891 s.syncing = test_bit(STRIPE_SYNCING, &sh->state); 2892 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); 2893 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); 2894 /* Now to look around and see what can be done */ 2895 2896 rcu_read_lock(); 2897 for (i=disks; i--; ) { 2898 mdk_rdev_t *rdev; 2899 dev = &sh->dev[i]; 2900 clear_bit(R5_Insync, &dev->flags); 2901 2902 pr_debug("check %d: state 0x%lx read %p write %p written %p\n", 2903 i, dev->flags, dev->toread, dev->towrite, dev->written); 2904 /* maybe we can reply to a read */ 2905 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) { 2906 struct bio *rbi, *rbi2; 2907 pr_debug("Return read for disc %d\n", i); 2908 spin_lock_irq(&conf->device_lock); 2909 rbi = dev->toread; 2910 dev->toread = NULL; 2911 if (test_and_clear_bit(R5_Overlap, &dev->flags)) 2912 wake_up(&conf->wait_for_overlap); 2913 spin_unlock_irq(&conf->device_lock); 2914 while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) { 2915 copy_data(0, rbi, dev->page, dev->sector); 2916 rbi2 = r5_next_bio(rbi, dev->sector); 2917 spin_lock_irq(&conf->device_lock); 2918 if (--rbi->bi_phys_segments == 0) { 2919 rbi->bi_next = return_bi; 2920 return_bi = rbi; 2921 } 2922 spin_unlock_irq(&conf->device_lock); 2923 rbi = rbi2; 2924 } 2925 } 2926 2927 /* now count some things */ 2928 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++; 2929 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++; 2930 2931 2932 if (dev->toread) 2933 s.to_read++; 2934 if (dev->towrite) { 2935 s.to_write++; 2936 if (!test_bit(R5_OVERWRITE, &dev->flags)) 2937 s.non_overwrite++; 2938 } 2939 if (dev->written) 2940 s.written++; 2941 rdev = rcu_dereference(conf->disks[i].rdev); 2942 if (!rdev || !test_bit(In_sync, &rdev->flags)) { 2943 /* The ReadError flag will just be confusing now */ 2944 clear_bit(R5_ReadError, &dev->flags); 2945 clear_bit(R5_ReWrite, &dev->flags); 2946 } 2947 if (!rdev || !test_bit(In_sync, &rdev->flags) 2948 || test_bit(R5_ReadError, &dev->flags)) { 2949 if (s.failed < 2) 2950 r6s.failed_num[s.failed] = i; 2951 s.failed++; 2952 } else 2953 set_bit(R5_Insync, &dev->flags); 2954 } 2955 rcu_read_unlock(); 2956 pr_debug("locked=%d uptodate=%d to_read=%d" 2957 " to_write=%d failed=%d failed_num=%d,%d\n", 2958 s.locked, s.uptodate, s.to_read, s.to_write, s.failed, 2959 r6s.failed_num[0], r6s.failed_num[1]); 2960 /* check if the array has lost >2 devices and, if so, some requests 2961 * might need to be failed 2962 */ 2963 if (s.failed > 2 && s.to_read+s.to_write+s.written) 2964 handle_requests_to_failed_array(conf, sh, &s, disks, 2965 &return_bi); 2966 if (s.failed > 2 && s.syncing) { 2967 md_done_sync(conf->mddev, STRIPE_SECTORS,0); 2968 clear_bit(STRIPE_SYNCING, &sh->state); 2969 s.syncing = 0; 2970 } 2971 2972 /* 2973 * might be able to return some write requests if the parity blocks 2974 * are safe, or on a failed drive 2975 */ 2976 pdev = &sh->dev[pd_idx]; 2977 r6s.p_failed = (s.failed >= 1 && r6s.failed_num[0] == pd_idx) 2978 || (s.failed >= 2 && r6s.failed_num[1] == pd_idx); 2979 qdev = &sh->dev[r6s.qd_idx]; 2980 r6s.q_failed = (s.failed >= 1 && r6s.failed_num[0] == r6s.qd_idx) 2981 || (s.failed >= 2 && r6s.failed_num[1] == r6s.qd_idx); 2982 2983 if ( s.written && 2984 ( r6s.p_failed || ((test_bit(R5_Insync, &pdev->flags) 2985 && !test_bit(R5_LOCKED, &pdev->flags) 2986 && test_bit(R5_UPTODATE, &pdev->flags)))) && 2987 ( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags) 2988 && !test_bit(R5_LOCKED, &qdev->flags) 2989 && test_bit(R5_UPTODATE, &qdev->flags))))) 2990 handle_completed_write_requests(conf, sh, disks, &return_bi); 2991 2992 /* Now we might consider reading some blocks, either to check/generate 2993 * parity, or to satisfy requests 2994 * or to load a block that is being partially written. 2995 */ 2996 if (s.to_read || s.non_overwrite || (s.to_write && s.failed) || 2997 (s.syncing && (s.uptodate < disks)) || s.expanding) 2998 handle_issuing_new_read_requests6(sh, &s, &r6s, disks); 2999 3000 /* now to consider writing and what else, if anything should be read */ 3001 if (s.to_write) 3002 handle_issuing_new_write_requests6(conf, sh, &s, &r6s, disks); 3003 3004 /* maybe we need to check and possibly fix the parity for this stripe 3005 * Any reads will already have been scheduled, so we just see if enough 3006 * data is available 3007 */ 3008 if (s.syncing && s.locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state)) 3009 handle_parity_checks6(conf, sh, &s, &r6s, tmp_page, disks); 3010 3011 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { 3012 md_done_sync(conf->mddev, STRIPE_SECTORS,1); 3013 clear_bit(STRIPE_SYNCING, &sh->state); 3014 } 3015 3016 /* If the failed drives are just a ReadError, then we might need 3017 * to progress the repair/check process 3018 */ 3019 if (s.failed <= 2 && !conf->mddev->ro) 3020 for (i = 0; i < s.failed; i++) { 3021 dev = &sh->dev[r6s.failed_num[i]]; 3022 if (test_bit(R5_ReadError, &dev->flags) 3023 && !test_bit(R5_LOCKED, &dev->flags) 3024 && test_bit(R5_UPTODATE, &dev->flags) 3025 ) { 3026 if (!test_bit(R5_ReWrite, &dev->flags)) { 3027 set_bit(R5_Wantwrite, &dev->flags); 3028 set_bit(R5_ReWrite, &dev->flags); 3029 set_bit(R5_LOCKED, &dev->flags); 3030 } else { 3031 /* let's read it back */ 3032 set_bit(R5_Wantread, &dev->flags); 3033 set_bit(R5_LOCKED, &dev->flags); 3034 } 3035 } 3036 } 3037 3038 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) { 3039 /* Need to write out all blocks after computing P&Q */ 3040 sh->disks = conf->raid_disks; 3041 sh->pd_idx = stripe_to_pdidx(sh->sector, conf, 3042 conf->raid_disks); 3043 compute_parity6(sh, RECONSTRUCT_WRITE); 3044 for (i = conf->raid_disks ; i-- ; ) { 3045 set_bit(R5_LOCKED, &sh->dev[i].flags); 3046 s.locked++; 3047 set_bit(R5_Wantwrite, &sh->dev[i].flags); 3048 } 3049 clear_bit(STRIPE_EXPANDING, &sh->state); 3050 } else if (s.expanded) { 3051 clear_bit(STRIPE_EXPAND_READY, &sh->state); 3052 atomic_dec(&conf->reshape_stripes); 3053 wake_up(&conf->wait_for_overlap); 3054 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 3055 } 3056 3057 if (s.expanding && s.locked == 0) 3058 handle_stripe_expansion(conf, sh, &r6s); 3059 3060 spin_unlock(&sh->lock); 3061 3062 return_io(return_bi); 3063 3064 for (i=disks; i-- ;) { 3065 int rw; 3066 struct bio *bi; 3067 mdk_rdev_t *rdev; 3068 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) 3069 rw = WRITE; 3070 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 3071 rw = READ; 3072 else 3073 continue; 3074 3075 bi = &sh->dev[i].req; 3076 3077 bi->bi_rw = rw; 3078 if (rw == WRITE) 3079 bi->bi_end_io = raid5_end_write_request; 3080 else 3081 bi->bi_end_io = raid5_end_read_request; 3082 3083 rcu_read_lock(); 3084 rdev = rcu_dereference(conf->disks[i].rdev); 3085 if (rdev && test_bit(Faulty, &rdev->flags)) 3086 rdev = NULL; 3087 if (rdev) 3088 atomic_inc(&rdev->nr_pending); 3089 rcu_read_unlock(); 3090 3091 if (rdev) { 3092 if (s.syncing || s.expanding || s.expanded) 3093 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 3094 3095 bi->bi_bdev = rdev->bdev; 3096 pr_debug("for %llu schedule op %ld on disc %d\n", 3097 (unsigned long long)sh->sector, bi->bi_rw, i); 3098 atomic_inc(&sh->count); 3099 bi->bi_sector = sh->sector + rdev->data_offset; 3100 bi->bi_flags = 1 << BIO_UPTODATE; 3101 bi->bi_vcnt = 1; 3102 bi->bi_max_vecs = 1; 3103 bi->bi_idx = 0; 3104 bi->bi_io_vec = &sh->dev[i].vec; 3105 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 3106 bi->bi_io_vec[0].bv_offset = 0; 3107 bi->bi_size = STRIPE_SIZE; 3108 bi->bi_next = NULL; 3109 if (rw == WRITE && 3110 test_bit(R5_ReWrite, &sh->dev[i].flags)) 3111 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); 3112 generic_make_request(bi); 3113 } else { 3114 if (rw == WRITE) 3115 set_bit(STRIPE_DEGRADED, &sh->state); 3116 pr_debug("skip op %ld on disc %d for sector %llu\n", 3117 bi->bi_rw, i, (unsigned long long)sh->sector); 3118 clear_bit(R5_LOCKED, &sh->dev[i].flags); 3119 set_bit(STRIPE_HANDLE, &sh->state); 3120 } 3121 } 3122 } 3123 3124 static void handle_stripe(struct stripe_head *sh, struct page *tmp_page) 3125 { 3126 if (sh->raid_conf->level == 6) 3127 handle_stripe6(sh, tmp_page); 3128 else 3129 handle_stripe5(sh); 3130 } 3131 3132 3133 3134 static void raid5_activate_delayed(raid5_conf_t *conf) 3135 { 3136 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { 3137 while (!list_empty(&conf->delayed_list)) { 3138 struct list_head *l = conf->delayed_list.next; 3139 struct stripe_head *sh; 3140 sh = list_entry(l, struct stripe_head, lru); 3141 list_del_init(l); 3142 clear_bit(STRIPE_DELAYED, &sh->state); 3143 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3144 atomic_inc(&conf->preread_active_stripes); 3145 list_add_tail(&sh->lru, &conf->handle_list); 3146 } 3147 } 3148 } 3149 3150 static void activate_bit_delay(raid5_conf_t *conf) 3151 { 3152 /* device_lock is held */ 3153 struct list_head head; 3154 list_add(&head, &conf->bitmap_list); 3155 list_del_init(&conf->bitmap_list); 3156 while (!list_empty(&head)) { 3157 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); 3158 list_del_init(&sh->lru); 3159 atomic_inc(&sh->count); 3160 __release_stripe(conf, sh); 3161 } 3162 } 3163 3164 static void unplug_slaves(mddev_t *mddev) 3165 { 3166 raid5_conf_t *conf = mddev_to_conf(mddev); 3167 int i; 3168 3169 rcu_read_lock(); 3170 for (i=0; i<mddev->raid_disks; i++) { 3171 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); 3172 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { 3173 struct request_queue *r_queue = bdev_get_queue(rdev->bdev); 3174 3175 atomic_inc(&rdev->nr_pending); 3176 rcu_read_unlock(); 3177 3178 if (r_queue->unplug_fn) 3179 r_queue->unplug_fn(r_queue); 3180 3181 rdev_dec_pending(rdev, mddev); 3182 rcu_read_lock(); 3183 } 3184 } 3185 rcu_read_unlock(); 3186 } 3187 3188 static void raid5_unplug_device(struct request_queue *q) 3189 { 3190 mddev_t *mddev = q->queuedata; 3191 raid5_conf_t *conf = mddev_to_conf(mddev); 3192 unsigned long flags; 3193 3194 spin_lock_irqsave(&conf->device_lock, flags); 3195 3196 if (blk_remove_plug(q)) { 3197 conf->seq_flush++; 3198 raid5_activate_delayed(conf); 3199 } 3200 md_wakeup_thread(mddev->thread); 3201 3202 spin_unlock_irqrestore(&conf->device_lock, flags); 3203 3204 unplug_slaves(mddev); 3205 } 3206 3207 static int raid5_issue_flush(struct request_queue *q, struct gendisk *disk, 3208 sector_t *error_sector) 3209 { 3210 mddev_t *mddev = q->queuedata; 3211 raid5_conf_t *conf = mddev_to_conf(mddev); 3212 int i, ret = 0; 3213 3214 rcu_read_lock(); 3215 for (i=0; i<mddev->raid_disks && ret == 0; i++) { 3216 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); 3217 if (rdev && !test_bit(Faulty, &rdev->flags)) { 3218 struct block_device *bdev = rdev->bdev; 3219 struct request_queue *r_queue = bdev_get_queue(bdev); 3220 3221 if (!r_queue->issue_flush_fn) 3222 ret = -EOPNOTSUPP; 3223 else { 3224 atomic_inc(&rdev->nr_pending); 3225 rcu_read_unlock(); 3226 ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, 3227 error_sector); 3228 rdev_dec_pending(rdev, mddev); 3229 rcu_read_lock(); 3230 } 3231 } 3232 } 3233 rcu_read_unlock(); 3234 return ret; 3235 } 3236 3237 static int raid5_congested(void *data, int bits) 3238 { 3239 mddev_t *mddev = data; 3240 raid5_conf_t *conf = mddev_to_conf(mddev); 3241 3242 /* No difference between reads and writes. Just check 3243 * how busy the stripe_cache is 3244 */ 3245 if (conf->inactive_blocked) 3246 return 1; 3247 if (conf->quiesce) 3248 return 1; 3249 if (list_empty_careful(&conf->inactive_list)) 3250 return 1; 3251 3252 return 0; 3253 } 3254 3255 /* We want read requests to align with chunks where possible, 3256 * but write requests don't need to. 3257 */ 3258 static int raid5_mergeable_bvec(struct request_queue *q, struct bio *bio, struct bio_vec *biovec) 3259 { 3260 mddev_t *mddev = q->queuedata; 3261 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); 3262 int max; 3263 unsigned int chunk_sectors = mddev->chunk_size >> 9; 3264 unsigned int bio_sectors = bio->bi_size >> 9; 3265 3266 if (bio_data_dir(bio) == WRITE) 3267 return biovec->bv_len; /* always allow writes to be mergeable */ 3268 3269 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; 3270 if (max < 0) max = 0; 3271 if (max <= biovec->bv_len && bio_sectors == 0) 3272 return biovec->bv_len; 3273 else 3274 return max; 3275 } 3276 3277 3278 static int in_chunk_boundary(mddev_t *mddev, struct bio *bio) 3279 { 3280 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); 3281 unsigned int chunk_sectors = mddev->chunk_size >> 9; 3282 unsigned int bio_sectors = bio->bi_size >> 9; 3283 3284 return chunk_sectors >= 3285 ((sector & (chunk_sectors - 1)) + bio_sectors); 3286 } 3287 3288 /* 3289 * add bio to the retry LIFO ( in O(1) ... we are in interrupt ) 3290 * later sampled by raid5d. 3291 */ 3292 static void add_bio_to_retry(struct bio *bi,raid5_conf_t *conf) 3293 { 3294 unsigned long flags; 3295 3296 spin_lock_irqsave(&conf->device_lock, flags); 3297 3298 bi->bi_next = conf->retry_read_aligned_list; 3299 conf->retry_read_aligned_list = bi; 3300 3301 spin_unlock_irqrestore(&conf->device_lock, flags); 3302 md_wakeup_thread(conf->mddev->thread); 3303 } 3304 3305 3306 static struct bio *remove_bio_from_retry(raid5_conf_t *conf) 3307 { 3308 struct bio *bi; 3309 3310 bi = conf->retry_read_aligned; 3311 if (bi) { 3312 conf->retry_read_aligned = NULL; 3313 return bi; 3314 } 3315 bi = conf->retry_read_aligned_list; 3316 if(bi) { 3317 conf->retry_read_aligned_list = bi->bi_next; 3318 bi->bi_next = NULL; 3319 bi->bi_phys_segments = 1; /* biased count of active stripes */ 3320 bi->bi_hw_segments = 0; /* count of processed stripes */ 3321 } 3322 3323 return bi; 3324 } 3325 3326 3327 /* 3328 * The "raid5_align_endio" should check if the read succeeded and if it 3329 * did, call bio_endio on the original bio (having bio_put the new bio 3330 * first). 3331 * If the read failed.. 3332 */ 3333 static void raid5_align_endio(struct bio *bi, int error) 3334 { 3335 struct bio* raid_bi = bi->bi_private; 3336 mddev_t *mddev; 3337 raid5_conf_t *conf; 3338 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 3339 mdk_rdev_t *rdev; 3340 3341 bio_put(bi); 3342 3343 mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata; 3344 conf = mddev_to_conf(mddev); 3345 rdev = (void*)raid_bi->bi_next; 3346 raid_bi->bi_next = NULL; 3347 3348 rdev_dec_pending(rdev, conf->mddev); 3349 3350 if (!error && uptodate) { 3351 bio_endio(raid_bi, 0); 3352 if (atomic_dec_and_test(&conf->active_aligned_reads)) 3353 wake_up(&conf->wait_for_stripe); 3354 return; 3355 } 3356 3357 3358 pr_debug("raid5_align_endio : io error...handing IO for a retry\n"); 3359 3360 add_bio_to_retry(raid_bi, conf); 3361 } 3362 3363 static int bio_fits_rdev(struct bio *bi) 3364 { 3365 struct request_queue *q = bdev_get_queue(bi->bi_bdev); 3366 3367 if ((bi->bi_size>>9) > q->max_sectors) 3368 return 0; 3369 blk_recount_segments(q, bi); 3370 if (bi->bi_phys_segments > q->max_phys_segments || 3371 bi->bi_hw_segments > q->max_hw_segments) 3372 return 0; 3373 3374 if (q->merge_bvec_fn) 3375 /* it's too hard to apply the merge_bvec_fn at this stage, 3376 * just just give up 3377 */ 3378 return 0; 3379 3380 return 1; 3381 } 3382 3383 3384 static int chunk_aligned_read(struct request_queue *q, struct bio * raid_bio) 3385 { 3386 mddev_t *mddev = q->queuedata; 3387 raid5_conf_t *conf = mddev_to_conf(mddev); 3388 const unsigned int raid_disks = conf->raid_disks; 3389 const unsigned int data_disks = raid_disks - conf->max_degraded; 3390 unsigned int dd_idx, pd_idx; 3391 struct bio* align_bi; 3392 mdk_rdev_t *rdev; 3393 3394 if (!in_chunk_boundary(mddev, raid_bio)) { 3395 pr_debug("chunk_aligned_read : non aligned\n"); 3396 return 0; 3397 } 3398 /* 3399 * use bio_clone to make a copy of the bio 3400 */ 3401 align_bi = bio_clone(raid_bio, GFP_NOIO); 3402 if (!align_bi) 3403 return 0; 3404 /* 3405 * set bi_end_io to a new function, and set bi_private to the 3406 * original bio. 3407 */ 3408 align_bi->bi_end_io = raid5_align_endio; 3409 align_bi->bi_private = raid_bio; 3410 /* 3411 * compute position 3412 */ 3413 align_bi->bi_sector = raid5_compute_sector(raid_bio->bi_sector, 3414 raid_disks, 3415 data_disks, 3416 &dd_idx, 3417 &pd_idx, 3418 conf); 3419 3420 rcu_read_lock(); 3421 rdev = rcu_dereference(conf->disks[dd_idx].rdev); 3422 if (rdev && test_bit(In_sync, &rdev->flags)) { 3423 atomic_inc(&rdev->nr_pending); 3424 rcu_read_unlock(); 3425 raid_bio->bi_next = (void*)rdev; 3426 align_bi->bi_bdev = rdev->bdev; 3427 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID); 3428 align_bi->bi_sector += rdev->data_offset; 3429 3430 if (!bio_fits_rdev(align_bi)) { 3431 /* too big in some way */ 3432 bio_put(align_bi); 3433 rdev_dec_pending(rdev, mddev); 3434 return 0; 3435 } 3436 3437 spin_lock_irq(&conf->device_lock); 3438 wait_event_lock_irq(conf->wait_for_stripe, 3439 conf->quiesce == 0, 3440 conf->device_lock, /* nothing */); 3441 atomic_inc(&conf->active_aligned_reads); 3442 spin_unlock_irq(&conf->device_lock); 3443 3444 generic_make_request(align_bi); 3445 return 1; 3446 } else { 3447 rcu_read_unlock(); 3448 bio_put(align_bi); 3449 return 0; 3450 } 3451 } 3452 3453 3454 static int make_request(struct request_queue *q, struct bio * bi) 3455 { 3456 mddev_t *mddev = q->queuedata; 3457 raid5_conf_t *conf = mddev_to_conf(mddev); 3458 unsigned int dd_idx, pd_idx; 3459 sector_t new_sector; 3460 sector_t logical_sector, last_sector; 3461 struct stripe_head *sh; 3462 const int rw = bio_data_dir(bi); 3463 int remaining; 3464 3465 if (unlikely(bio_barrier(bi))) { 3466 bio_endio(bi, -EOPNOTSUPP); 3467 return 0; 3468 } 3469 3470 md_write_start(mddev, bi); 3471 3472 disk_stat_inc(mddev->gendisk, ios[rw]); 3473 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi)); 3474 3475 if (rw == READ && 3476 mddev->reshape_position == MaxSector && 3477 chunk_aligned_read(q,bi)) 3478 return 0; 3479 3480 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 3481 last_sector = bi->bi_sector + (bi->bi_size>>9); 3482 bi->bi_next = NULL; 3483 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ 3484 3485 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { 3486 DEFINE_WAIT(w); 3487 int disks, data_disks; 3488 3489 retry: 3490 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); 3491 if (likely(conf->expand_progress == MaxSector)) 3492 disks = conf->raid_disks; 3493 else { 3494 /* spinlock is needed as expand_progress may be 3495 * 64bit on a 32bit platform, and so it might be 3496 * possible to see a half-updated value 3497 * Ofcourse expand_progress could change after 3498 * the lock is dropped, so once we get a reference 3499 * to the stripe that we think it is, we will have 3500 * to check again. 3501 */ 3502 spin_lock_irq(&conf->device_lock); 3503 disks = conf->raid_disks; 3504 if (logical_sector >= conf->expand_progress) 3505 disks = conf->previous_raid_disks; 3506 else { 3507 if (logical_sector >= conf->expand_lo) { 3508 spin_unlock_irq(&conf->device_lock); 3509 schedule(); 3510 goto retry; 3511 } 3512 } 3513 spin_unlock_irq(&conf->device_lock); 3514 } 3515 data_disks = disks - conf->max_degraded; 3516 3517 new_sector = raid5_compute_sector(logical_sector, disks, data_disks, 3518 &dd_idx, &pd_idx, conf); 3519 pr_debug("raid5: make_request, sector %llu logical %llu\n", 3520 (unsigned long long)new_sector, 3521 (unsigned long long)logical_sector); 3522 3523 sh = get_active_stripe(conf, new_sector, disks, pd_idx, (bi->bi_rw&RWA_MASK)); 3524 if (sh) { 3525 if (unlikely(conf->expand_progress != MaxSector)) { 3526 /* expansion might have moved on while waiting for a 3527 * stripe, so we must do the range check again. 3528 * Expansion could still move past after this 3529 * test, but as we are holding a reference to 3530 * 'sh', we know that if that happens, 3531 * STRIPE_EXPANDING will get set and the expansion 3532 * won't proceed until we finish with the stripe. 3533 */ 3534 int must_retry = 0; 3535 spin_lock_irq(&conf->device_lock); 3536 if (logical_sector < conf->expand_progress && 3537 disks == conf->previous_raid_disks) 3538 /* mismatch, need to try again */ 3539 must_retry = 1; 3540 spin_unlock_irq(&conf->device_lock); 3541 if (must_retry) { 3542 release_stripe(sh); 3543 goto retry; 3544 } 3545 } 3546 /* FIXME what if we get a false positive because these 3547 * are being updated. 3548 */ 3549 if (logical_sector >= mddev->suspend_lo && 3550 logical_sector < mddev->suspend_hi) { 3551 release_stripe(sh); 3552 schedule(); 3553 goto retry; 3554 } 3555 3556 if (test_bit(STRIPE_EXPANDING, &sh->state) || 3557 !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) { 3558 /* Stripe is busy expanding or 3559 * add failed due to overlap. Flush everything 3560 * and wait a while 3561 */ 3562 raid5_unplug_device(mddev->queue); 3563 release_stripe(sh); 3564 schedule(); 3565 goto retry; 3566 } 3567 finish_wait(&conf->wait_for_overlap, &w); 3568 handle_stripe(sh, NULL); 3569 release_stripe(sh); 3570 } else { 3571 /* cannot get stripe for read-ahead, just give-up */ 3572 clear_bit(BIO_UPTODATE, &bi->bi_flags); 3573 finish_wait(&conf->wait_for_overlap, &w); 3574 break; 3575 } 3576 3577 } 3578 spin_lock_irq(&conf->device_lock); 3579 remaining = --bi->bi_phys_segments; 3580 spin_unlock_irq(&conf->device_lock); 3581 if (remaining == 0) { 3582 3583 if ( rw == WRITE ) 3584 md_write_end(mddev); 3585 3586 bi->bi_end_io(bi, 3587 test_bit(BIO_UPTODATE, &bi->bi_flags) 3588 ? 0 : -EIO); 3589 } 3590 return 0; 3591 } 3592 3593 static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped) 3594 { 3595 /* reshaping is quite different to recovery/resync so it is 3596 * handled quite separately ... here. 3597 * 3598 * On each call to sync_request, we gather one chunk worth of 3599 * destination stripes and flag them as expanding. 3600 * Then we find all the source stripes and request reads. 3601 * As the reads complete, handle_stripe will copy the data 3602 * into the destination stripe and release that stripe. 3603 */ 3604 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 3605 struct stripe_head *sh; 3606 int pd_idx; 3607 sector_t first_sector, last_sector; 3608 int raid_disks = conf->previous_raid_disks; 3609 int data_disks = raid_disks - conf->max_degraded; 3610 int new_data_disks = conf->raid_disks - conf->max_degraded; 3611 int i; 3612 int dd_idx; 3613 sector_t writepos, safepos, gap; 3614 3615 if (sector_nr == 0 && 3616 conf->expand_progress != 0) { 3617 /* restarting in the middle, skip the initial sectors */ 3618 sector_nr = conf->expand_progress; 3619 sector_div(sector_nr, new_data_disks); 3620 *skipped = 1; 3621 return sector_nr; 3622 } 3623 3624 /* we update the metadata when there is more than 3Meg 3625 * in the block range (that is rather arbitrary, should 3626 * probably be time based) or when the data about to be 3627 * copied would over-write the source of the data at 3628 * the front of the range. 3629 * i.e. one new_stripe forward from expand_progress new_maps 3630 * to after where expand_lo old_maps to 3631 */ 3632 writepos = conf->expand_progress + 3633 conf->chunk_size/512*(new_data_disks); 3634 sector_div(writepos, new_data_disks); 3635 safepos = conf->expand_lo; 3636 sector_div(safepos, data_disks); 3637 gap = conf->expand_progress - conf->expand_lo; 3638 3639 if (writepos >= safepos || 3640 gap > (new_data_disks)*3000*2 /*3Meg*/) { 3641 /* Cannot proceed until we've updated the superblock... */ 3642 wait_event(conf->wait_for_overlap, 3643 atomic_read(&conf->reshape_stripes)==0); 3644 mddev->reshape_position = conf->expand_progress; 3645 set_bit(MD_CHANGE_DEVS, &mddev->flags); 3646 md_wakeup_thread(mddev->thread); 3647 wait_event(mddev->sb_wait, mddev->flags == 0 || 3648 kthread_should_stop()); 3649 spin_lock_irq(&conf->device_lock); 3650 conf->expand_lo = mddev->reshape_position; 3651 spin_unlock_irq(&conf->device_lock); 3652 wake_up(&conf->wait_for_overlap); 3653 } 3654 3655 for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) { 3656 int j; 3657 int skipped = 0; 3658 pd_idx = stripe_to_pdidx(sector_nr+i, conf, conf->raid_disks); 3659 sh = get_active_stripe(conf, sector_nr+i, 3660 conf->raid_disks, pd_idx, 0); 3661 set_bit(STRIPE_EXPANDING, &sh->state); 3662 atomic_inc(&conf->reshape_stripes); 3663 /* If any of this stripe is beyond the end of the old 3664 * array, then we need to zero those blocks 3665 */ 3666 for (j=sh->disks; j--;) { 3667 sector_t s; 3668 if (j == sh->pd_idx) 3669 continue; 3670 if (conf->level == 6 && 3671 j == raid6_next_disk(sh->pd_idx, sh->disks)) 3672 continue; 3673 s = compute_blocknr(sh, j); 3674 if (s < (mddev->array_size<<1)) { 3675 skipped = 1; 3676 continue; 3677 } 3678 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); 3679 set_bit(R5_Expanded, &sh->dev[j].flags); 3680 set_bit(R5_UPTODATE, &sh->dev[j].flags); 3681 } 3682 if (!skipped) { 3683 set_bit(STRIPE_EXPAND_READY, &sh->state); 3684 set_bit(STRIPE_HANDLE, &sh->state); 3685 } 3686 release_stripe(sh); 3687 } 3688 spin_lock_irq(&conf->device_lock); 3689 conf->expand_progress = (sector_nr + i) * new_data_disks; 3690 spin_unlock_irq(&conf->device_lock); 3691 /* Ok, those stripe are ready. We can start scheduling 3692 * reads on the source stripes. 3693 * The source stripes are determined by mapping the first and last 3694 * block on the destination stripes. 3695 */ 3696 first_sector = 3697 raid5_compute_sector(sector_nr*(new_data_disks), 3698 raid_disks, data_disks, 3699 &dd_idx, &pd_idx, conf); 3700 last_sector = 3701 raid5_compute_sector((sector_nr+conf->chunk_size/512) 3702 *(new_data_disks) -1, 3703 raid_disks, data_disks, 3704 &dd_idx, &pd_idx, conf); 3705 if (last_sector >= (mddev->size<<1)) 3706 last_sector = (mddev->size<<1)-1; 3707 while (first_sector <= last_sector) { 3708 pd_idx = stripe_to_pdidx(first_sector, conf, 3709 conf->previous_raid_disks); 3710 sh = get_active_stripe(conf, first_sector, 3711 conf->previous_raid_disks, pd_idx, 0); 3712 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); 3713 set_bit(STRIPE_HANDLE, &sh->state); 3714 release_stripe(sh); 3715 first_sector += STRIPE_SECTORS; 3716 } 3717 return conf->chunk_size>>9; 3718 } 3719 3720 /* FIXME go_faster isn't used */ 3721 static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) 3722 { 3723 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 3724 struct stripe_head *sh; 3725 int pd_idx; 3726 int raid_disks = conf->raid_disks; 3727 sector_t max_sector = mddev->size << 1; 3728 int sync_blocks; 3729 int still_degraded = 0; 3730 int i; 3731 3732 if (sector_nr >= max_sector) { 3733 /* just being told to finish up .. nothing much to do */ 3734 unplug_slaves(mddev); 3735 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 3736 end_reshape(conf); 3737 return 0; 3738 } 3739 3740 if (mddev->curr_resync < max_sector) /* aborted */ 3741 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 3742 &sync_blocks, 1); 3743 else /* completed sync */ 3744 conf->fullsync = 0; 3745 bitmap_close_sync(mddev->bitmap); 3746 3747 return 0; 3748 } 3749 3750 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 3751 return reshape_request(mddev, sector_nr, skipped); 3752 3753 /* if there is too many failed drives and we are trying 3754 * to resync, then assert that we are finished, because there is 3755 * nothing we can do. 3756 */ 3757 if (mddev->degraded >= conf->max_degraded && 3758 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 3759 sector_t rv = (mddev->size << 1) - sector_nr; 3760 *skipped = 1; 3761 return rv; 3762 } 3763 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && 3764 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 3765 !conf->fullsync && sync_blocks >= STRIPE_SECTORS) { 3766 /* we can skip this block, and probably more */ 3767 sync_blocks /= STRIPE_SECTORS; 3768 *skipped = 1; 3769 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */ 3770 } 3771 3772 pd_idx = stripe_to_pdidx(sector_nr, conf, raid_disks); 3773 sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 1); 3774 if (sh == NULL) { 3775 sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 0); 3776 /* make sure we don't swamp the stripe cache if someone else 3777 * is trying to get access 3778 */ 3779 schedule_timeout_uninterruptible(1); 3780 } 3781 /* Need to check if array will still be degraded after recovery/resync 3782 * We don't need to check the 'failed' flag as when that gets set, 3783 * recovery aborts. 3784 */ 3785 for (i=0; i<mddev->raid_disks; i++) 3786 if (conf->disks[i].rdev == NULL) 3787 still_degraded = 1; 3788 3789 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); 3790 3791 spin_lock(&sh->lock); 3792 set_bit(STRIPE_SYNCING, &sh->state); 3793 clear_bit(STRIPE_INSYNC, &sh->state); 3794 spin_unlock(&sh->lock); 3795 3796 handle_stripe(sh, NULL); 3797 release_stripe(sh); 3798 3799 return STRIPE_SECTORS; 3800 } 3801 3802 static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio) 3803 { 3804 /* We may not be able to submit a whole bio at once as there 3805 * may not be enough stripe_heads available. 3806 * We cannot pre-allocate enough stripe_heads as we may need 3807 * more than exist in the cache (if we allow ever large chunks). 3808 * So we do one stripe head at a time and record in 3809 * ->bi_hw_segments how many have been done. 3810 * 3811 * We *know* that this entire raid_bio is in one chunk, so 3812 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector. 3813 */ 3814 struct stripe_head *sh; 3815 int dd_idx, pd_idx; 3816 sector_t sector, logical_sector, last_sector; 3817 int scnt = 0; 3818 int remaining; 3819 int handled = 0; 3820 3821 logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 3822 sector = raid5_compute_sector( logical_sector, 3823 conf->raid_disks, 3824 conf->raid_disks - conf->max_degraded, 3825 &dd_idx, 3826 &pd_idx, 3827 conf); 3828 last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9); 3829 3830 for (; logical_sector < last_sector; 3831 logical_sector += STRIPE_SECTORS, 3832 sector += STRIPE_SECTORS, 3833 scnt++) { 3834 3835 if (scnt < raid_bio->bi_hw_segments) 3836 /* already done this stripe */ 3837 continue; 3838 3839 sh = get_active_stripe(conf, sector, conf->raid_disks, pd_idx, 1); 3840 3841 if (!sh) { 3842 /* failed to get a stripe - must wait */ 3843 raid_bio->bi_hw_segments = scnt; 3844 conf->retry_read_aligned = raid_bio; 3845 return handled; 3846 } 3847 3848 set_bit(R5_ReadError, &sh->dev[dd_idx].flags); 3849 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) { 3850 release_stripe(sh); 3851 raid_bio->bi_hw_segments = scnt; 3852 conf->retry_read_aligned = raid_bio; 3853 return handled; 3854 } 3855 3856 handle_stripe(sh, NULL); 3857 release_stripe(sh); 3858 handled++; 3859 } 3860 spin_lock_irq(&conf->device_lock); 3861 remaining = --raid_bio->bi_phys_segments; 3862 spin_unlock_irq(&conf->device_lock); 3863 if (remaining == 0) { 3864 3865 raid_bio->bi_end_io(raid_bio, 3866 test_bit(BIO_UPTODATE, &raid_bio->bi_flags) 3867 ? 0 : -EIO); 3868 } 3869 if (atomic_dec_and_test(&conf->active_aligned_reads)) 3870 wake_up(&conf->wait_for_stripe); 3871 return handled; 3872 } 3873 3874 3875 3876 /* 3877 * This is our raid5 kernel thread. 3878 * 3879 * We scan the hash table for stripes which can be handled now. 3880 * During the scan, completed stripes are saved for us by the interrupt 3881 * handler, so that they will not have to wait for our next wakeup. 3882 */ 3883 static void raid5d (mddev_t *mddev) 3884 { 3885 struct stripe_head *sh; 3886 raid5_conf_t *conf = mddev_to_conf(mddev); 3887 int handled; 3888 3889 pr_debug("+++ raid5d active\n"); 3890 3891 md_check_recovery(mddev); 3892 3893 handled = 0; 3894 spin_lock_irq(&conf->device_lock); 3895 while (1) { 3896 struct list_head *first; 3897 struct bio *bio; 3898 3899 if (conf->seq_flush != conf->seq_write) { 3900 int seq = conf->seq_flush; 3901 spin_unlock_irq(&conf->device_lock); 3902 bitmap_unplug(mddev->bitmap); 3903 spin_lock_irq(&conf->device_lock); 3904 conf->seq_write = seq; 3905 activate_bit_delay(conf); 3906 } 3907 3908 if (list_empty(&conf->handle_list) && 3909 atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD && 3910 !blk_queue_plugged(mddev->queue) && 3911 !list_empty(&conf->delayed_list)) 3912 raid5_activate_delayed(conf); 3913 3914 while ((bio = remove_bio_from_retry(conf))) { 3915 int ok; 3916 spin_unlock_irq(&conf->device_lock); 3917 ok = retry_aligned_read(conf, bio); 3918 spin_lock_irq(&conf->device_lock); 3919 if (!ok) 3920 break; 3921 handled++; 3922 } 3923 3924 if (list_empty(&conf->handle_list)) { 3925 async_tx_issue_pending_all(); 3926 break; 3927 } 3928 3929 first = conf->handle_list.next; 3930 sh = list_entry(first, struct stripe_head, lru); 3931 3932 list_del_init(first); 3933 atomic_inc(&sh->count); 3934 BUG_ON(atomic_read(&sh->count)!= 1); 3935 spin_unlock_irq(&conf->device_lock); 3936 3937 handled++; 3938 handle_stripe(sh, conf->spare_page); 3939 release_stripe(sh); 3940 3941 spin_lock_irq(&conf->device_lock); 3942 } 3943 pr_debug("%d stripes handled\n", handled); 3944 3945 spin_unlock_irq(&conf->device_lock); 3946 3947 unplug_slaves(mddev); 3948 3949 pr_debug("--- raid5d inactive\n"); 3950 } 3951 3952 static ssize_t 3953 raid5_show_stripe_cache_size(mddev_t *mddev, char *page) 3954 { 3955 raid5_conf_t *conf = mddev_to_conf(mddev); 3956 if (conf) 3957 return sprintf(page, "%d\n", conf->max_nr_stripes); 3958 else 3959 return 0; 3960 } 3961 3962 static ssize_t 3963 raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len) 3964 { 3965 raid5_conf_t *conf = mddev_to_conf(mddev); 3966 char *end; 3967 int new; 3968 if (len >= PAGE_SIZE) 3969 return -EINVAL; 3970 if (!conf) 3971 return -ENODEV; 3972 3973 new = simple_strtoul(page, &end, 10); 3974 if (!*page || (*end && *end != '\n') ) 3975 return -EINVAL; 3976 if (new <= 16 || new > 32768) 3977 return -EINVAL; 3978 while (new < conf->max_nr_stripes) { 3979 if (drop_one_stripe(conf)) 3980 conf->max_nr_stripes--; 3981 else 3982 break; 3983 } 3984 md_allow_write(mddev); 3985 while (new > conf->max_nr_stripes) { 3986 if (grow_one_stripe(conf)) 3987 conf->max_nr_stripes++; 3988 else break; 3989 } 3990 return len; 3991 } 3992 3993 static struct md_sysfs_entry 3994 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, 3995 raid5_show_stripe_cache_size, 3996 raid5_store_stripe_cache_size); 3997 3998 static ssize_t 3999 stripe_cache_active_show(mddev_t *mddev, char *page) 4000 { 4001 raid5_conf_t *conf = mddev_to_conf(mddev); 4002 if (conf) 4003 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); 4004 else 4005 return 0; 4006 } 4007 4008 static struct md_sysfs_entry 4009 raid5_stripecache_active = __ATTR_RO(stripe_cache_active); 4010 4011 static struct attribute *raid5_attrs[] = { 4012 &raid5_stripecache_size.attr, 4013 &raid5_stripecache_active.attr, 4014 NULL, 4015 }; 4016 static struct attribute_group raid5_attrs_group = { 4017 .name = NULL, 4018 .attrs = raid5_attrs, 4019 }; 4020 4021 static int run(mddev_t *mddev) 4022 { 4023 raid5_conf_t *conf; 4024 int raid_disk, memory; 4025 mdk_rdev_t *rdev; 4026 struct disk_info *disk; 4027 struct list_head *tmp; 4028 int working_disks = 0; 4029 4030 if (mddev->level != 5 && mddev->level != 4 && mddev->level != 6) { 4031 printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n", 4032 mdname(mddev), mddev->level); 4033 return -EIO; 4034 } 4035 4036 if (mddev->reshape_position != MaxSector) { 4037 /* Check that we can continue the reshape. 4038 * Currently only disks can change, it must 4039 * increase, and we must be past the point where 4040 * a stripe over-writes itself 4041 */ 4042 sector_t here_new, here_old; 4043 int old_disks; 4044 int max_degraded = (mddev->level == 5 ? 1 : 2); 4045 4046 if (mddev->new_level != mddev->level || 4047 mddev->new_layout != mddev->layout || 4048 mddev->new_chunk != mddev->chunk_size) { 4049 printk(KERN_ERR "raid5: %s: unsupported reshape " 4050 "required - aborting.\n", 4051 mdname(mddev)); 4052 return -EINVAL; 4053 } 4054 if (mddev->delta_disks <= 0) { 4055 printk(KERN_ERR "raid5: %s: unsupported reshape " 4056 "(reduce disks) required - aborting.\n", 4057 mdname(mddev)); 4058 return -EINVAL; 4059 } 4060 old_disks = mddev->raid_disks - mddev->delta_disks; 4061 /* reshape_position must be on a new-stripe boundary, and one 4062 * further up in new geometry must map after here in old 4063 * geometry. 4064 */ 4065 here_new = mddev->reshape_position; 4066 if (sector_div(here_new, (mddev->chunk_size>>9)* 4067 (mddev->raid_disks - max_degraded))) { 4068 printk(KERN_ERR "raid5: reshape_position not " 4069 "on a stripe boundary\n"); 4070 return -EINVAL; 4071 } 4072 /* here_new is the stripe we will write to */ 4073 here_old = mddev->reshape_position; 4074 sector_div(here_old, (mddev->chunk_size>>9)* 4075 (old_disks-max_degraded)); 4076 /* here_old is the first stripe that we might need to read 4077 * from */ 4078 if (here_new >= here_old) { 4079 /* Reading from the same stripe as writing to - bad */ 4080 printk(KERN_ERR "raid5: reshape_position too early for " 4081 "auto-recovery - aborting.\n"); 4082 return -EINVAL; 4083 } 4084 printk(KERN_INFO "raid5: reshape will continue\n"); 4085 /* OK, we should be able to continue; */ 4086 } 4087 4088 4089 mddev->private = kzalloc(sizeof (raid5_conf_t), GFP_KERNEL); 4090 if ((conf = mddev->private) == NULL) 4091 goto abort; 4092 if (mddev->reshape_position == MaxSector) { 4093 conf->previous_raid_disks = conf->raid_disks = mddev->raid_disks; 4094 } else { 4095 conf->raid_disks = mddev->raid_disks; 4096 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; 4097 } 4098 4099 conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info), 4100 GFP_KERNEL); 4101 if (!conf->disks) 4102 goto abort; 4103 4104 conf->mddev = mddev; 4105 4106 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) 4107 goto abort; 4108 4109 if (mddev->level == 6) { 4110 conf->spare_page = alloc_page(GFP_KERNEL); 4111 if (!conf->spare_page) 4112 goto abort; 4113 } 4114 spin_lock_init(&conf->device_lock); 4115 init_waitqueue_head(&conf->wait_for_stripe); 4116 init_waitqueue_head(&conf->wait_for_overlap); 4117 INIT_LIST_HEAD(&conf->handle_list); 4118 INIT_LIST_HEAD(&conf->delayed_list); 4119 INIT_LIST_HEAD(&conf->bitmap_list); 4120 INIT_LIST_HEAD(&conf->inactive_list); 4121 atomic_set(&conf->active_stripes, 0); 4122 atomic_set(&conf->preread_active_stripes, 0); 4123 atomic_set(&conf->active_aligned_reads, 0); 4124 4125 pr_debug("raid5: run(%s) called.\n", mdname(mddev)); 4126 4127 ITERATE_RDEV(mddev,rdev,tmp) { 4128 raid_disk = rdev->raid_disk; 4129 if (raid_disk >= conf->raid_disks 4130 || raid_disk < 0) 4131 continue; 4132 disk = conf->disks + raid_disk; 4133 4134 disk->rdev = rdev; 4135 4136 if (test_bit(In_sync, &rdev->flags)) { 4137 char b[BDEVNAME_SIZE]; 4138 printk(KERN_INFO "raid5: device %s operational as raid" 4139 " disk %d\n", bdevname(rdev->bdev,b), 4140 raid_disk); 4141 working_disks++; 4142 } 4143 } 4144 4145 /* 4146 * 0 for a fully functional array, 1 or 2 for a degraded array. 4147 */ 4148 mddev->degraded = conf->raid_disks - working_disks; 4149 conf->mddev = mddev; 4150 conf->chunk_size = mddev->chunk_size; 4151 conf->level = mddev->level; 4152 if (conf->level == 6) 4153 conf->max_degraded = 2; 4154 else 4155 conf->max_degraded = 1; 4156 conf->algorithm = mddev->layout; 4157 conf->max_nr_stripes = NR_STRIPES; 4158 conf->expand_progress = mddev->reshape_position; 4159 4160 /* device size must be a multiple of chunk size */ 4161 mddev->size &= ~(mddev->chunk_size/1024 -1); 4162 mddev->resync_max_sectors = mddev->size << 1; 4163 4164 if (conf->level == 6 && conf->raid_disks < 4) { 4165 printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n", 4166 mdname(mddev), conf->raid_disks); 4167 goto abort; 4168 } 4169 if (!conf->chunk_size || conf->chunk_size % 4) { 4170 printk(KERN_ERR "raid5: invalid chunk size %d for %s\n", 4171 conf->chunk_size, mdname(mddev)); 4172 goto abort; 4173 } 4174 if (conf->algorithm > ALGORITHM_RIGHT_SYMMETRIC) { 4175 printk(KERN_ERR 4176 "raid5: unsupported parity algorithm %d for %s\n", 4177 conf->algorithm, mdname(mddev)); 4178 goto abort; 4179 } 4180 if (mddev->degraded > conf->max_degraded) { 4181 printk(KERN_ERR "raid5: not enough operational devices for %s" 4182 " (%d/%d failed)\n", 4183 mdname(mddev), mddev->degraded, conf->raid_disks); 4184 goto abort; 4185 } 4186 4187 if (mddev->degraded > 0 && 4188 mddev->recovery_cp != MaxSector) { 4189 if (mddev->ok_start_degraded) 4190 printk(KERN_WARNING 4191 "raid5: starting dirty degraded array: %s" 4192 "- data corruption possible.\n", 4193 mdname(mddev)); 4194 else { 4195 printk(KERN_ERR 4196 "raid5: cannot start dirty degraded array for %s\n", 4197 mdname(mddev)); 4198 goto abort; 4199 } 4200 } 4201 4202 { 4203 mddev->thread = md_register_thread(raid5d, mddev, "%s_raid5"); 4204 if (!mddev->thread) { 4205 printk(KERN_ERR 4206 "raid5: couldn't allocate thread for %s\n", 4207 mdname(mddev)); 4208 goto abort; 4209 } 4210 } 4211 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + 4212 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; 4213 if (grow_stripes(conf, conf->max_nr_stripes)) { 4214 printk(KERN_ERR 4215 "raid5: couldn't allocate %dkB for buffers\n", memory); 4216 shrink_stripes(conf); 4217 md_unregister_thread(mddev->thread); 4218 goto abort; 4219 } else 4220 printk(KERN_INFO "raid5: allocated %dkB for %s\n", 4221 memory, mdname(mddev)); 4222 4223 if (mddev->degraded == 0) 4224 printk("raid5: raid level %d set %s active with %d out of %d" 4225 " devices, algorithm %d\n", conf->level, mdname(mddev), 4226 mddev->raid_disks-mddev->degraded, mddev->raid_disks, 4227 conf->algorithm); 4228 else 4229 printk(KERN_ALERT "raid5: raid level %d set %s active with %d" 4230 " out of %d devices, algorithm %d\n", conf->level, 4231 mdname(mddev), mddev->raid_disks - mddev->degraded, 4232 mddev->raid_disks, conf->algorithm); 4233 4234 print_raid5_conf(conf); 4235 4236 if (conf->expand_progress != MaxSector) { 4237 printk("...ok start reshape thread\n"); 4238 conf->expand_lo = conf->expand_progress; 4239 atomic_set(&conf->reshape_stripes, 0); 4240 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4241 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4242 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 4243 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 4244 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 4245 "%s_reshape"); 4246 } 4247 4248 /* read-ahead size must cover two whole stripes, which is 4249 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices 4250 */ 4251 { 4252 int data_disks = conf->previous_raid_disks - conf->max_degraded; 4253 int stripe = data_disks * 4254 (mddev->chunk_size / PAGE_SIZE); 4255 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 4256 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 4257 } 4258 4259 /* Ok, everything is just fine now */ 4260 if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) 4261 printk(KERN_WARNING 4262 "raid5: failed to create sysfs attributes for %s\n", 4263 mdname(mddev)); 4264 4265 mddev->queue->unplug_fn = raid5_unplug_device; 4266 mddev->queue->issue_flush_fn = raid5_issue_flush; 4267 mddev->queue->backing_dev_info.congested_data = mddev; 4268 mddev->queue->backing_dev_info.congested_fn = raid5_congested; 4269 4270 mddev->array_size = mddev->size * (conf->previous_raid_disks - 4271 conf->max_degraded); 4272 4273 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec); 4274 4275 return 0; 4276 abort: 4277 if (conf) { 4278 print_raid5_conf(conf); 4279 safe_put_page(conf->spare_page); 4280 kfree(conf->disks); 4281 kfree(conf->stripe_hashtbl); 4282 kfree(conf); 4283 } 4284 mddev->private = NULL; 4285 printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev)); 4286 return -EIO; 4287 } 4288 4289 4290 4291 static int stop(mddev_t *mddev) 4292 { 4293 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 4294 4295 md_unregister_thread(mddev->thread); 4296 mddev->thread = NULL; 4297 shrink_stripes(conf); 4298 kfree(conf->stripe_hashtbl); 4299 mddev->queue->backing_dev_info.congested_fn = NULL; 4300 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 4301 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); 4302 kfree(conf->disks); 4303 kfree(conf); 4304 mddev->private = NULL; 4305 return 0; 4306 } 4307 4308 #ifdef DEBUG 4309 static void print_sh (struct seq_file *seq, struct stripe_head *sh) 4310 { 4311 int i; 4312 4313 seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n", 4314 (unsigned long long)sh->sector, sh->pd_idx, sh->state); 4315 seq_printf(seq, "sh %llu, count %d.\n", 4316 (unsigned long long)sh->sector, atomic_read(&sh->count)); 4317 seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector); 4318 for (i = 0; i < sh->disks; i++) { 4319 seq_printf(seq, "(cache%d: %p %ld) ", 4320 i, sh->dev[i].page, sh->dev[i].flags); 4321 } 4322 seq_printf(seq, "\n"); 4323 } 4324 4325 static void printall (struct seq_file *seq, raid5_conf_t *conf) 4326 { 4327 struct stripe_head *sh; 4328 struct hlist_node *hn; 4329 int i; 4330 4331 spin_lock_irq(&conf->device_lock); 4332 for (i = 0; i < NR_HASH; i++) { 4333 hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) { 4334 if (sh->raid_conf != conf) 4335 continue; 4336 print_sh(seq, sh); 4337 } 4338 } 4339 spin_unlock_irq(&conf->device_lock); 4340 } 4341 #endif 4342 4343 static void status (struct seq_file *seq, mddev_t *mddev) 4344 { 4345 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 4346 int i; 4347 4348 seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout); 4349 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); 4350 for (i = 0; i < conf->raid_disks; i++) 4351 seq_printf (seq, "%s", 4352 conf->disks[i].rdev && 4353 test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_"); 4354 seq_printf (seq, "]"); 4355 #ifdef DEBUG 4356 seq_printf (seq, "\n"); 4357 printall(seq, conf); 4358 #endif 4359 } 4360 4361 static void print_raid5_conf (raid5_conf_t *conf) 4362 { 4363 int i; 4364 struct disk_info *tmp; 4365 4366 printk("RAID5 conf printout:\n"); 4367 if (!conf) { 4368 printk("(conf==NULL)\n"); 4369 return; 4370 } 4371 printk(" --- rd:%d wd:%d\n", conf->raid_disks, 4372 conf->raid_disks - conf->mddev->degraded); 4373 4374 for (i = 0; i < conf->raid_disks; i++) { 4375 char b[BDEVNAME_SIZE]; 4376 tmp = conf->disks + i; 4377 if (tmp->rdev) 4378 printk(" disk %d, o:%d, dev:%s\n", 4379 i, !test_bit(Faulty, &tmp->rdev->flags), 4380 bdevname(tmp->rdev->bdev,b)); 4381 } 4382 } 4383 4384 static int raid5_spare_active(mddev_t *mddev) 4385 { 4386 int i; 4387 raid5_conf_t *conf = mddev->private; 4388 struct disk_info *tmp; 4389 4390 for (i = 0; i < conf->raid_disks; i++) { 4391 tmp = conf->disks + i; 4392 if (tmp->rdev 4393 && !test_bit(Faulty, &tmp->rdev->flags) 4394 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 4395 unsigned long flags; 4396 spin_lock_irqsave(&conf->device_lock, flags); 4397 mddev->degraded--; 4398 spin_unlock_irqrestore(&conf->device_lock, flags); 4399 } 4400 } 4401 print_raid5_conf(conf); 4402 return 0; 4403 } 4404 4405 static int raid5_remove_disk(mddev_t *mddev, int number) 4406 { 4407 raid5_conf_t *conf = mddev->private; 4408 int err = 0; 4409 mdk_rdev_t *rdev; 4410 struct disk_info *p = conf->disks + number; 4411 4412 print_raid5_conf(conf); 4413 rdev = p->rdev; 4414 if (rdev) { 4415 if (test_bit(In_sync, &rdev->flags) || 4416 atomic_read(&rdev->nr_pending)) { 4417 err = -EBUSY; 4418 goto abort; 4419 } 4420 p->rdev = NULL; 4421 synchronize_rcu(); 4422 if (atomic_read(&rdev->nr_pending)) { 4423 /* lost the race, try later */ 4424 err = -EBUSY; 4425 p->rdev = rdev; 4426 } 4427 } 4428 abort: 4429 4430 print_raid5_conf(conf); 4431 return err; 4432 } 4433 4434 static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) 4435 { 4436 raid5_conf_t *conf = mddev->private; 4437 int found = 0; 4438 int disk; 4439 struct disk_info *p; 4440 4441 if (mddev->degraded > conf->max_degraded) 4442 /* no point adding a device */ 4443 return 0; 4444 4445 /* 4446 * find the disk ... but prefer rdev->saved_raid_disk 4447 * if possible. 4448 */ 4449 if (rdev->saved_raid_disk >= 0 && 4450 conf->disks[rdev->saved_raid_disk].rdev == NULL) 4451 disk = rdev->saved_raid_disk; 4452 else 4453 disk = 0; 4454 for ( ; disk < conf->raid_disks; disk++) 4455 if ((p=conf->disks + disk)->rdev == NULL) { 4456 clear_bit(In_sync, &rdev->flags); 4457 rdev->raid_disk = disk; 4458 found = 1; 4459 if (rdev->saved_raid_disk != disk) 4460 conf->fullsync = 1; 4461 rcu_assign_pointer(p->rdev, rdev); 4462 break; 4463 } 4464 print_raid5_conf(conf); 4465 return found; 4466 } 4467 4468 static int raid5_resize(mddev_t *mddev, sector_t sectors) 4469 { 4470 /* no resync is happening, and there is enough space 4471 * on all devices, so we can resize. 4472 * We need to make sure resync covers any new space. 4473 * If the array is shrinking we should possibly wait until 4474 * any io in the removed space completes, but it hardly seems 4475 * worth it. 4476 */ 4477 raid5_conf_t *conf = mddev_to_conf(mddev); 4478 4479 sectors &= ~((sector_t)mddev->chunk_size/512 - 1); 4480 mddev->array_size = (sectors * (mddev->raid_disks-conf->max_degraded))>>1; 4481 set_capacity(mddev->gendisk, mddev->array_size << 1); 4482 mddev->changed = 1; 4483 if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) { 4484 mddev->recovery_cp = mddev->size << 1; 4485 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4486 } 4487 mddev->size = sectors /2; 4488 mddev->resync_max_sectors = sectors; 4489 return 0; 4490 } 4491 4492 #ifdef CONFIG_MD_RAID5_RESHAPE 4493 static int raid5_check_reshape(mddev_t *mddev) 4494 { 4495 raid5_conf_t *conf = mddev_to_conf(mddev); 4496 int err; 4497 4498 if (mddev->delta_disks < 0 || 4499 mddev->new_level != mddev->level) 4500 return -EINVAL; /* Cannot shrink array or change level yet */ 4501 if (mddev->delta_disks == 0) 4502 return 0; /* nothing to do */ 4503 4504 /* Can only proceed if there are plenty of stripe_heads. 4505 * We need a minimum of one full stripe,, and for sensible progress 4506 * it is best to have about 4 times that. 4507 * If we require 4 times, then the default 256 4K stripe_heads will 4508 * allow for chunk sizes up to 256K, which is probably OK. 4509 * If the chunk size is greater, user-space should request more 4510 * stripe_heads first. 4511 */ 4512 if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes || 4513 (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) { 4514 printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n", 4515 (mddev->chunk_size / STRIPE_SIZE)*4); 4516 return -ENOSPC; 4517 } 4518 4519 err = resize_stripes(conf, conf->raid_disks + mddev->delta_disks); 4520 if (err) 4521 return err; 4522 4523 if (mddev->degraded > conf->max_degraded) 4524 return -EINVAL; 4525 /* looks like we might be able to manage this */ 4526 return 0; 4527 } 4528 4529 static int raid5_start_reshape(mddev_t *mddev) 4530 { 4531 raid5_conf_t *conf = mddev_to_conf(mddev); 4532 mdk_rdev_t *rdev; 4533 struct list_head *rtmp; 4534 int spares = 0; 4535 int added_devices = 0; 4536 unsigned long flags; 4537 4538 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4539 return -EBUSY; 4540 4541 ITERATE_RDEV(mddev, rdev, rtmp) 4542 if (rdev->raid_disk < 0 && 4543 !test_bit(Faulty, &rdev->flags)) 4544 spares++; 4545 4546 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) 4547 /* Not enough devices even to make a degraded array 4548 * of that size 4549 */ 4550 return -EINVAL; 4551 4552 atomic_set(&conf->reshape_stripes, 0); 4553 spin_lock_irq(&conf->device_lock); 4554 conf->previous_raid_disks = conf->raid_disks; 4555 conf->raid_disks += mddev->delta_disks; 4556 conf->expand_progress = 0; 4557 conf->expand_lo = 0; 4558 spin_unlock_irq(&conf->device_lock); 4559 4560 /* Add some new drives, as many as will fit. 4561 * We know there are enough to make the newly sized array work. 4562 */ 4563 ITERATE_RDEV(mddev, rdev, rtmp) 4564 if (rdev->raid_disk < 0 && 4565 !test_bit(Faulty, &rdev->flags)) { 4566 if (raid5_add_disk(mddev, rdev)) { 4567 char nm[20]; 4568 set_bit(In_sync, &rdev->flags); 4569 added_devices++; 4570 rdev->recovery_offset = 0; 4571 sprintf(nm, "rd%d", rdev->raid_disk); 4572 if (sysfs_create_link(&mddev->kobj, 4573 &rdev->kobj, nm)) 4574 printk(KERN_WARNING 4575 "raid5: failed to create " 4576 " link %s for %s\n", 4577 nm, mdname(mddev)); 4578 } else 4579 break; 4580 } 4581 4582 spin_lock_irqsave(&conf->device_lock, flags); 4583 mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) - added_devices; 4584 spin_unlock_irqrestore(&conf->device_lock, flags); 4585 mddev->raid_disks = conf->raid_disks; 4586 mddev->reshape_position = 0; 4587 set_bit(MD_CHANGE_DEVS, &mddev->flags); 4588 4589 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4590 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4591 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 4592 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 4593 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 4594 "%s_reshape"); 4595 if (!mddev->sync_thread) { 4596 mddev->recovery = 0; 4597 spin_lock_irq(&conf->device_lock); 4598 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; 4599 conf->expand_progress = MaxSector; 4600 spin_unlock_irq(&conf->device_lock); 4601 return -EAGAIN; 4602 } 4603 md_wakeup_thread(mddev->sync_thread); 4604 md_new_event(mddev); 4605 return 0; 4606 } 4607 #endif 4608 4609 static void end_reshape(raid5_conf_t *conf) 4610 { 4611 struct block_device *bdev; 4612 4613 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { 4614 conf->mddev->array_size = conf->mddev->size * 4615 (conf->raid_disks - conf->max_degraded); 4616 set_capacity(conf->mddev->gendisk, conf->mddev->array_size << 1); 4617 conf->mddev->changed = 1; 4618 4619 bdev = bdget_disk(conf->mddev->gendisk, 0); 4620 if (bdev) { 4621 mutex_lock(&bdev->bd_inode->i_mutex); 4622 i_size_write(bdev->bd_inode, (loff_t)conf->mddev->array_size << 10); 4623 mutex_unlock(&bdev->bd_inode->i_mutex); 4624 bdput(bdev); 4625 } 4626 spin_lock_irq(&conf->device_lock); 4627 conf->expand_progress = MaxSector; 4628 spin_unlock_irq(&conf->device_lock); 4629 conf->mddev->reshape_position = MaxSector; 4630 4631 /* read-ahead size must cover two whole stripes, which is 4632 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices 4633 */ 4634 { 4635 int data_disks = conf->previous_raid_disks - conf->max_degraded; 4636 int stripe = data_disks * 4637 (conf->mddev->chunk_size / PAGE_SIZE); 4638 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 4639 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 4640 } 4641 } 4642 } 4643 4644 static void raid5_quiesce(mddev_t *mddev, int state) 4645 { 4646 raid5_conf_t *conf = mddev_to_conf(mddev); 4647 4648 switch(state) { 4649 case 2: /* resume for a suspend */ 4650 wake_up(&conf->wait_for_overlap); 4651 break; 4652 4653 case 1: /* stop all writes */ 4654 spin_lock_irq(&conf->device_lock); 4655 conf->quiesce = 1; 4656 wait_event_lock_irq(conf->wait_for_stripe, 4657 atomic_read(&conf->active_stripes) == 0 && 4658 atomic_read(&conf->active_aligned_reads) == 0, 4659 conf->device_lock, /* nothing */); 4660 spin_unlock_irq(&conf->device_lock); 4661 break; 4662 4663 case 0: /* re-enable writes */ 4664 spin_lock_irq(&conf->device_lock); 4665 conf->quiesce = 0; 4666 wake_up(&conf->wait_for_stripe); 4667 wake_up(&conf->wait_for_overlap); 4668 spin_unlock_irq(&conf->device_lock); 4669 break; 4670 } 4671 } 4672 4673 static struct mdk_personality raid6_personality = 4674 { 4675 .name = "raid6", 4676 .level = 6, 4677 .owner = THIS_MODULE, 4678 .make_request = make_request, 4679 .run = run, 4680 .stop = stop, 4681 .status = status, 4682 .error_handler = error, 4683 .hot_add_disk = raid5_add_disk, 4684 .hot_remove_disk= raid5_remove_disk, 4685 .spare_active = raid5_spare_active, 4686 .sync_request = sync_request, 4687 .resize = raid5_resize, 4688 #ifdef CONFIG_MD_RAID5_RESHAPE 4689 .check_reshape = raid5_check_reshape, 4690 .start_reshape = raid5_start_reshape, 4691 #endif 4692 .quiesce = raid5_quiesce, 4693 }; 4694 static struct mdk_personality raid5_personality = 4695 { 4696 .name = "raid5", 4697 .level = 5, 4698 .owner = THIS_MODULE, 4699 .make_request = make_request, 4700 .run = run, 4701 .stop = stop, 4702 .status = status, 4703 .error_handler = error, 4704 .hot_add_disk = raid5_add_disk, 4705 .hot_remove_disk= raid5_remove_disk, 4706 .spare_active = raid5_spare_active, 4707 .sync_request = sync_request, 4708 .resize = raid5_resize, 4709 #ifdef CONFIG_MD_RAID5_RESHAPE 4710 .check_reshape = raid5_check_reshape, 4711 .start_reshape = raid5_start_reshape, 4712 #endif 4713 .quiesce = raid5_quiesce, 4714 }; 4715 4716 static struct mdk_personality raid4_personality = 4717 { 4718 .name = "raid4", 4719 .level = 4, 4720 .owner = THIS_MODULE, 4721 .make_request = make_request, 4722 .run = run, 4723 .stop = stop, 4724 .status = status, 4725 .error_handler = error, 4726 .hot_add_disk = raid5_add_disk, 4727 .hot_remove_disk= raid5_remove_disk, 4728 .spare_active = raid5_spare_active, 4729 .sync_request = sync_request, 4730 .resize = raid5_resize, 4731 #ifdef CONFIG_MD_RAID5_RESHAPE 4732 .check_reshape = raid5_check_reshape, 4733 .start_reshape = raid5_start_reshape, 4734 #endif 4735 .quiesce = raid5_quiesce, 4736 }; 4737 4738 static int __init raid5_init(void) 4739 { 4740 int e; 4741 4742 e = raid6_select_algo(); 4743 if ( e ) 4744 return e; 4745 register_md_personality(&raid6_personality); 4746 register_md_personality(&raid5_personality); 4747 register_md_personality(&raid4_personality); 4748 return 0; 4749 } 4750 4751 static void raid5_exit(void) 4752 { 4753 unregister_md_personality(&raid6_personality); 4754 unregister_md_personality(&raid5_personality); 4755 unregister_md_personality(&raid4_personality); 4756 } 4757 4758 module_init(raid5_init); 4759 module_exit(raid5_exit); 4760 MODULE_LICENSE("GPL"); 4761 MODULE_ALIAS("md-personality-4"); /* RAID5 */ 4762 MODULE_ALIAS("md-raid5"); 4763 MODULE_ALIAS("md-raid4"); 4764 MODULE_ALIAS("md-level-5"); 4765 MODULE_ALIAS("md-level-4"); 4766 MODULE_ALIAS("md-personality-8"); /* RAID6 */ 4767 MODULE_ALIAS("md-raid6"); 4768 MODULE_ALIAS("md-level-6"); 4769 4770 /* This used to be two separate modules, they were: */ 4771 MODULE_ALIAS("raid5"); 4772 MODULE_ALIAS("raid6"); 4773