1 /* 2 * raid5.c : Multiple Devices driver for Linux 3 * Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman 4 * Copyright (C) 1999, 2000 Ingo Molnar 5 * Copyright (C) 2002, 2003 H. Peter Anvin 6 * 7 * RAID-4/5/6 management functions. 8 * Thanks to Penguin Computing for making the RAID-6 development possible 9 * by donating a test server! 10 * 11 * This program is free software; you can redistribute it and/or modify 12 * it under the terms of the GNU General Public License as published by 13 * the Free Software Foundation; either version 2, or (at your option) 14 * any later version. 15 * 16 * You should have received a copy of the GNU General Public License 17 * (for example /usr/src/linux/COPYING); if not, write to the Free 18 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 19 */ 20 21 /* 22 * BITMAP UNPLUGGING: 23 * 24 * The sequencing for updating the bitmap reliably is a little 25 * subtle (and I got it wrong the first time) so it deserves some 26 * explanation. 27 * 28 * We group bitmap updates into batches. Each batch has a number. 29 * We may write out several batches at once, but that isn't very important. 30 * conf->bm_write is the number of the last batch successfully written. 31 * conf->bm_flush is the number of the last batch that was closed to 32 * new additions. 33 * When we discover that we will need to write to any block in a stripe 34 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq 35 * the number of the batch it will be in. This is bm_flush+1. 36 * When we are ready to do a write, if that batch hasn't been written yet, 37 * we plug the array and queue the stripe for later. 38 * When an unplug happens, we increment bm_flush, thus closing the current 39 * batch. 40 * When we notice that bm_flush > bm_write, we write out all pending updates 41 * to the bitmap, and advance bm_write to where bm_flush was. 42 * This may occasionally write a bit out twice, but is sure never to 43 * miss any bits. 44 */ 45 46 #include <linux/module.h> 47 #include <linux/slab.h> 48 #include <linux/highmem.h> 49 #include <linux/bitops.h> 50 #include <linux/kthread.h> 51 #include <asm/atomic.h> 52 #include "raid6.h" 53 54 #include <linux/raid/bitmap.h> 55 #include <linux/async_tx.h> 56 57 /* 58 * Stripe cache 59 */ 60 61 #define NR_STRIPES 256 62 #define STRIPE_SIZE PAGE_SIZE 63 #define STRIPE_SHIFT (PAGE_SHIFT - 9) 64 #define STRIPE_SECTORS (STRIPE_SIZE>>9) 65 #define IO_THRESHOLD 1 66 #define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head)) 67 #define HASH_MASK (NR_HASH - 1) 68 69 #define stripe_hash(conf, sect) (&((conf)->stripe_hashtbl[((sect) >> STRIPE_SHIFT) & HASH_MASK])) 70 71 /* bio's attached to a stripe+device for I/O are linked together in bi_sector 72 * order without overlap. There may be several bio's per stripe+device, and 73 * a bio could span several devices. 74 * When walking this list for a particular stripe+device, we must never proceed 75 * beyond a bio that extends past this device, as the next bio might no longer 76 * be valid. 77 * This macro is used to determine the 'next' bio in the list, given the sector 78 * of the current stripe+device 79 */ 80 #define r5_next_bio(bio, sect) ( ( (bio)->bi_sector + ((bio)->bi_size>>9) < sect + STRIPE_SECTORS) ? 
(bio)->bi_next : NULL) 81 /* 82 * The following can be used to debug the driver 83 */ 84 #define RAID5_PARANOIA 1 85 #if RAID5_PARANOIA && defined(CONFIG_SMP) 86 # define CHECK_DEVLOCK() assert_spin_locked(&conf->device_lock) 87 #else 88 # define CHECK_DEVLOCK() 89 #endif 90 91 #ifdef DEBUG 92 #define inline 93 #define __inline__ 94 #endif 95 96 #if !RAID6_USE_EMPTY_ZERO_PAGE 97 /* In .bss so it's zeroed */ 98 const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256))); 99 #endif 100 101 static inline int raid6_next_disk(int disk, int raid_disks) 102 { 103 disk++; 104 return (disk < raid_disks) ? disk : 0; 105 } 106 107 static void return_io(struct bio *return_bi) 108 { 109 struct bio *bi = return_bi; 110 while (bi) { 111 int bytes = bi->bi_size; 112 113 return_bi = bi->bi_next; 114 bi->bi_next = NULL; 115 bi->bi_size = 0; 116 bi->bi_end_io(bi, bytes, 117 test_bit(BIO_UPTODATE, &bi->bi_flags) 118 ? 0 : -EIO); 119 bi = return_bi; 120 } 121 } 122 123 static void print_raid5_conf (raid5_conf_t *conf); 124 125 static void __release_stripe(raid5_conf_t *conf, struct stripe_head *sh) 126 { 127 if (atomic_dec_and_test(&sh->count)) { 128 BUG_ON(!list_empty(&sh->lru)); 129 BUG_ON(atomic_read(&conf->active_stripes)==0); 130 if (test_bit(STRIPE_HANDLE, &sh->state)) { 131 if (test_bit(STRIPE_DELAYED, &sh->state)) { 132 list_add_tail(&sh->lru, &conf->delayed_list); 133 blk_plug_device(conf->mddev->queue); 134 } else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && 135 sh->bm_seq - conf->seq_write > 0) { 136 list_add_tail(&sh->lru, &conf->bitmap_list); 137 blk_plug_device(conf->mddev->queue); 138 } else { 139 clear_bit(STRIPE_BIT_DELAY, &sh->state); 140 list_add_tail(&sh->lru, &conf->handle_list); 141 } 142 md_wakeup_thread(conf->mddev->thread); 143 } else { 144 BUG_ON(sh->ops.pending); 145 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 146 atomic_dec(&conf->preread_active_stripes); 147 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) 148 md_wakeup_thread(conf->mddev->thread); 149 } 150 atomic_dec(&conf->active_stripes); 151 if (!test_bit(STRIPE_EXPANDING, &sh->state)) { 152 list_add_tail(&sh->lru, &conf->inactive_list); 153 wake_up(&conf->wait_for_stripe); 154 if (conf->retry_read_aligned) 155 md_wakeup_thread(conf->mddev->thread); 156 } 157 } 158 } 159 } 160 static void release_stripe(struct stripe_head *sh) 161 { 162 raid5_conf_t *conf = sh->raid_conf; 163 unsigned long flags; 164 165 spin_lock_irqsave(&conf->device_lock, flags); 166 __release_stripe(conf, sh); 167 spin_unlock_irqrestore(&conf->device_lock, flags); 168 } 169 170 static inline void remove_hash(struct stripe_head *sh) 171 { 172 pr_debug("remove_hash(), stripe %llu\n", 173 (unsigned long long)sh->sector); 174 175 hlist_del_init(&sh->hash); 176 } 177 178 static inline void insert_hash(raid5_conf_t *conf, struct stripe_head *sh) 179 { 180 struct hlist_head *hp = stripe_hash(conf, sh->sector); 181 182 pr_debug("insert_hash(), stripe %llu\n", 183 (unsigned long long)sh->sector); 184 185 CHECK_DEVLOCK(); 186 hlist_add_head(&sh->hash, hp); 187 } 188 189 190 /* find an idle stripe, make sure it is unhashed, and return it. 
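 *
 * A stripe is either reachable through the hash table (so __find_stripe()
 * can look it up by sector) or parked on one of the lru lists.  A stripe
 * taken from the inactive_list is unhashed here, so a later lookup for its
 * old sector misses and the caller re-initialises it with init_stripe().
 *
 * stripe_hash() picks the bucket by shifting the sector down by
 * STRIPE_SHIFT to get a stripe number and masking with HASH_MASK.  As a
 * rough example, with 4KiB pages on a 64-bit build NR_HASH is
 * 4096 / sizeof(struct hlist_head) = 512 buckets, so a stripe at
 * sector 1032 lands in bucket (1032 >> 3) & 511 = 129.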
*/ 191 static struct stripe_head *get_free_stripe(raid5_conf_t *conf) 192 { 193 struct stripe_head *sh = NULL; 194 struct list_head *first; 195 196 CHECK_DEVLOCK(); 197 if (list_empty(&conf->inactive_list)) 198 goto out; 199 first = conf->inactive_list.next; 200 sh = list_entry(first, struct stripe_head, lru); 201 list_del_init(first); 202 remove_hash(sh); 203 atomic_inc(&conf->active_stripes); 204 out: 205 return sh; 206 } 207 208 static void shrink_buffers(struct stripe_head *sh, int num) 209 { 210 struct page *p; 211 int i; 212 213 for (i=0; i<num ; i++) { 214 p = sh->dev[i].page; 215 if (!p) 216 continue; 217 sh->dev[i].page = NULL; 218 put_page(p); 219 } 220 } 221 222 static int grow_buffers(struct stripe_head *sh, int num) 223 { 224 int i; 225 226 for (i=0; i<num; i++) { 227 struct page *page; 228 229 if (!(page = alloc_page(GFP_KERNEL))) { 230 return 1; 231 } 232 sh->dev[i].page = page; 233 } 234 return 0; 235 } 236 237 static void raid5_build_block (struct stripe_head *sh, int i); 238 239 static void init_stripe(struct stripe_head *sh, sector_t sector, int pd_idx, int disks) 240 { 241 raid5_conf_t *conf = sh->raid_conf; 242 int i; 243 244 BUG_ON(atomic_read(&sh->count) != 0); 245 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); 246 BUG_ON(sh->ops.pending || sh->ops.ack || sh->ops.complete); 247 248 CHECK_DEVLOCK(); 249 pr_debug("init_stripe called, stripe %llu\n", 250 (unsigned long long)sh->sector); 251 252 remove_hash(sh); 253 254 sh->sector = sector; 255 sh->pd_idx = pd_idx; 256 sh->state = 0; 257 258 sh->disks = disks; 259 260 for (i = sh->disks; i--; ) { 261 struct r5dev *dev = &sh->dev[i]; 262 263 if (dev->toread || dev->read || dev->towrite || dev->written || 264 test_bit(R5_LOCKED, &dev->flags)) { 265 printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n", 266 (unsigned long long)sh->sector, i, dev->toread, 267 dev->read, dev->towrite, dev->written, 268 test_bit(R5_LOCKED, &dev->flags)); 269 BUG(); 270 } 271 dev->flags = 0; 272 raid5_build_block(sh, i); 273 } 274 insert_hash(conf, sh); 275 } 276 277 static struct stripe_head *__find_stripe(raid5_conf_t *conf, sector_t sector, int disks) 278 { 279 struct stripe_head *sh; 280 struct hlist_node *hn; 281 282 CHECK_DEVLOCK(); 283 pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector); 284 hlist_for_each_entry(sh, hn, stripe_hash(conf, sector), hash) 285 if (sh->sector == sector && sh->disks == disks) 286 return sh; 287 pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector); 288 return NULL; 289 } 290 291 static void unplug_slaves(mddev_t *mddev); 292 static void raid5_unplug_device(request_queue_t *q); 293 294 static struct stripe_head *get_active_stripe(raid5_conf_t *conf, sector_t sector, int disks, 295 int pd_idx, int noblock) 296 { 297 struct stripe_head *sh; 298 299 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); 300 301 spin_lock_irq(&conf->device_lock); 302 303 do { 304 wait_event_lock_irq(conf->wait_for_stripe, 305 conf->quiesce == 0, 306 conf->device_lock, /* nothing */); 307 sh = __find_stripe(conf, sector, disks); 308 if (!sh) { 309 if (!conf->inactive_blocked) 310 sh = get_free_stripe(conf); 311 if (noblock && sh == NULL) 312 break; 313 if (!sh) { 314 conf->inactive_blocked = 1; 315 wait_event_lock_irq(conf->wait_for_stripe, 316 !list_empty(&conf->inactive_list) && 317 (atomic_read(&conf->active_stripes) 318 < (conf->max_nr_stripes *3/4) 319 || !conf->inactive_blocked), 320 conf->device_lock, 321 raid5_unplug_device(conf->mddev->queue) 322 ); 323 conf->inactive_blocked = 0; 324 
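				/* The wait above only returns once the
				 * inactive list is non-empty and either the
				 * cache has drained below 3/4 of
				 * max_nr_stripes or somebody else cleared
				 * inactive_blocked, so the cache refills a
				 * little before stripes are handed out again.
				 * raid5_unplug_device() is run while sleeping
				 * so queued I/O keeps completing and
				 * releasing stripes.
				 */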
} else 325 init_stripe(sh, sector, pd_idx, disks); 326 } else { 327 if (atomic_read(&sh->count)) { 328 BUG_ON(!list_empty(&sh->lru)); 329 } else { 330 if (!test_bit(STRIPE_HANDLE, &sh->state)) 331 atomic_inc(&conf->active_stripes); 332 if (list_empty(&sh->lru) && 333 !test_bit(STRIPE_EXPANDING, &sh->state)) 334 BUG(); 335 list_del_init(&sh->lru); 336 } 337 } 338 } while (sh == NULL); 339 340 if (sh) 341 atomic_inc(&sh->count); 342 343 spin_unlock_irq(&conf->device_lock); 344 return sh; 345 } 346 347 /* test_and_ack_op() ensures that we only dequeue an operation once */ 348 #define test_and_ack_op(op, pend) \ 349 do { \ 350 if (test_bit(op, &sh->ops.pending) && \ 351 !test_bit(op, &sh->ops.complete)) { \ 352 if (test_and_set_bit(op, &sh->ops.ack)) \ 353 clear_bit(op, &pend); \ 354 else \ 355 ack++; \ 356 } else \ 357 clear_bit(op, &pend); \ 358 } while (0) 359 360 /* find new work to run, do not resubmit work that is already 361 * in flight 362 */ 363 static unsigned long get_stripe_work(struct stripe_head *sh) 364 { 365 unsigned long pending; 366 int ack = 0; 367 368 pending = sh->ops.pending; 369 370 test_and_ack_op(STRIPE_OP_BIOFILL, pending); 371 test_and_ack_op(STRIPE_OP_COMPUTE_BLK, pending); 372 test_and_ack_op(STRIPE_OP_PREXOR, pending); 373 test_and_ack_op(STRIPE_OP_BIODRAIN, pending); 374 test_and_ack_op(STRIPE_OP_POSTXOR, pending); 375 test_and_ack_op(STRIPE_OP_CHECK, pending); 376 if (test_and_clear_bit(STRIPE_OP_IO, &sh->ops.pending)) 377 ack++; 378 379 sh->ops.count -= ack; 380 BUG_ON(sh->ops.count < 0); 381 382 return pending; 383 } 384 385 static int 386 raid5_end_read_request(struct bio *bi, unsigned int bytes_done, int error); 387 static int 388 raid5_end_write_request (struct bio *bi, unsigned int bytes_done, int error); 389 390 static void ops_run_io(struct stripe_head *sh) 391 { 392 raid5_conf_t *conf = sh->raid_conf; 393 int i, disks = sh->disks; 394 395 might_sleep(); 396 397 for (i = disks; i--; ) { 398 int rw; 399 struct bio *bi; 400 mdk_rdev_t *rdev; 401 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) 402 rw = WRITE; 403 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 404 rw = READ; 405 else 406 continue; 407 408 bi = &sh->dev[i].req; 409 410 bi->bi_rw = rw; 411 if (rw == WRITE) 412 bi->bi_end_io = raid5_end_write_request; 413 else 414 bi->bi_end_io = raid5_end_read_request; 415 416 rcu_read_lock(); 417 rdev = rcu_dereference(conf->disks[i].rdev); 418 if (rdev && test_bit(Faulty, &rdev->flags)) 419 rdev = NULL; 420 if (rdev) 421 atomic_inc(&rdev->nr_pending); 422 rcu_read_unlock(); 423 424 if (rdev) { 425 if (test_bit(STRIPE_SYNCING, &sh->state) || 426 test_bit(STRIPE_EXPAND_SOURCE, &sh->state) || 427 test_bit(STRIPE_EXPAND_READY, &sh->state)) 428 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 429 430 bi->bi_bdev = rdev->bdev; 431 pr_debug("%s: for %llu schedule op %ld on disc %d\n", 432 __FUNCTION__, (unsigned long long)sh->sector, 433 bi->bi_rw, i); 434 atomic_inc(&sh->count); 435 bi->bi_sector = sh->sector + rdev->data_offset; 436 bi->bi_flags = 1 << BIO_UPTODATE; 437 bi->bi_vcnt = 1; 438 bi->bi_max_vecs = 1; 439 bi->bi_idx = 0; 440 bi->bi_io_vec = &sh->dev[i].vec; 441 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 442 bi->bi_io_vec[0].bv_offset = 0; 443 bi->bi_size = STRIPE_SIZE; 444 bi->bi_next = NULL; 445 if (rw == WRITE && 446 test_bit(R5_ReWrite, &sh->dev[i].flags)) 447 atomic_add(STRIPE_SECTORS, 448 &rdev->corrected_errors); 449 generic_make_request(bi); 450 } else { 451 if (rw == WRITE) 452 set_bit(STRIPE_DEGRADED, &sh->state); 453 pr_debug("skip 
op %ld on disc %d for sector %llu\n", 454 bi->bi_rw, i, (unsigned long long)sh->sector); 455 clear_bit(R5_LOCKED, &sh->dev[i].flags); 456 set_bit(STRIPE_HANDLE, &sh->state); 457 } 458 } 459 } 460 461 static struct dma_async_tx_descriptor * 462 async_copy_data(int frombio, struct bio *bio, struct page *page, 463 sector_t sector, struct dma_async_tx_descriptor *tx) 464 { 465 struct bio_vec *bvl; 466 struct page *bio_page; 467 int i; 468 int page_offset; 469 470 if (bio->bi_sector >= sector) 471 page_offset = (signed)(bio->bi_sector - sector) * 512; 472 else 473 page_offset = (signed)(sector - bio->bi_sector) * -512; 474 bio_for_each_segment(bvl, bio, i) { 475 int len = bio_iovec_idx(bio, i)->bv_len; 476 int clen; 477 int b_offset = 0; 478 479 if (page_offset < 0) { 480 b_offset = -page_offset; 481 page_offset += b_offset; 482 len -= b_offset; 483 } 484 485 if (len > 0 && page_offset + len > STRIPE_SIZE) 486 clen = STRIPE_SIZE - page_offset; 487 else 488 clen = len; 489 490 if (clen > 0) { 491 b_offset += bio_iovec_idx(bio, i)->bv_offset; 492 bio_page = bio_iovec_idx(bio, i)->bv_page; 493 if (frombio) 494 tx = async_memcpy(page, bio_page, page_offset, 495 b_offset, clen, 496 ASYNC_TX_DEP_ACK | ASYNC_TX_KMAP_SRC, 497 tx, NULL, NULL); 498 else 499 tx = async_memcpy(bio_page, page, b_offset, 500 page_offset, clen, 501 ASYNC_TX_DEP_ACK | ASYNC_TX_KMAP_DST, 502 tx, NULL, NULL); 503 } 504 if (clen < len) /* hit end of page */ 505 break; 506 page_offset += len; 507 } 508 509 return tx; 510 } 511 512 static void ops_complete_biofill(void *stripe_head_ref) 513 { 514 struct stripe_head *sh = stripe_head_ref; 515 struct bio *return_bi = NULL; 516 raid5_conf_t *conf = sh->raid_conf; 517 int i, more_to_read = 0; 518 519 pr_debug("%s: stripe %llu\n", __FUNCTION__, 520 (unsigned long long)sh->sector); 521 522 /* clear completed biofills */ 523 for (i = sh->disks; i--; ) { 524 struct r5dev *dev = &sh->dev[i]; 525 /* check if this stripe has new incoming reads */ 526 if (dev->toread) 527 more_to_read++; 528 529 /* acknowledge completion of a biofill operation */ 530 /* and check if we need to reply to a read request 531 */ 532 if (test_bit(R5_Wantfill, &dev->flags) && !dev->toread) { 533 struct bio *rbi, *rbi2; 534 clear_bit(R5_Wantfill, &dev->flags); 535 536 /* The access to dev->read is outside of the 537 * spin_lock_irq(&conf->device_lock), but is protected 538 * by the STRIPE_OP_BIOFILL pending bit 539 */ 540 BUG_ON(!dev->read); 541 rbi = dev->read; 542 dev->read = NULL; 543 while (rbi && rbi->bi_sector < 544 dev->sector + STRIPE_SECTORS) { 545 rbi2 = r5_next_bio(rbi, dev->sector); 546 spin_lock_irq(&conf->device_lock); 547 if (--rbi->bi_phys_segments == 0) { 548 rbi->bi_next = return_bi; 549 return_bi = rbi; 550 } 551 spin_unlock_irq(&conf->device_lock); 552 rbi = rbi2; 553 } 554 } 555 } 556 clear_bit(STRIPE_OP_BIOFILL, &sh->ops.ack); 557 clear_bit(STRIPE_OP_BIOFILL, &sh->ops.pending); 558 559 return_io(return_bi); 560 561 if (more_to_read) 562 set_bit(STRIPE_HANDLE, &sh->state); 563 release_stripe(sh); 564 } 565 566 static void ops_run_biofill(struct stripe_head *sh) 567 { 568 struct dma_async_tx_descriptor *tx = NULL; 569 raid5_conf_t *conf = sh->raid_conf; 570 int i; 571 572 pr_debug("%s: stripe %llu\n", __FUNCTION__, 573 (unsigned long long)sh->sector); 574 575 for (i = sh->disks; i--; ) { 576 struct r5dev *dev = &sh->dev[i]; 577 if (test_bit(R5_Wantfill, &dev->flags)) { 578 struct bio *rbi; 579 spin_lock_irq(&conf->device_lock); 580 dev->read = rbi = dev->toread; 581 dev->toread = NULL; 582 
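			/* The chain is handed from ->toread to ->read under
			 * device_lock; ops_complete_biofill() will later walk
			 * ->read without the lock, relying on the
			 * STRIPE_OP_BIOFILL pending bit for exclusion.
			 */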
spin_unlock_irq(&conf->device_lock); 583 while (rbi && rbi->bi_sector < 584 dev->sector + STRIPE_SECTORS) { 585 tx = async_copy_data(0, rbi, dev->page, 586 dev->sector, tx); 587 rbi = r5_next_bio(rbi, dev->sector); 588 } 589 } 590 } 591 592 atomic_inc(&sh->count); 593 async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx, 594 ops_complete_biofill, sh); 595 } 596 597 static void ops_complete_compute5(void *stripe_head_ref) 598 { 599 struct stripe_head *sh = stripe_head_ref; 600 int target = sh->ops.target; 601 struct r5dev *tgt = &sh->dev[target]; 602 603 pr_debug("%s: stripe %llu\n", __FUNCTION__, 604 (unsigned long long)sh->sector); 605 606 set_bit(R5_UPTODATE, &tgt->flags); 607 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 608 clear_bit(R5_Wantcompute, &tgt->flags); 609 set_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete); 610 set_bit(STRIPE_HANDLE, &sh->state); 611 release_stripe(sh); 612 } 613 614 static struct dma_async_tx_descriptor * 615 ops_run_compute5(struct stripe_head *sh, unsigned long pending) 616 { 617 /* kernel stack size limits the total number of disks */ 618 int disks = sh->disks; 619 struct page *xor_srcs[disks]; 620 int target = sh->ops.target; 621 struct r5dev *tgt = &sh->dev[target]; 622 struct page *xor_dest = tgt->page; 623 int count = 0; 624 struct dma_async_tx_descriptor *tx; 625 int i; 626 627 pr_debug("%s: stripe %llu block: %d\n", 628 __FUNCTION__, (unsigned long long)sh->sector, target); 629 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 630 631 for (i = disks; i--; ) 632 if (i != target) 633 xor_srcs[count++] = sh->dev[i].page; 634 635 atomic_inc(&sh->count); 636 637 if (unlikely(count == 1)) 638 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, 639 0, NULL, ops_complete_compute5, sh); 640 else 641 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 642 ASYNC_TX_XOR_ZERO_DST, NULL, 643 ops_complete_compute5, sh); 644 645 /* ack now if postxor is not set to be run */ 646 if (tx && !test_bit(STRIPE_OP_POSTXOR, &pending)) 647 async_tx_ack(tx); 648 649 return tx; 650 } 651 652 static void ops_complete_prexor(void *stripe_head_ref) 653 { 654 struct stripe_head *sh = stripe_head_ref; 655 656 pr_debug("%s: stripe %llu\n", __FUNCTION__, 657 (unsigned long long)sh->sector); 658 659 set_bit(STRIPE_OP_PREXOR, &sh->ops.complete); 660 } 661 662 static struct dma_async_tx_descriptor * 663 ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) 664 { 665 /* kernel stack size limits the total number of disks */ 666 int disks = sh->disks; 667 struct page *xor_srcs[disks]; 668 int count = 0, pd_idx = sh->pd_idx, i; 669 670 /* existing parity data subtracted */ 671 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 672 673 pr_debug("%s: stripe %llu\n", __FUNCTION__, 674 (unsigned long long)sh->sector); 675 676 for (i = disks; i--; ) { 677 struct r5dev *dev = &sh->dev[i]; 678 /* Only process blocks that are known to be uptodate */ 679 if (dev->towrite && test_bit(R5_Wantprexor, &dev->flags)) 680 xor_srcs[count++] = dev->page; 681 } 682 683 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 684 ASYNC_TX_DEP_ACK | ASYNC_TX_XOR_DROP_DST, tx, 685 ops_complete_prexor, sh); 686 687 return tx; 688 } 689 690 static struct dma_async_tx_descriptor * 691 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) 692 { 693 int disks = sh->disks; 694 int pd_idx = sh->pd_idx, i; 695 696 /* check if prexor is active which means only process blocks 697 * that are part of a read-modify-write (Wantprexor) 698 */ 699 
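	/* In the prexor (read-modify-write) case only blocks flagged
	 * R5_Wantprexor carry new data; in the reconstruct-write case every
	 * locked data block (i != pd_idx) with queued bios is drained from
	 * its towrite list into the stripe cache page.
	 */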
int prexor = test_bit(STRIPE_OP_PREXOR, &sh->ops.pending); 700 701 pr_debug("%s: stripe %llu\n", __FUNCTION__, 702 (unsigned long long)sh->sector); 703 704 for (i = disks; i--; ) { 705 struct r5dev *dev = &sh->dev[i]; 706 struct bio *chosen; 707 int towrite; 708 709 towrite = 0; 710 if (prexor) { /* rmw */ 711 if (dev->towrite && 712 test_bit(R5_Wantprexor, &dev->flags)) 713 towrite = 1; 714 } else { /* rcw */ 715 if (i != pd_idx && dev->towrite && 716 test_bit(R5_LOCKED, &dev->flags)) 717 towrite = 1; 718 } 719 720 if (towrite) { 721 struct bio *wbi; 722 723 spin_lock(&sh->lock); 724 chosen = dev->towrite; 725 dev->towrite = NULL; 726 BUG_ON(dev->written); 727 wbi = dev->written = chosen; 728 spin_unlock(&sh->lock); 729 730 while (wbi && wbi->bi_sector < 731 dev->sector + STRIPE_SECTORS) { 732 tx = async_copy_data(1, wbi, dev->page, 733 dev->sector, tx); 734 wbi = r5_next_bio(wbi, dev->sector); 735 } 736 } 737 } 738 739 return tx; 740 } 741 742 static void ops_complete_postxor(void *stripe_head_ref) 743 { 744 struct stripe_head *sh = stripe_head_ref; 745 746 pr_debug("%s: stripe %llu\n", __FUNCTION__, 747 (unsigned long long)sh->sector); 748 749 set_bit(STRIPE_OP_POSTXOR, &sh->ops.complete); 750 set_bit(STRIPE_HANDLE, &sh->state); 751 release_stripe(sh); 752 } 753 754 static void ops_complete_write(void *stripe_head_ref) 755 { 756 struct stripe_head *sh = stripe_head_ref; 757 int disks = sh->disks, i, pd_idx = sh->pd_idx; 758 759 pr_debug("%s: stripe %llu\n", __FUNCTION__, 760 (unsigned long long)sh->sector); 761 762 for (i = disks; i--; ) { 763 struct r5dev *dev = &sh->dev[i]; 764 if (dev->written || i == pd_idx) 765 set_bit(R5_UPTODATE, &dev->flags); 766 } 767 768 set_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete); 769 set_bit(STRIPE_OP_POSTXOR, &sh->ops.complete); 770 771 set_bit(STRIPE_HANDLE, &sh->state); 772 release_stripe(sh); 773 } 774 775 static void 776 ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) 777 { 778 /* kernel stack size limits the total number of disks */ 779 int disks = sh->disks; 780 struct page *xor_srcs[disks]; 781 782 int count = 0, pd_idx = sh->pd_idx, i; 783 struct page *xor_dest; 784 int prexor = test_bit(STRIPE_OP_PREXOR, &sh->ops.pending); 785 unsigned long flags; 786 dma_async_tx_callback callback; 787 788 pr_debug("%s: stripe %llu\n", __FUNCTION__, 789 (unsigned long long)sh->sector); 790 791 /* check if prexor is active which means only process blocks 792 * that are part of a read-modify-write (written) 793 */ 794 if (prexor) { 795 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 796 for (i = disks; i--; ) { 797 struct r5dev *dev = &sh->dev[i]; 798 if (dev->written) 799 xor_srcs[count++] = dev->page; 800 } 801 } else { 802 xor_dest = sh->dev[pd_idx].page; 803 for (i = disks; i--; ) { 804 struct r5dev *dev = &sh->dev[i]; 805 if (i != pd_idx) 806 xor_srcs[count++] = dev->page; 807 } 808 } 809 810 /* check whether this postxor is part of a write */ 811 callback = test_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending) ? 812 ops_complete_write : ops_complete_postxor; 813 814 /* 1/ if we prexor'd then the dest is reused as a source 815 * 2/ if we did not prexor then we are redoing the parity 816 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST 817 * for the synchronous xor case 818 */ 819 flags = ASYNC_TX_DEP_ACK | ASYNC_TX_ACK | 820 (prexor ? 
ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST); 821 822 atomic_inc(&sh->count); 823 824 if (unlikely(count == 1)) { 825 flags &= ~(ASYNC_TX_XOR_DROP_DST | ASYNC_TX_XOR_ZERO_DST); 826 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, 827 flags, tx, callback, sh); 828 } else 829 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 830 flags, tx, callback, sh); 831 } 832 833 static void ops_complete_check(void *stripe_head_ref) 834 { 835 struct stripe_head *sh = stripe_head_ref; 836 int pd_idx = sh->pd_idx; 837 838 pr_debug("%s: stripe %llu\n", __FUNCTION__, 839 (unsigned long long)sh->sector); 840 841 if (test_and_clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending) && 842 sh->ops.zero_sum_result == 0) 843 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 844 845 set_bit(STRIPE_OP_CHECK, &sh->ops.complete); 846 set_bit(STRIPE_HANDLE, &sh->state); 847 release_stripe(sh); 848 } 849 850 static void ops_run_check(struct stripe_head *sh) 851 { 852 /* kernel stack size limits the total number of disks */ 853 int disks = sh->disks; 854 struct page *xor_srcs[disks]; 855 struct dma_async_tx_descriptor *tx; 856 857 int count = 0, pd_idx = sh->pd_idx, i; 858 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 859 860 pr_debug("%s: stripe %llu\n", __FUNCTION__, 861 (unsigned long long)sh->sector); 862 863 for (i = disks; i--; ) { 864 struct r5dev *dev = &sh->dev[i]; 865 if (i != pd_idx) 866 xor_srcs[count++] = dev->page; 867 } 868 869 tx = async_xor_zero_sum(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 870 &sh->ops.zero_sum_result, 0, NULL, NULL, NULL); 871 872 if (tx) 873 set_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending); 874 else 875 clear_bit(STRIPE_OP_MOD_DMA_CHECK, &sh->ops.pending); 876 877 atomic_inc(&sh->count); 878 tx = async_trigger_callback(ASYNC_TX_DEP_ACK | ASYNC_TX_ACK, tx, 879 ops_complete_check, sh); 880 } 881 882 static void raid5_run_ops(struct stripe_head *sh, unsigned long pending) 883 { 884 int overlap_clear = 0, i, disks = sh->disks; 885 struct dma_async_tx_descriptor *tx = NULL; 886 887 if (test_bit(STRIPE_OP_BIOFILL, &pending)) { 888 ops_run_biofill(sh); 889 overlap_clear++; 890 } 891 892 if (test_bit(STRIPE_OP_COMPUTE_BLK, &pending)) 893 tx = ops_run_compute5(sh, pending); 894 895 if (test_bit(STRIPE_OP_PREXOR, &pending)) 896 tx = ops_run_prexor(sh, tx); 897 898 if (test_bit(STRIPE_OP_BIODRAIN, &pending)) { 899 tx = ops_run_biodrain(sh, tx); 900 overlap_clear++; 901 } 902 903 if (test_bit(STRIPE_OP_POSTXOR, &pending)) 904 ops_run_postxor(sh, tx); 905 906 if (test_bit(STRIPE_OP_CHECK, &pending)) 907 ops_run_check(sh); 908 909 if (test_bit(STRIPE_OP_IO, &pending)) 910 ops_run_io(sh); 911 912 if (overlap_clear) 913 for (i = disks; i--; ) { 914 struct r5dev *dev = &sh->dev[i]; 915 if (test_and_clear_bit(R5_Overlap, &dev->flags)) 916 wake_up(&sh->raid_conf->wait_for_overlap); 917 } 918 } 919 920 static int grow_one_stripe(raid5_conf_t *conf) 921 { 922 struct stripe_head *sh; 923 sh = kmem_cache_alloc(conf->slab_cache, GFP_KERNEL); 924 if (!sh) 925 return 0; 926 memset(sh, 0, sizeof(*sh) + (conf->raid_disks-1)*sizeof(struct r5dev)); 927 sh->raid_conf = conf; 928 spin_lock_init(&sh->lock); 929 930 if (grow_buffers(sh, conf->raid_disks)) { 931 shrink_buffers(sh, conf->raid_disks); 932 kmem_cache_free(conf->slab_cache, sh); 933 return 0; 934 } 935 sh->disks = conf->raid_disks; 936 /* we just created an active stripe so... 
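	 * the use count starts at 1 and active_stripes is bumped by hand, so
	 * the release_stripe() below takes the normal "last reference
	 * dropped" path in __release_stripe(), files the new stripe on the
	 * inactive list and wakes anyone waiting in get_active_stripe().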
	 */
	atomic_set(&sh->count, 1);
	atomic_inc(&conf->active_stripes);
	INIT_LIST_HEAD(&sh->lru);
	release_stripe(sh);
	return 1;
}

static int grow_stripes(raid5_conf_t *conf, int num)
{
	struct kmem_cache *sc;
	int devs = conf->raid_disks;

	sprintf(conf->cache_name[0], "raid5-%s", mdname(conf->mddev));
	sprintf(conf->cache_name[1], "raid5-%s-alt", mdname(conf->mddev));
	conf->active_name = 0;
	sc = kmem_cache_create(conf->cache_name[conf->active_name],
			       sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev),
			       0, 0, NULL, NULL);
	if (!sc)
		return 1;
	conf->slab_cache = sc;
	conf->pool_size = devs;
	while (num--)
		if (!grow_one_stripe(conf))
			return 1;
	return 0;
}

#ifdef CONFIG_MD_RAID5_RESHAPE
static int resize_stripes(raid5_conf_t *conf, int newsize)
{
	/* Make all the stripes able to hold 'newsize' devices.
	 * New slots in each stripe get 'page' set to a new page.
	 *
	 * This happens in stages:
	 * 1/ create a new kmem_cache and allocate the required number of
	 *    stripe_heads.
	 * 2/ gather all the old stripe_heads and transfer the pages across
	 *    to the new stripe_heads.  This will have the side effect of
	 *    freezing the array as once all stripe_heads have been collected,
	 *    no IO will be possible.  Old stripe heads are freed once their
	 *    pages have been transferred over, and the old kmem_cache is
	 *    freed when all stripes are done.
	 * 3/ reallocate conf->disks to be suitably bigger.  If this fails,
	 *    we simply return a failure status - no need to clean anything up.
	 * 4/ allocate new pages for the new slots in the new stripe_heads.
	 *    If this fails, we don't bother trying to shrink the
	 *    stripe_heads down again, we just leave them as they are.
	 *    As each stripe_head is processed the new one is released into
	 *    active service.
	 *
	 * Once step2 is started, we cannot afford to wait for a write,
	 * so we use GFP_NOIO allocations.
	 */
	struct stripe_head *osh, *nsh;
	LIST_HEAD(newstripes);
	struct disk_info *ndisks;
	int err = 0;
	struct kmem_cache *sc;
	int i;

	if (newsize <= conf->pool_size)
		return 0; /* never bother to shrink */

	md_allow_write(conf->mddev);

	/* Step 1 */
	sc = kmem_cache_create(conf->cache_name[1-conf->active_name],
			       sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev),
			       0, 0, NULL, NULL);
	if (!sc)
		return -ENOMEM;

	for (i = conf->max_nr_stripes; i; i--) {
		nsh = kmem_cache_alloc(sc, GFP_KERNEL);
		if (!nsh)
			break;

		memset(nsh, 0, sizeof(*nsh) + (newsize-1)*sizeof(struct r5dev));

		nsh->raid_conf = conf;
		spin_lock_init(&nsh->lock);

		list_add(&nsh->lru, &newstripes);
	}
	if (i) {
		/* didn't get enough, give up */
		while (!list_empty(&newstripes)) {
			nsh = list_entry(newstripes.next, struct stripe_head, lru);
			list_del(&nsh->lru);
			kmem_cache_free(sc, nsh);
		}
		kmem_cache_destroy(sc);
		return -ENOMEM;
	}
	/* Step 2 - Must use GFP_NOIO now.
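	 * (Step 1 could still use GFP_KERNEL because no stripes were held at
	 * that point; from here on the stripe cache is being drained, so an
	 * allocation that waited for writeback could deadlock against this
	 * array.)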
1033 * OK, we have enough stripes, start collecting inactive 1034 * stripes and copying them over 1035 */ 1036 list_for_each_entry(nsh, &newstripes, lru) { 1037 spin_lock_irq(&conf->device_lock); 1038 wait_event_lock_irq(conf->wait_for_stripe, 1039 !list_empty(&conf->inactive_list), 1040 conf->device_lock, 1041 unplug_slaves(conf->mddev) 1042 ); 1043 osh = get_free_stripe(conf); 1044 spin_unlock_irq(&conf->device_lock); 1045 atomic_set(&nsh->count, 1); 1046 for(i=0; i<conf->pool_size; i++) 1047 nsh->dev[i].page = osh->dev[i].page; 1048 for( ; i<newsize; i++) 1049 nsh->dev[i].page = NULL; 1050 kmem_cache_free(conf->slab_cache, osh); 1051 } 1052 kmem_cache_destroy(conf->slab_cache); 1053 1054 /* Step 3. 1055 * At this point, we are holding all the stripes so the array 1056 * is completely stalled, so now is a good time to resize 1057 * conf->disks. 1058 */ 1059 ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO); 1060 if (ndisks) { 1061 for (i=0; i<conf->raid_disks; i++) 1062 ndisks[i] = conf->disks[i]; 1063 kfree(conf->disks); 1064 conf->disks = ndisks; 1065 } else 1066 err = -ENOMEM; 1067 1068 /* Step 4, return new stripes to service */ 1069 while(!list_empty(&newstripes)) { 1070 nsh = list_entry(newstripes.next, struct stripe_head, lru); 1071 list_del_init(&nsh->lru); 1072 for (i=conf->raid_disks; i < newsize; i++) 1073 if (nsh->dev[i].page == NULL) { 1074 struct page *p = alloc_page(GFP_NOIO); 1075 nsh->dev[i].page = p; 1076 if (!p) 1077 err = -ENOMEM; 1078 } 1079 release_stripe(nsh); 1080 } 1081 /* critical section pass, GFP_NOIO no longer needed */ 1082 1083 conf->slab_cache = sc; 1084 conf->active_name = 1-conf->active_name; 1085 conf->pool_size = newsize; 1086 return err; 1087 } 1088 #endif 1089 1090 static int drop_one_stripe(raid5_conf_t *conf) 1091 { 1092 struct stripe_head *sh; 1093 1094 spin_lock_irq(&conf->device_lock); 1095 sh = get_free_stripe(conf); 1096 spin_unlock_irq(&conf->device_lock); 1097 if (!sh) 1098 return 0; 1099 BUG_ON(atomic_read(&sh->count)); 1100 shrink_buffers(sh, conf->pool_size); 1101 kmem_cache_free(conf->slab_cache, sh); 1102 atomic_dec(&conf->active_stripes); 1103 return 1; 1104 } 1105 1106 static void shrink_stripes(raid5_conf_t *conf) 1107 { 1108 while (drop_one_stripe(conf)) 1109 ; 1110 1111 if (conf->slab_cache) 1112 kmem_cache_destroy(conf->slab_cache); 1113 conf->slab_cache = NULL; 1114 } 1115 1116 static int raid5_end_read_request(struct bio * bi, unsigned int bytes_done, 1117 int error) 1118 { 1119 struct stripe_head *sh = bi->bi_private; 1120 raid5_conf_t *conf = sh->raid_conf; 1121 int disks = sh->disks, i; 1122 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 1123 char b[BDEVNAME_SIZE]; 1124 mdk_rdev_t *rdev; 1125 1126 if (bi->bi_size) 1127 return 1; 1128 1129 for (i=0 ; i<disks; i++) 1130 if (bi == &sh->dev[i].req) 1131 break; 1132 1133 pr_debug("end_read_request %llu/%d, count: %d, uptodate %d.\n", 1134 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 1135 uptodate); 1136 if (i == disks) { 1137 BUG(); 1138 return 0; 1139 } 1140 1141 if (uptodate) { 1142 set_bit(R5_UPTODATE, &sh->dev[i].flags); 1143 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 1144 rdev = conf->disks[i].rdev; 1145 printk(KERN_INFO "raid5:%s: read error corrected (%lu sectors at %llu on %s)\n", 1146 mdname(conf->mddev), STRIPE_SECTORS, 1147 (unsigned long long)sh->sector + rdev->data_offset, 1148 bdevname(rdev->bdev, b)); 1149 clear_bit(R5_ReadError, &sh->dev[i].flags); 1150 clear_bit(R5_ReWrite, &sh->dev[i].flags); 1151 } 1152 if 
(atomic_read(&conf->disks[i].rdev->read_errors)) 1153 atomic_set(&conf->disks[i].rdev->read_errors, 0); 1154 } else { 1155 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b); 1156 int retry = 0; 1157 rdev = conf->disks[i].rdev; 1158 1159 clear_bit(R5_UPTODATE, &sh->dev[i].flags); 1160 atomic_inc(&rdev->read_errors); 1161 if (conf->mddev->degraded) 1162 printk(KERN_WARNING "raid5:%s: read error not correctable (sector %llu on %s).\n", 1163 mdname(conf->mddev), 1164 (unsigned long long)sh->sector + rdev->data_offset, 1165 bdn); 1166 else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) 1167 /* Oh, no!!! */ 1168 printk(KERN_WARNING "raid5:%s: read error NOT corrected!! (sector %llu on %s).\n", 1169 mdname(conf->mddev), 1170 (unsigned long long)sh->sector + rdev->data_offset, 1171 bdn); 1172 else if (atomic_read(&rdev->read_errors) 1173 > conf->max_nr_stripes) 1174 printk(KERN_WARNING 1175 "raid5:%s: Too many read errors, failing device %s.\n", 1176 mdname(conf->mddev), bdn); 1177 else 1178 retry = 1; 1179 if (retry) 1180 set_bit(R5_ReadError, &sh->dev[i].flags); 1181 else { 1182 clear_bit(R5_ReadError, &sh->dev[i].flags); 1183 clear_bit(R5_ReWrite, &sh->dev[i].flags); 1184 md_error(conf->mddev, rdev); 1185 } 1186 } 1187 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); 1188 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1189 set_bit(STRIPE_HANDLE, &sh->state); 1190 release_stripe(sh); 1191 return 0; 1192 } 1193 1194 static int raid5_end_write_request (struct bio *bi, unsigned int bytes_done, 1195 int error) 1196 { 1197 struct stripe_head *sh = bi->bi_private; 1198 raid5_conf_t *conf = sh->raid_conf; 1199 int disks = sh->disks, i; 1200 int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags); 1201 1202 if (bi->bi_size) 1203 return 1; 1204 1205 for (i=0 ; i<disks; i++) 1206 if (bi == &sh->dev[i].req) 1207 break; 1208 1209 pr_debug("end_write_request %llu/%d, count %d, uptodate: %d.\n", 1210 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 1211 uptodate); 1212 if (i == disks) { 1213 BUG(); 1214 return 0; 1215 } 1216 1217 if (!uptodate) 1218 md_error(conf->mddev, conf->disks[i].rdev); 1219 1220 rdev_dec_pending(conf->disks[i].rdev, conf->mddev); 1221 1222 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1223 set_bit(STRIPE_HANDLE, &sh->state); 1224 release_stripe(sh); 1225 return 0; 1226 } 1227 1228 1229 static sector_t compute_blocknr(struct stripe_head *sh, int i); 1230 1231 static void raid5_build_block (struct stripe_head *sh, int i) 1232 { 1233 struct r5dev *dev = &sh->dev[i]; 1234 1235 bio_init(&dev->req); 1236 dev->req.bi_io_vec = &dev->vec; 1237 dev->req.bi_vcnt++; 1238 dev->req.bi_max_vecs++; 1239 dev->vec.bv_page = dev->page; 1240 dev->vec.bv_len = STRIPE_SIZE; 1241 dev->vec.bv_offset = 0; 1242 1243 dev->req.bi_sector = sh->sector; 1244 dev->req.bi_private = sh; 1245 1246 dev->flags = 0; 1247 dev->sector = compute_blocknr(sh, i); 1248 } 1249 1250 static void error(mddev_t *mddev, mdk_rdev_t *rdev) 1251 { 1252 char b[BDEVNAME_SIZE]; 1253 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 1254 pr_debug("raid5: error called\n"); 1255 1256 if (!test_bit(Faulty, &rdev->flags)) { 1257 set_bit(MD_CHANGE_DEVS, &mddev->flags); 1258 if (test_and_clear_bit(In_sync, &rdev->flags)) { 1259 unsigned long flags; 1260 spin_lock_irqsave(&conf->device_lock, flags); 1261 mddev->degraded++; 1262 spin_unlock_irqrestore(&conf->device_lock, flags); 1263 /* 1264 * if recovery was running, make sure it aborts. 
1265 */ 1266 set_bit(MD_RECOVERY_ERR, &mddev->recovery); 1267 } 1268 set_bit(Faulty, &rdev->flags); 1269 printk (KERN_ALERT 1270 "raid5: Disk failure on %s, disabling device." 1271 " Operation continuing on %d devices\n", 1272 bdevname(rdev->bdev,b), conf->raid_disks - mddev->degraded); 1273 } 1274 } 1275 1276 /* 1277 * Input: a 'big' sector number, 1278 * Output: index of the data and parity disk, and the sector # in them. 1279 */ 1280 static sector_t raid5_compute_sector(sector_t r_sector, unsigned int raid_disks, 1281 unsigned int data_disks, unsigned int * dd_idx, 1282 unsigned int * pd_idx, raid5_conf_t *conf) 1283 { 1284 long stripe; 1285 unsigned long chunk_number; 1286 unsigned int chunk_offset; 1287 sector_t new_sector; 1288 int sectors_per_chunk = conf->chunk_size >> 9; 1289 1290 /* First compute the information on this sector */ 1291 1292 /* 1293 * Compute the chunk number and the sector offset inside the chunk 1294 */ 1295 chunk_offset = sector_div(r_sector, sectors_per_chunk); 1296 chunk_number = r_sector; 1297 BUG_ON(r_sector != chunk_number); 1298 1299 /* 1300 * Compute the stripe number 1301 */ 1302 stripe = chunk_number / data_disks; 1303 1304 /* 1305 * Compute the data disk and parity disk indexes inside the stripe 1306 */ 1307 *dd_idx = chunk_number % data_disks; 1308 1309 /* 1310 * Select the parity disk based on the user selected algorithm. 1311 */ 1312 switch(conf->level) { 1313 case 4: 1314 *pd_idx = data_disks; 1315 break; 1316 case 5: 1317 switch (conf->algorithm) { 1318 case ALGORITHM_LEFT_ASYMMETRIC: 1319 *pd_idx = data_disks - stripe % raid_disks; 1320 if (*dd_idx >= *pd_idx) 1321 (*dd_idx)++; 1322 break; 1323 case ALGORITHM_RIGHT_ASYMMETRIC: 1324 *pd_idx = stripe % raid_disks; 1325 if (*dd_idx >= *pd_idx) 1326 (*dd_idx)++; 1327 break; 1328 case ALGORITHM_LEFT_SYMMETRIC: 1329 *pd_idx = data_disks - stripe % raid_disks; 1330 *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks; 1331 break; 1332 case ALGORITHM_RIGHT_SYMMETRIC: 1333 *pd_idx = stripe % raid_disks; 1334 *dd_idx = (*pd_idx + 1 + *dd_idx) % raid_disks; 1335 break; 1336 default: 1337 printk(KERN_ERR "raid5: unsupported algorithm %d\n", 1338 conf->algorithm); 1339 } 1340 break; 1341 case 6: 1342 1343 /**** FIX THIS ****/ 1344 switch (conf->algorithm) { 1345 case ALGORITHM_LEFT_ASYMMETRIC: 1346 *pd_idx = raid_disks - 1 - (stripe % raid_disks); 1347 if (*pd_idx == raid_disks-1) 1348 (*dd_idx)++; /* Q D D D P */ 1349 else if (*dd_idx >= *pd_idx) 1350 (*dd_idx) += 2; /* D D P Q D */ 1351 break; 1352 case ALGORITHM_RIGHT_ASYMMETRIC: 1353 *pd_idx = stripe % raid_disks; 1354 if (*pd_idx == raid_disks-1) 1355 (*dd_idx)++; /* Q D D D P */ 1356 else if (*dd_idx >= *pd_idx) 1357 (*dd_idx) += 2; /* D D P Q D */ 1358 break; 1359 case ALGORITHM_LEFT_SYMMETRIC: 1360 *pd_idx = raid_disks - 1 - (stripe % raid_disks); 1361 *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks; 1362 break; 1363 case ALGORITHM_RIGHT_SYMMETRIC: 1364 *pd_idx = stripe % raid_disks; 1365 *dd_idx = (*pd_idx + 2 + *dd_idx) % raid_disks; 1366 break; 1367 default: 1368 printk (KERN_CRIT "raid6: unsupported algorithm %d\n", 1369 conf->algorithm); 1370 } 1371 break; 1372 } 1373 1374 /* 1375 * Finally, compute the new sector number 1376 */ 1377 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset; 1378 return new_sector; 1379 } 1380 1381 1382 static sector_t compute_blocknr(struct stripe_head *sh, int i) 1383 { 1384 raid5_conf_t *conf = sh->raid_conf; 1385 int raid_disks = sh->disks; 1386 int data_disks = raid_disks - conf->max_degraded; 1387 sector_t 
new_sector = sh->sector, check; 1388 int sectors_per_chunk = conf->chunk_size >> 9; 1389 sector_t stripe; 1390 int chunk_offset; 1391 int chunk_number, dummy1, dummy2, dd_idx = i; 1392 sector_t r_sector; 1393 1394 1395 chunk_offset = sector_div(new_sector, sectors_per_chunk); 1396 stripe = new_sector; 1397 BUG_ON(new_sector != stripe); 1398 1399 if (i == sh->pd_idx) 1400 return 0; 1401 switch(conf->level) { 1402 case 4: break; 1403 case 5: 1404 switch (conf->algorithm) { 1405 case ALGORITHM_LEFT_ASYMMETRIC: 1406 case ALGORITHM_RIGHT_ASYMMETRIC: 1407 if (i > sh->pd_idx) 1408 i--; 1409 break; 1410 case ALGORITHM_LEFT_SYMMETRIC: 1411 case ALGORITHM_RIGHT_SYMMETRIC: 1412 if (i < sh->pd_idx) 1413 i += raid_disks; 1414 i -= (sh->pd_idx + 1); 1415 break; 1416 default: 1417 printk(KERN_ERR "raid5: unsupported algorithm %d\n", 1418 conf->algorithm); 1419 } 1420 break; 1421 case 6: 1422 if (i == raid6_next_disk(sh->pd_idx, raid_disks)) 1423 return 0; /* It is the Q disk */ 1424 switch (conf->algorithm) { 1425 case ALGORITHM_LEFT_ASYMMETRIC: 1426 case ALGORITHM_RIGHT_ASYMMETRIC: 1427 if (sh->pd_idx == raid_disks-1) 1428 i--; /* Q D D D P */ 1429 else if (i > sh->pd_idx) 1430 i -= 2; /* D D P Q D */ 1431 break; 1432 case ALGORITHM_LEFT_SYMMETRIC: 1433 case ALGORITHM_RIGHT_SYMMETRIC: 1434 if (sh->pd_idx == raid_disks-1) 1435 i--; /* Q D D D P */ 1436 else { 1437 /* D D P Q D */ 1438 if (i < sh->pd_idx) 1439 i += raid_disks; 1440 i -= (sh->pd_idx + 2); 1441 } 1442 break; 1443 default: 1444 printk (KERN_CRIT "raid6: unsupported algorithm %d\n", 1445 conf->algorithm); 1446 } 1447 break; 1448 } 1449 1450 chunk_number = stripe * data_disks + i; 1451 r_sector = (sector_t)chunk_number * sectors_per_chunk + chunk_offset; 1452 1453 check = raid5_compute_sector (r_sector, raid_disks, data_disks, &dummy1, &dummy2, conf); 1454 if (check != sh->sector || dummy1 != dd_idx || dummy2 != sh->pd_idx) { 1455 printk(KERN_ERR "compute_blocknr: map not correct\n"); 1456 return 0; 1457 } 1458 return r_sector; 1459 } 1460 1461 1462 1463 /* 1464 * Copy data between a page in the stripe cache, and one or more bion 1465 * The page could align with the middle of the bio, or there could be 1466 * several bion, each with several bio_vecs, which cover part of the page 1467 * Multiple bion are linked together on bi_next. There may be extras 1468 * at the end of this list. We ignore them. 
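 *
 * The arithmetic below works in bytes: with the cache page covering
 * [sector, sector + STRIPE_SECTORS), page_offset starts at
 * (bi_sector - sector) * 512 and may be negative, in which case the first
 * b_offset bytes of a bio_vec are skipped; each copy is clipped at
 * STRIPE_SIZE.  For instance, a bio beginning one sector before 'sector'
 * has its second 512-byte block copied to page offset 0.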
1469 */ 1470 static void copy_data(int frombio, struct bio *bio, 1471 struct page *page, 1472 sector_t sector) 1473 { 1474 char *pa = page_address(page); 1475 struct bio_vec *bvl; 1476 int i; 1477 int page_offset; 1478 1479 if (bio->bi_sector >= sector) 1480 page_offset = (signed)(bio->bi_sector - sector) * 512; 1481 else 1482 page_offset = (signed)(sector - bio->bi_sector) * -512; 1483 bio_for_each_segment(bvl, bio, i) { 1484 int len = bio_iovec_idx(bio,i)->bv_len; 1485 int clen; 1486 int b_offset = 0; 1487 1488 if (page_offset < 0) { 1489 b_offset = -page_offset; 1490 page_offset += b_offset; 1491 len -= b_offset; 1492 } 1493 1494 if (len > 0 && page_offset + len > STRIPE_SIZE) 1495 clen = STRIPE_SIZE - page_offset; 1496 else clen = len; 1497 1498 if (clen > 0) { 1499 char *ba = __bio_kmap_atomic(bio, i, KM_USER0); 1500 if (frombio) 1501 memcpy(pa+page_offset, ba+b_offset, clen); 1502 else 1503 memcpy(ba+b_offset, pa+page_offset, clen); 1504 __bio_kunmap_atomic(ba, KM_USER0); 1505 } 1506 if (clen < len) /* hit end of page */ 1507 break; 1508 page_offset += len; 1509 } 1510 } 1511 1512 #define check_xor() do { \ 1513 if (count == MAX_XOR_BLOCKS) { \ 1514 xor_blocks(count, STRIPE_SIZE, dest, ptr);\ 1515 count = 0; \ 1516 } \ 1517 } while(0) 1518 1519 static void compute_parity6(struct stripe_head *sh, int method) 1520 { 1521 raid6_conf_t *conf = sh->raid_conf; 1522 int i, pd_idx = sh->pd_idx, qd_idx, d0_idx, disks = sh->disks, count; 1523 struct bio *chosen; 1524 /**** FIX THIS: This could be very bad if disks is close to 256 ****/ 1525 void *ptrs[disks]; 1526 1527 qd_idx = raid6_next_disk(pd_idx, disks); 1528 d0_idx = raid6_next_disk(qd_idx, disks); 1529 1530 pr_debug("compute_parity, stripe %llu, method %d\n", 1531 (unsigned long long)sh->sector, method); 1532 1533 switch(method) { 1534 case READ_MODIFY_WRITE: 1535 BUG(); /* READ_MODIFY_WRITE N/A for RAID-6 */ 1536 case RECONSTRUCT_WRITE: 1537 for (i= disks; i-- ;) 1538 if ( i != pd_idx && i != qd_idx && sh->dev[i].towrite ) { 1539 chosen = sh->dev[i].towrite; 1540 sh->dev[i].towrite = NULL; 1541 1542 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 1543 wake_up(&conf->wait_for_overlap); 1544 1545 BUG_ON(sh->dev[i].written); 1546 sh->dev[i].written = chosen; 1547 } 1548 break; 1549 case CHECK_PARITY: 1550 BUG(); /* Not implemented yet */ 1551 } 1552 1553 for (i = disks; i--;) 1554 if (sh->dev[i].written) { 1555 sector_t sector = sh->dev[i].sector; 1556 struct bio *wbi = sh->dev[i].written; 1557 while (wbi && wbi->bi_sector < sector + STRIPE_SECTORS) { 1558 copy_data(1, wbi, sh->dev[i].page, sector); 1559 wbi = r5_next_bio(wbi, sector); 1560 } 1561 1562 set_bit(R5_LOCKED, &sh->dev[i].flags); 1563 set_bit(R5_UPTODATE, &sh->dev[i].flags); 1564 } 1565 1566 // switch(method) { 1567 // case RECONSTRUCT_WRITE: 1568 // case CHECK_PARITY: 1569 // case UPDATE_PARITY: 1570 /* Note that unlike RAID-5, the ordering of the disks matters greatly. */ 1571 /* FIX: Is this ordering of drives even remotely optimal? 
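	 *
	 * (gen_syndrome() wants ptrs[] to be the data blocks in order,
	 * followed by P and then Q, so the walk starts at d0_idx, the disk
	 * after Q, and wraps with raid6_next_disk(); that leaves P at
	 * ptrs[disks-2] and Q at ptrs[disks-1], matching the layout
	 * compute_block_2() relies on.)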
*/ 1572 count = 0; 1573 i = d0_idx; 1574 do { 1575 ptrs[count++] = page_address(sh->dev[i].page); 1576 if (count <= disks-2 && !test_bit(R5_UPTODATE, &sh->dev[i].flags)) 1577 printk("block %d/%d not uptodate on parity calc\n", i,count); 1578 i = raid6_next_disk(i, disks); 1579 } while ( i != d0_idx ); 1580 // break; 1581 // } 1582 1583 raid6_call.gen_syndrome(disks, STRIPE_SIZE, ptrs); 1584 1585 switch(method) { 1586 case RECONSTRUCT_WRITE: 1587 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 1588 set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags); 1589 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); 1590 set_bit(R5_LOCKED, &sh->dev[qd_idx].flags); 1591 break; 1592 case UPDATE_PARITY: 1593 set_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 1594 set_bit(R5_UPTODATE, &sh->dev[qd_idx].flags); 1595 break; 1596 } 1597 } 1598 1599 1600 /* Compute one missing block */ 1601 static void compute_block_1(struct stripe_head *sh, int dd_idx, int nozero) 1602 { 1603 int i, count, disks = sh->disks; 1604 void *ptr[MAX_XOR_BLOCKS], *dest, *p; 1605 int pd_idx = sh->pd_idx; 1606 int qd_idx = raid6_next_disk(pd_idx, disks); 1607 1608 pr_debug("compute_block_1, stripe %llu, idx %d\n", 1609 (unsigned long long)sh->sector, dd_idx); 1610 1611 if ( dd_idx == qd_idx ) { 1612 /* We're actually computing the Q drive */ 1613 compute_parity6(sh, UPDATE_PARITY); 1614 } else { 1615 dest = page_address(sh->dev[dd_idx].page); 1616 if (!nozero) memset(dest, 0, STRIPE_SIZE); 1617 count = 0; 1618 for (i = disks ; i--; ) { 1619 if (i == dd_idx || i == qd_idx) 1620 continue; 1621 p = page_address(sh->dev[i].page); 1622 if (test_bit(R5_UPTODATE, &sh->dev[i].flags)) 1623 ptr[count++] = p; 1624 else 1625 printk("compute_block() %d, stripe %llu, %d" 1626 " not present\n", dd_idx, 1627 (unsigned long long)sh->sector, i); 1628 1629 check_xor(); 1630 } 1631 if (count) 1632 xor_blocks(count, STRIPE_SIZE, dest, ptr); 1633 if (!nozero) set_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); 1634 else clear_bit(R5_UPTODATE, &sh->dev[dd_idx].flags); 1635 } 1636 } 1637 1638 /* Compute two missing blocks */ 1639 static void compute_block_2(struct stripe_head *sh, int dd_idx1, int dd_idx2) 1640 { 1641 int i, count, disks = sh->disks; 1642 int pd_idx = sh->pd_idx; 1643 int qd_idx = raid6_next_disk(pd_idx, disks); 1644 int d0_idx = raid6_next_disk(qd_idx, disks); 1645 int faila, failb; 1646 1647 /* faila and failb are disk numbers relative to d0_idx */ 1648 /* pd_idx become disks-2 and qd_idx become disks-1 */ 1649 faila = (dd_idx1 < d0_idx) ? dd_idx1+(disks-d0_idx) : dd_idx1-d0_idx; 1650 failb = (dd_idx2 < d0_idx) ? dd_idx2+(disks-d0_idx) : dd_idx2-d0_idx; 1651 1652 BUG_ON(faila == failb); 1653 if ( failb < faila ) { int tmp = faila; faila = failb; failb = tmp; } 1654 1655 pr_debug("compute_block_2, stripe %llu, idx %d,%d (%d,%d)\n", 1656 (unsigned long long)sh->sector, dd_idx1, dd_idx2, faila, failb); 1657 1658 if ( failb == disks-1 ) { 1659 /* Q disk is one of the missing disks */ 1660 if ( faila == disks-2 ) { 1661 /* Missing P+Q, just recompute */ 1662 compute_parity6(sh, UPDATE_PARITY); 1663 return; 1664 } else { 1665 /* We're missing D+Q; recompute D from P */ 1666 compute_block_1(sh, (dd_idx1 == qd_idx) ? dd_idx2 : dd_idx1, 0); 1667 compute_parity6(sh, UPDATE_PARITY); /* Is this necessary? 
*/ 1668 return; 1669 } 1670 } 1671 1672 /* We're missing D+P or D+D; build pointer table */ 1673 { 1674 /**** FIX THIS: This could be very bad if disks is close to 256 ****/ 1675 void *ptrs[disks]; 1676 1677 count = 0; 1678 i = d0_idx; 1679 do { 1680 ptrs[count++] = page_address(sh->dev[i].page); 1681 i = raid6_next_disk(i, disks); 1682 if (i != dd_idx1 && i != dd_idx2 && 1683 !test_bit(R5_UPTODATE, &sh->dev[i].flags)) 1684 printk("compute_2 with missing block %d/%d\n", count, i); 1685 } while ( i != d0_idx ); 1686 1687 if ( failb == disks-2 ) { 1688 /* We're missing D+P. */ 1689 raid6_datap_recov(disks, STRIPE_SIZE, faila, ptrs); 1690 } else { 1691 /* We're missing D+D. */ 1692 raid6_2data_recov(disks, STRIPE_SIZE, faila, failb, ptrs); 1693 } 1694 1695 /* Both the above update both missing blocks */ 1696 set_bit(R5_UPTODATE, &sh->dev[dd_idx1].flags); 1697 set_bit(R5_UPTODATE, &sh->dev[dd_idx2].flags); 1698 } 1699 } 1700 1701 static int 1702 handle_write_operations5(struct stripe_head *sh, int rcw, int expand) 1703 { 1704 int i, pd_idx = sh->pd_idx, disks = sh->disks; 1705 int locked = 0; 1706 1707 if (rcw) { 1708 /* if we are not expanding this is a proper write request, and 1709 * there will be bios with new data to be drained into the 1710 * stripe cache 1711 */ 1712 if (!expand) { 1713 set_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending); 1714 sh->ops.count++; 1715 } 1716 1717 set_bit(STRIPE_OP_POSTXOR, &sh->ops.pending); 1718 sh->ops.count++; 1719 1720 for (i = disks; i--; ) { 1721 struct r5dev *dev = &sh->dev[i]; 1722 1723 if (dev->towrite) { 1724 set_bit(R5_LOCKED, &dev->flags); 1725 if (!expand) 1726 clear_bit(R5_UPTODATE, &dev->flags); 1727 locked++; 1728 } 1729 } 1730 } else { 1731 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || 1732 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); 1733 1734 set_bit(STRIPE_OP_PREXOR, &sh->ops.pending); 1735 set_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending); 1736 set_bit(STRIPE_OP_POSTXOR, &sh->ops.pending); 1737 1738 sh->ops.count += 3; 1739 1740 for (i = disks; i--; ) { 1741 struct r5dev *dev = &sh->dev[i]; 1742 if (i == pd_idx) 1743 continue; 1744 1745 /* For a read-modify write there may be blocks that are 1746 * locked for reading while others are ready to be 1747 * written so we distinguish these blocks by the 1748 * R5_Wantprexor bit 1749 */ 1750 if (dev->towrite && 1751 (test_bit(R5_UPTODATE, &dev->flags) || 1752 test_bit(R5_Wantcompute, &dev->flags))) { 1753 set_bit(R5_Wantprexor, &dev->flags); 1754 set_bit(R5_LOCKED, &dev->flags); 1755 clear_bit(R5_UPTODATE, &dev->flags); 1756 locked++; 1757 } 1758 } 1759 } 1760 1761 /* keep the parity disk locked while asynchronous operations 1762 * are in flight 1763 */ 1764 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); 1765 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 1766 locked++; 1767 1768 pr_debug("%s: stripe %llu locked: %d pending: %lx\n", 1769 __FUNCTION__, (unsigned long long)sh->sector, 1770 locked, sh->ops.pending); 1771 1772 return locked; 1773 } 1774 1775 /* 1776 * Each stripe/dev can have one or more bion attached. 1777 * toread/towrite point to the first in a chain. 1778 * The bi_next chain must be in order. 
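 *
 * add_stripe_bio() below walks the chain to find the insertion point and
 * refuses a bio whose sector range overlaps an existing one; the caller
 * sees a 0 return and can wait for the overlap to clear (conf->wait_for_overlap
 * is woken when R5_Overlap is cleared) before retrying.  bi_phys_segments is
 * reused as a reference count so a bio spanning several stripes is only
 * completed once the last stripe has finished with it.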
1779 */ 1780 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite) 1781 { 1782 struct bio **bip; 1783 raid5_conf_t *conf = sh->raid_conf; 1784 int firstwrite=0; 1785 1786 pr_debug("adding bh b#%llu to stripe s#%llu\n", 1787 (unsigned long long)bi->bi_sector, 1788 (unsigned long long)sh->sector); 1789 1790 1791 spin_lock(&sh->lock); 1792 spin_lock_irq(&conf->device_lock); 1793 if (forwrite) { 1794 bip = &sh->dev[dd_idx].towrite; 1795 if (*bip == NULL && sh->dev[dd_idx].written == NULL) 1796 firstwrite = 1; 1797 } else 1798 bip = &sh->dev[dd_idx].toread; 1799 while (*bip && (*bip)->bi_sector < bi->bi_sector) { 1800 if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector) 1801 goto overlap; 1802 bip = & (*bip)->bi_next; 1803 } 1804 if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9)) 1805 goto overlap; 1806 1807 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); 1808 if (*bip) 1809 bi->bi_next = *bip; 1810 *bip = bi; 1811 bi->bi_phys_segments ++; 1812 spin_unlock_irq(&conf->device_lock); 1813 spin_unlock(&sh->lock); 1814 1815 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", 1816 (unsigned long long)bi->bi_sector, 1817 (unsigned long long)sh->sector, dd_idx); 1818 1819 if (conf->mddev->bitmap && firstwrite) { 1820 bitmap_startwrite(conf->mddev->bitmap, sh->sector, 1821 STRIPE_SECTORS, 0); 1822 sh->bm_seq = conf->seq_flush+1; 1823 set_bit(STRIPE_BIT_DELAY, &sh->state); 1824 } 1825 1826 if (forwrite) { 1827 /* check if page is covered */ 1828 sector_t sector = sh->dev[dd_idx].sector; 1829 for (bi=sh->dev[dd_idx].towrite; 1830 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && 1831 bi && bi->bi_sector <= sector; 1832 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { 1833 if (bi->bi_sector + (bi->bi_size>>9) >= sector) 1834 sector = bi->bi_sector + (bi->bi_size>>9); 1835 } 1836 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) 1837 set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags); 1838 } 1839 return 1; 1840 1841 overlap: 1842 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); 1843 spin_unlock_irq(&conf->device_lock); 1844 spin_unlock(&sh->lock); 1845 return 0; 1846 } 1847 1848 static void end_reshape(raid5_conf_t *conf); 1849 1850 static int page_is_zero(struct page *p) 1851 { 1852 char *a = page_address(p); 1853 return ((*(u32*)a) == 0 && 1854 memcmp(a, a+4, STRIPE_SIZE-4)==0); 1855 } 1856 1857 static int stripe_to_pdidx(sector_t stripe, raid5_conf_t *conf, int disks) 1858 { 1859 int sectors_per_chunk = conf->chunk_size >> 9; 1860 int pd_idx, dd_idx; 1861 int chunk_offset = sector_div(stripe, sectors_per_chunk); 1862 1863 raid5_compute_sector(stripe * (disks - conf->max_degraded) 1864 *sectors_per_chunk + chunk_offset, 1865 disks, disks - conf->max_degraded, 1866 &dd_idx, &pd_idx, conf); 1867 return pd_idx; 1868 } 1869 1870 static void 1871 handle_requests_to_failed_array(raid5_conf_t *conf, struct stripe_head *sh, 1872 struct stripe_head_state *s, int disks, 1873 struct bio **return_bi) 1874 { 1875 int i; 1876 for (i = disks; i--; ) { 1877 struct bio *bi; 1878 int bitmap_end = 0; 1879 1880 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 1881 mdk_rdev_t *rdev; 1882 rcu_read_lock(); 1883 rdev = rcu_dereference(conf->disks[i].rdev); 1884 if (rdev && test_bit(In_sync, &rdev->flags)) 1885 /* multiple read failures in one stripe */ 1886 md_error(conf->mddev, rdev); 1887 rcu_read_unlock(); 1888 } 1889 spin_lock_irq(&conf->device_lock); 1890 /* fail all writes first */ 1891 bi = sh->dev[i].towrite; 1892 sh->dev[i].towrite = NULL; 1893 if 
(bi) { 1894 s->to_write--; 1895 bitmap_end = 1; 1896 } 1897 1898 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 1899 wake_up(&conf->wait_for_overlap); 1900 1901 while (bi && bi->bi_sector < 1902 sh->dev[i].sector + STRIPE_SECTORS) { 1903 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 1904 clear_bit(BIO_UPTODATE, &bi->bi_flags); 1905 if (--bi->bi_phys_segments == 0) { 1906 md_write_end(conf->mddev); 1907 bi->bi_next = *return_bi; 1908 *return_bi = bi; 1909 } 1910 bi = nextbi; 1911 } 1912 /* and fail all 'written' */ 1913 bi = sh->dev[i].written; 1914 sh->dev[i].written = NULL; 1915 if (bi) bitmap_end = 1; 1916 while (bi && bi->bi_sector < 1917 sh->dev[i].sector + STRIPE_SECTORS) { 1918 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 1919 clear_bit(BIO_UPTODATE, &bi->bi_flags); 1920 if (--bi->bi_phys_segments == 0) { 1921 md_write_end(conf->mddev); 1922 bi->bi_next = *return_bi; 1923 *return_bi = bi; 1924 } 1925 bi = bi2; 1926 } 1927 1928 /* fail any reads if this device is non-operational and 1929 * the data has not reached the cache yet. 1930 */ 1931 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && 1932 (!test_bit(R5_Insync, &sh->dev[i].flags) || 1933 test_bit(R5_ReadError, &sh->dev[i].flags))) { 1934 bi = sh->dev[i].toread; 1935 sh->dev[i].toread = NULL; 1936 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 1937 wake_up(&conf->wait_for_overlap); 1938 if (bi) s->to_read--; 1939 while (bi && bi->bi_sector < 1940 sh->dev[i].sector + STRIPE_SECTORS) { 1941 struct bio *nextbi = 1942 r5_next_bio(bi, sh->dev[i].sector); 1943 clear_bit(BIO_UPTODATE, &bi->bi_flags); 1944 if (--bi->bi_phys_segments == 0) { 1945 bi->bi_next = *return_bi; 1946 *return_bi = bi; 1947 } 1948 bi = nextbi; 1949 } 1950 } 1951 spin_unlock_irq(&conf->device_lock); 1952 if (bitmap_end) 1953 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 1954 STRIPE_SECTORS, 0, 0); 1955 } 1956 1957 } 1958 1959 /* __handle_issuing_new_read_requests5 - returns 0 if there are no more disks 1960 * to process 1961 */ 1962 static int __handle_issuing_new_read_requests5(struct stripe_head *sh, 1963 struct stripe_head_state *s, int disk_idx, int disks) 1964 { 1965 struct r5dev *dev = &sh->dev[disk_idx]; 1966 struct r5dev *failed_dev = &sh->dev[s->failed_num]; 1967 1968 /* don't schedule compute operations or reads on the parity block while 1969 * a check is in flight 1970 */ 1971 if ((disk_idx == sh->pd_idx) && 1972 test_bit(STRIPE_OP_CHECK, &sh->ops.pending)) 1973 return ~0; 1974 1975 /* is the data in this block needed, and can we get it? */ 1976 if (!test_bit(R5_LOCKED, &dev->flags) && 1977 !test_bit(R5_UPTODATE, &dev->flags) && (dev->toread || 1978 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags)) || 1979 s->syncing || s->expanding || (s->failed && 1980 (failed_dev->toread || (failed_dev->towrite && 1981 !test_bit(R5_OVERWRITE, &failed_dev->flags) 1982 ))))) { 1983 /* 1/ We would like to get this block, possibly by computing it, 1984 * but we might not be able to. 1985 * 1986 * 2/ Since parity check operations potentially make the parity 1987 * block !uptodate it will need to be refreshed before any 1988 * compute operations on data disks are scheduled. 1989 * 1990 * 3/ We hold off parity block re-reads until check operations 1991 * have quiesced. 
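	 *
	 * In short: when exactly one block of the stripe is missing we
	 * prefer to compute it from the other disks-1 blocks rather than
	 * read it, and only fall back to a real read (if the device is
	 * in-sync) while more than one block is still missing.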
1992 */ 1993 if ((s->uptodate == disks - 1) && 1994 !test_bit(STRIPE_OP_CHECK, &sh->ops.pending)) { 1995 set_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending); 1996 set_bit(R5_Wantcompute, &dev->flags); 1997 sh->ops.target = disk_idx; 1998 s->req_compute = 1; 1999 sh->ops.count++; 2000 /* Careful: from this point on 'uptodate' is in the eye 2001 * of raid5_run_ops which services 'compute' operations 2002 * before writes. R5_Wantcompute flags a block that will 2003 * be R5_UPTODATE by the time it is needed for a 2004 * subsequent operation. 2005 */ 2006 s->uptodate++; 2007 return 0; /* uptodate + compute == disks */ 2008 } else if ((s->uptodate < disks - 1) && 2009 test_bit(R5_Insync, &dev->flags)) { 2010 /* Note: we hold off compute operations while checks are 2011 * in flight, but we still prefer 'compute' over 'read' 2012 * hence we only read if (uptodate < * disks-1) 2013 */ 2014 set_bit(R5_LOCKED, &dev->flags); 2015 set_bit(R5_Wantread, &dev->flags); 2016 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending)) 2017 sh->ops.count++; 2018 s->locked++; 2019 pr_debug("Reading block %d (sync=%d)\n", disk_idx, 2020 s->syncing); 2021 } 2022 } 2023 2024 return ~0; 2025 } 2026 2027 static void handle_issuing_new_read_requests5(struct stripe_head *sh, 2028 struct stripe_head_state *s, int disks) 2029 { 2030 int i; 2031 2032 /* Clear completed compute operations. Parity recovery 2033 * (STRIPE_OP_MOD_REPAIR_PD) implies a write-back which is handled 2034 * later on in this routine 2035 */ 2036 if (test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete) && 2037 !test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) { 2038 clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete); 2039 clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.ack); 2040 clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending); 2041 } 2042 2043 /* look for blocks to read/compute, skip this if a compute 2044 * is already in flight, or if the stripe contents are in the 2045 * midst of changing due to a write 2046 */ 2047 if (!test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending) && 2048 !test_bit(STRIPE_OP_PREXOR, &sh->ops.pending) && 2049 !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) { 2050 for (i = disks; i--; ) 2051 if (__handle_issuing_new_read_requests5( 2052 sh, s, i, disks) == 0) 2053 break; 2054 } 2055 set_bit(STRIPE_HANDLE, &sh->state); 2056 } 2057 2058 static void handle_issuing_new_read_requests6(struct stripe_head *sh, 2059 struct stripe_head_state *s, struct r6_state *r6s, 2060 int disks) 2061 { 2062 int i; 2063 for (i = disks; i--; ) { 2064 struct r5dev *dev = &sh->dev[i]; 2065 if (!test_bit(R5_LOCKED, &dev->flags) && 2066 !test_bit(R5_UPTODATE, &dev->flags) && 2067 (dev->toread || (dev->towrite && 2068 !test_bit(R5_OVERWRITE, &dev->flags)) || 2069 s->syncing || s->expanding || 2070 (s->failed >= 1 && 2071 (sh->dev[r6s->failed_num[0]].toread || 2072 s->to_write)) || 2073 (s->failed >= 2 && 2074 (sh->dev[r6s->failed_num[1]].toread || 2075 s->to_write)))) { 2076 /* we would like to get this block, possibly 2077 * by computing it, but we might not be able to 2078 */ 2079 if (s->uptodate == disks-1) { 2080 pr_debug("Computing stripe %llu block %d\n", 2081 (unsigned long long)sh->sector, i); 2082 compute_block_1(sh, i, 0); 2083 s->uptodate++; 2084 } else if ( s->uptodate == disks-2 && s->failed >= 2 ) { 2085 /* Computing 2-failure is *very* expensive; only 2086 * do it if failed >= 2 2087 */ 2088 int other; 2089 for (other = disks; other--; ) { 2090 if (other == i) 2091 continue; 2092 if (!test_bit(R5_UPTODATE, 2093 &sh->dev[other].flags)) 2094 break; 
2095 } 2096 BUG_ON(other < 0); 2097 pr_debug("Computing stripe %llu blocks %d,%d\n", 2098 (unsigned long long)sh->sector, 2099 i, other); 2100 compute_block_2(sh, i, other); 2101 s->uptodate += 2; 2102 } else if (test_bit(R5_Insync, &dev->flags)) { 2103 set_bit(R5_LOCKED, &dev->flags); 2104 set_bit(R5_Wantread, &dev->flags); 2105 s->locked++; 2106 pr_debug("Reading block %d (sync=%d)\n", 2107 i, s->syncing); 2108 } 2109 } 2110 } 2111 set_bit(STRIPE_HANDLE, &sh->state); 2112 } 2113 2114 2115 /* handle_completed_write_requests 2116 * any written block on an uptodate or failed drive can be returned. 2117 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but 2118 * never LOCKED, so we don't need to test 'failed' directly. 2119 */ 2120 static void handle_completed_write_requests(raid5_conf_t *conf, 2121 struct stripe_head *sh, int disks, struct bio **return_bi) 2122 { 2123 int i; 2124 struct r5dev *dev; 2125 2126 for (i = disks; i--; ) 2127 if (sh->dev[i].written) { 2128 dev = &sh->dev[i]; 2129 if (!test_bit(R5_LOCKED, &dev->flags) && 2130 test_bit(R5_UPTODATE, &dev->flags)) { 2131 /* We can return any write requests */ 2132 struct bio *wbi, *wbi2; 2133 int bitmap_end = 0; 2134 pr_debug("Return write for disc %d\n", i); 2135 spin_lock_irq(&conf->device_lock); 2136 wbi = dev->written; 2137 dev->written = NULL; 2138 while (wbi && wbi->bi_sector < 2139 dev->sector + STRIPE_SECTORS) { 2140 wbi2 = r5_next_bio(wbi, dev->sector); 2141 if (--wbi->bi_phys_segments == 0) { 2142 md_write_end(conf->mddev); 2143 wbi->bi_next = *return_bi; 2144 *return_bi = wbi; 2145 } 2146 wbi = wbi2; 2147 } 2148 if (dev->towrite == NULL) 2149 bitmap_end = 1; 2150 spin_unlock_irq(&conf->device_lock); 2151 if (bitmap_end) 2152 bitmap_endwrite(conf->mddev->bitmap, 2153 sh->sector, 2154 STRIPE_SECTORS, 2155 !test_bit(STRIPE_DEGRADED, &sh->state), 2156 0); 2157 } 2158 } 2159 } 2160 2161 static void handle_issuing_new_write_requests5(raid5_conf_t *conf, 2162 struct stripe_head *sh, struct stripe_head_state *s, int disks) 2163 { 2164 int rmw = 0, rcw = 0, i; 2165 for (i = disks; i--; ) { 2166 /* would I have to read this buffer for read_modify_write */ 2167 struct r5dev *dev = &sh->dev[i]; 2168 if ((dev->towrite || i == sh->pd_idx) && 2169 !test_bit(R5_LOCKED, &dev->flags) && 2170 !(test_bit(R5_UPTODATE, &dev->flags) || 2171 test_bit(R5_Wantcompute, &dev->flags))) { 2172 if (test_bit(R5_Insync, &dev->flags)) 2173 rmw++; 2174 else 2175 rmw += 2*disks; /* cannot read it */ 2176 } 2177 /* Would I have to read this buffer for reconstruct_write */ 2178 if (!test_bit(R5_OVERWRITE, &dev->flags) && i != sh->pd_idx && 2179 !test_bit(R5_LOCKED, &dev->flags) && 2180 !(test_bit(R5_UPTODATE, &dev->flags) || 2181 test_bit(R5_Wantcompute, &dev->flags))) { 2182 if (test_bit(R5_Insync, &dev->flags)) rcw++; 2183 else 2184 rcw += 2*disks; 2185 } 2186 } 2187 pr_debug("for sector %llu, rmw=%d rcw=%d\n", 2188 (unsigned long long)sh->sector, rmw, rcw); 2189 set_bit(STRIPE_HANDLE, &sh->state); 2190 if (rmw < rcw && rmw > 0) 2191 /* prefer read-modify-write, but need to get some data */ 2192 for (i = disks; i--; ) { 2193 struct r5dev *dev = &sh->dev[i]; 2194 if ((dev->towrite || i == sh->pd_idx) && 2195 !test_bit(R5_LOCKED, &dev->flags) && 2196 !(test_bit(R5_UPTODATE, &dev->flags) || 2197 test_bit(R5_Wantcompute, &dev->flags)) && 2198 test_bit(R5_Insync, &dev->flags)) { 2199 if ( 2200 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2201 pr_debug("Read_old block " 2202 "%d for r-m-w\n", i); 2203 set_bit(R5_LOCKED, &dev->flags); 2204 
set_bit(R5_Wantread, &dev->flags); 2205 if (!test_and_set_bit( 2206 STRIPE_OP_IO, &sh->ops.pending)) 2207 sh->ops.count++; 2208 s->locked++; 2209 } else { 2210 set_bit(STRIPE_DELAYED, &sh->state); 2211 set_bit(STRIPE_HANDLE, &sh->state); 2212 } 2213 } 2214 } 2215 if (rcw <= rmw && rcw > 0) 2216 /* want reconstruct write, but need to get some data */ 2217 for (i = disks; i--; ) { 2218 struct r5dev *dev = &sh->dev[i]; 2219 if (!test_bit(R5_OVERWRITE, &dev->flags) && 2220 i != sh->pd_idx && 2221 !test_bit(R5_LOCKED, &dev->flags) && 2222 !(test_bit(R5_UPTODATE, &dev->flags) || 2223 test_bit(R5_Wantcompute, &dev->flags)) && 2224 test_bit(R5_Insync, &dev->flags)) { 2225 if ( 2226 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2227 pr_debug("Read_old block " 2228 "%d for Reconstruct\n", i); 2229 set_bit(R5_LOCKED, &dev->flags); 2230 set_bit(R5_Wantread, &dev->flags); 2231 if (!test_and_set_bit( 2232 STRIPE_OP_IO, &sh->ops.pending)) 2233 sh->ops.count++; 2234 s->locked++; 2235 } else { 2236 set_bit(STRIPE_DELAYED, &sh->state); 2237 set_bit(STRIPE_HANDLE, &sh->state); 2238 } 2239 } 2240 } 2241 /* now if nothing is locked, and if we have enough data, 2242 * we can start a write request 2243 */ 2244 /* since handle_stripe can be called at any time we need to handle the 2245 * case where a compute block operation has been submitted and then a 2246 * subsequent call wants to start a write request. raid5_run_ops only 2247 * handles the case where compute block and postxor are requested 2248 * simultaneously. If this is not the case then new writes need to be 2249 * held off until the compute completes. 2250 */ 2251 if ((s->req_compute || 2252 !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) && 2253 (s->locked == 0 && (rcw == 0 || rmw == 0) && 2254 !test_bit(STRIPE_BIT_DELAY, &sh->state))) 2255 s->locked += handle_write_operations5(sh, rcw == 0, 0); 2256 } 2257 2258 static void handle_issuing_new_write_requests6(raid5_conf_t *conf, 2259 struct stripe_head *sh, struct stripe_head_state *s, 2260 struct r6_state *r6s, int disks) 2261 { 2262 int rcw = 0, must_compute = 0, pd_idx = sh->pd_idx, i; 2263 int qd_idx = r6s->qd_idx; 2264 for (i = disks; i--; ) { 2265 struct r5dev *dev = &sh->dev[i]; 2266 /* Would I have to read this buffer for reconstruct_write */ 2267 if (!test_bit(R5_OVERWRITE, &dev->flags) 2268 && i != pd_idx && i != qd_idx 2269 && (!test_bit(R5_LOCKED, &dev->flags) 2270 ) && 2271 !test_bit(R5_UPTODATE, &dev->flags)) { 2272 if (test_bit(R5_Insync, &dev->flags)) rcw++; 2273 else { 2274 pr_debug("raid6: must_compute: " 2275 "disk %d flags=%#lx\n", i, dev->flags); 2276 must_compute++; 2277 } 2278 } 2279 } 2280 pr_debug("for sector %llu, rcw=%d, must_compute=%d\n", 2281 (unsigned long long)sh->sector, rcw, must_compute); 2282 set_bit(STRIPE_HANDLE, &sh->state); 2283 2284 if (rcw > 0) 2285 /* want reconstruct write, but need to get some data */ 2286 for (i = disks; i--; ) { 2287 struct r5dev *dev = &sh->dev[i]; 2288 if (!test_bit(R5_OVERWRITE, &dev->flags) 2289 && !(s->failed == 0 && (i == pd_idx || i == qd_idx)) 2290 && !test_bit(R5_LOCKED, &dev->flags) && 2291 !test_bit(R5_UPTODATE, &dev->flags) && 2292 test_bit(R5_Insync, &dev->flags)) { 2293 if ( 2294 test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2295 pr_debug("Read_old stripe %llu " 2296 "block %d for Reconstruct\n", 2297 (unsigned long long)sh->sector, i); 2298 set_bit(R5_LOCKED, &dev->flags); 2299 set_bit(R5_Wantread, &dev->flags); 2300 s->locked++; 2301 } else { 2302 pr_debug("Request delayed stripe %llu " 2303 "block %d for 
Reconstruct\n", 2304 (unsigned long long)sh->sector, i); 2305 set_bit(STRIPE_DELAYED, &sh->state); 2306 set_bit(STRIPE_HANDLE, &sh->state); 2307 } 2308 } 2309 } 2310 /* now if nothing is locked, and if we have enough data, we can start a 2311 * write request 2312 */ 2313 if (s->locked == 0 && rcw == 0 && 2314 !test_bit(STRIPE_BIT_DELAY, &sh->state)) { 2315 if (must_compute > 0) { 2316 /* We have failed blocks and need to compute them */ 2317 switch (s->failed) { 2318 case 0: 2319 BUG(); 2320 case 1: 2321 compute_block_1(sh, r6s->failed_num[0], 0); 2322 break; 2323 case 2: 2324 compute_block_2(sh, r6s->failed_num[0], 2325 r6s->failed_num[1]); 2326 break; 2327 default: /* This request should have been failed? */ 2328 BUG(); 2329 } 2330 } 2331 2332 pr_debug("Computing parity for stripe %llu\n", 2333 (unsigned long long)sh->sector); 2334 compute_parity6(sh, RECONSTRUCT_WRITE); 2335 /* now every locked buffer is ready to be written */ 2336 for (i = disks; i--; ) 2337 if (test_bit(R5_LOCKED, &sh->dev[i].flags)) { 2338 pr_debug("Writing stripe %llu block %d\n", 2339 (unsigned long long)sh->sector, i); 2340 s->locked++; 2341 set_bit(R5_Wantwrite, &sh->dev[i].flags); 2342 } 2343 /* after a RECONSTRUCT_WRITE, the stripe MUST be in-sync */ 2344 set_bit(STRIPE_INSYNC, &sh->state); 2345 2346 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2347 atomic_dec(&conf->preread_active_stripes); 2348 if (atomic_read(&conf->preread_active_stripes) < 2349 IO_THRESHOLD) 2350 md_wakeup_thread(conf->mddev->thread); 2351 } 2352 } 2353 } 2354 2355 static void handle_parity_checks5(raid5_conf_t *conf, struct stripe_head *sh, 2356 struct stripe_head_state *s, int disks) 2357 { 2358 set_bit(STRIPE_HANDLE, &sh->state); 2359 /* Take one of the following actions: 2360 * 1/ start a check parity operation if (uptodate == disks) 2361 * 2/ finish a check parity operation and act on the result 2362 * 3/ skip to the writeback section if we previously 2363 * initiated a recovery operation 2364 */ 2365 if (s->failed == 0 && 2366 !test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) { 2367 if (!test_and_set_bit(STRIPE_OP_CHECK, &sh->ops.pending)) { 2368 BUG_ON(s->uptodate != disks); 2369 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); 2370 sh->ops.count++; 2371 s->uptodate--; 2372 } else if ( 2373 test_and_clear_bit(STRIPE_OP_CHECK, &sh->ops.complete)) { 2374 clear_bit(STRIPE_OP_CHECK, &sh->ops.ack); 2375 clear_bit(STRIPE_OP_CHECK, &sh->ops.pending); 2376 2377 if (sh->ops.zero_sum_result == 0) 2378 /* parity is correct (on disc, 2379 * not in buffer any more) 2380 */ 2381 set_bit(STRIPE_INSYNC, &sh->state); 2382 else { 2383 conf->mddev->resync_mismatches += 2384 STRIPE_SECTORS; 2385 if (test_bit( 2386 MD_RECOVERY_CHECK, &conf->mddev->recovery)) 2387 /* don't try to repair!! 
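				 * -- the user asked for a read-only 'check' pass,
				 * so the mismatch is only counted, not corrected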
*/ 2388 set_bit(STRIPE_INSYNC, &sh->state); 2389 else { 2390 set_bit(STRIPE_OP_COMPUTE_BLK, 2391 &sh->ops.pending); 2392 set_bit(STRIPE_OP_MOD_REPAIR_PD, 2393 &sh->ops.pending); 2394 set_bit(R5_Wantcompute, 2395 &sh->dev[sh->pd_idx].flags); 2396 sh->ops.target = sh->pd_idx; 2397 sh->ops.count++; 2398 s->uptodate++; 2399 } 2400 } 2401 } 2402 } 2403 2404 /* check if we can clear a parity disk reconstruct */ 2405 if (test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete) && 2406 test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) { 2407 2408 clear_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending); 2409 clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.complete); 2410 clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.ack); 2411 clear_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending); 2412 } 2413 2414 /* Wait for check parity and compute block operations to complete 2415 * before write-back 2416 */ 2417 if (!test_bit(STRIPE_INSYNC, &sh->state) && 2418 !test_bit(STRIPE_OP_CHECK, &sh->ops.pending) && 2419 !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) { 2420 struct r5dev *dev; 2421 /* either failed parity check, or recovery is happening */ 2422 if (s->failed == 0) 2423 s->failed_num = sh->pd_idx; 2424 dev = &sh->dev[s->failed_num]; 2425 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); 2426 BUG_ON(s->uptodate != disks); 2427 2428 set_bit(R5_LOCKED, &dev->flags); 2429 set_bit(R5_Wantwrite, &dev->flags); 2430 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending)) 2431 sh->ops.count++; 2432 2433 clear_bit(STRIPE_DEGRADED, &sh->state); 2434 s->locked++; 2435 set_bit(STRIPE_INSYNC, &sh->state); 2436 } 2437 } 2438 2439 2440 static void handle_parity_checks6(raid5_conf_t *conf, struct stripe_head *sh, 2441 struct stripe_head_state *s, 2442 struct r6_state *r6s, struct page *tmp_page, 2443 int disks) 2444 { 2445 int update_p = 0, update_q = 0; 2446 struct r5dev *dev; 2447 int pd_idx = sh->pd_idx; 2448 int qd_idx = r6s->qd_idx; 2449 2450 set_bit(STRIPE_HANDLE, &sh->state); 2451 2452 BUG_ON(s->failed > 2); 2453 BUG_ON(s->uptodate < disks); 2454 /* Want to check and possibly repair P and Q. 2455 * However there could be one 'failed' device, in which 2456 * case we can only check one of them, possibly using the 2457 * other to generate missing data 2458 */ 2459 2460 /* If !tmp_page, we cannot do the calculations, 2461 * but as we have set STRIPE_HANDLE, we will soon be called 2462 * by stripe_handle with a tmp_page - just wait until then. 2463 */ 2464 if (tmp_page) { 2465 if (s->failed == r6s->q_failed) { 2466 /* The only possible failed device holds 'Q', so it 2467 * makes sense to check P (If anything else were failed, 2468 * we would have used P to recreate it). 2469 */ 2470 compute_block_1(sh, pd_idx, 1); 2471 if (!page_is_zero(sh->dev[pd_idx].page)) { 2472 compute_block_1(sh, pd_idx, 0); 2473 update_p = 1; 2474 } 2475 } 2476 if (!r6s->q_failed && s->failed < 2) { 2477 /* q is not failed, and we didn't use it to generate 2478 * anything, so it makes sense to check it 2479 */ 2480 memcpy(page_address(tmp_page), 2481 page_address(sh->dev[qd_idx].page), 2482 STRIPE_SIZE); 2483 compute_parity6(sh, UPDATE_PARITY); 2484 if (memcmp(page_address(tmp_page), 2485 page_address(sh->dev[qd_idx].page), 2486 STRIPE_SIZE) != 0) { 2487 clear_bit(STRIPE_INSYNC, &sh->state); 2488 update_q = 1; 2489 } 2490 } 2491 if (update_p || update_q) { 2492 conf->mddev->resync_mismatches += STRIPE_SECTORS; 2493 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 2494 /* don't try to repair!! 
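				 * -- read-only scrub requested, so leave P and Q
				 * as they are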
*/ 2495 update_p = update_q = 0; 2496 } 2497 2498 /* now write out any block on a failed drive, 2499 * or P or Q if they need it 2500 */ 2501 2502 if (s->failed == 2) { 2503 dev = &sh->dev[r6s->failed_num[1]]; 2504 s->locked++; 2505 set_bit(R5_LOCKED, &dev->flags); 2506 set_bit(R5_Wantwrite, &dev->flags); 2507 } 2508 if (s->failed >= 1) { 2509 dev = &sh->dev[r6s->failed_num[0]]; 2510 s->locked++; 2511 set_bit(R5_LOCKED, &dev->flags); 2512 set_bit(R5_Wantwrite, &dev->flags); 2513 } 2514 2515 if (update_p) { 2516 dev = &sh->dev[pd_idx]; 2517 s->locked++; 2518 set_bit(R5_LOCKED, &dev->flags); 2519 set_bit(R5_Wantwrite, &dev->flags); 2520 } 2521 if (update_q) { 2522 dev = &sh->dev[qd_idx]; 2523 s->locked++; 2524 set_bit(R5_LOCKED, &dev->flags); 2525 set_bit(R5_Wantwrite, &dev->flags); 2526 } 2527 clear_bit(STRIPE_DEGRADED, &sh->state); 2528 2529 set_bit(STRIPE_INSYNC, &sh->state); 2530 } 2531 } 2532 2533 static void handle_stripe_expansion(raid5_conf_t *conf, struct stripe_head *sh, 2534 struct r6_state *r6s) 2535 { 2536 int i; 2537 2538 /* We have read all the blocks in this stripe and now we need to 2539 * copy some of them into a target stripe for expand. 2540 */ 2541 struct dma_async_tx_descriptor *tx = NULL; 2542 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); 2543 for (i = 0; i < sh->disks; i++) 2544 if (i != sh->pd_idx && (r6s && i != r6s->qd_idx)) { 2545 int dd_idx, pd_idx, j; 2546 struct stripe_head *sh2; 2547 2548 sector_t bn = compute_blocknr(sh, i); 2549 sector_t s = raid5_compute_sector(bn, conf->raid_disks, 2550 conf->raid_disks - 2551 conf->max_degraded, &dd_idx, 2552 &pd_idx, conf); 2553 sh2 = get_active_stripe(conf, s, conf->raid_disks, 2554 pd_idx, 1); 2555 if (sh2 == NULL) 2556 /* so far only the early blocks of this stripe 2557 * have been requested. When later blocks 2558 * get requested, we will try again 2559 */ 2560 continue; 2561 if (!test_bit(STRIPE_EXPANDING, &sh2->state) || 2562 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) { 2563 /* must have already done this block */ 2564 release_stripe(sh2); 2565 continue; 2566 } 2567 2568 /* place all the copies on one channel */ 2569 tx = async_memcpy(sh2->dev[dd_idx].page, 2570 sh->dev[i].page, 0, 0, STRIPE_SIZE, 2571 ASYNC_TX_DEP_ACK, tx, NULL, NULL); 2572 2573 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); 2574 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); 2575 for (j = 0; j < conf->raid_disks; j++) 2576 if (j != sh2->pd_idx && 2577 (r6s && j != r6s->qd_idx) && 2578 !test_bit(R5_Expanded, &sh2->dev[j].flags)) 2579 break; 2580 if (j == conf->raid_disks) { 2581 set_bit(STRIPE_EXPAND_READY, &sh2->state); 2582 set_bit(STRIPE_HANDLE, &sh2->state); 2583 } 2584 release_stripe(sh2); 2585 2586 /* done submitting copies, wait for them to complete */ 2587 if (i + 1 >= sh->disks) { 2588 async_tx_ack(tx); 2589 dma_wait_for_async_tx(tx); 2590 } 2591 } 2592 } 2593 2594 /* 2595 * handle_stripe - do things to a stripe. 2596 * 2597 * We lock the stripe and then examine the state of various bits 2598 * to see what needs to be done. 2599 * Possible results: 2600 * return some read request which now have data 2601 * return some write requests which are safely on disc 2602 * schedule a read on some buffers 2603 * schedule a write of some buffers 2604 * return confirmation of parity correctness 2605 * 2606 * buffers are taken off read_list or write_list, and bh_cache buffers 2607 * get BH_Lock set before the stripe lock is released. 
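 * The work is split between handle_stripe5() and handle_stripe6();
 * handle_stripe() dispatches on whether the array is RAID-6.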
2608 * 2609 */ 2610 2611 static void handle_stripe5(struct stripe_head *sh) 2612 { 2613 raid5_conf_t *conf = sh->raid_conf; 2614 int disks = sh->disks, i; 2615 struct bio *return_bi = NULL; 2616 struct stripe_head_state s; 2617 struct r5dev *dev; 2618 unsigned long pending = 0; 2619 2620 memset(&s, 0, sizeof(s)); 2621 pr_debug("handling stripe %llu, state=%#lx cnt=%d, pd_idx=%d " 2622 "ops=%lx:%lx:%lx\n", (unsigned long long)sh->sector, sh->state, 2623 atomic_read(&sh->count), sh->pd_idx, 2624 sh->ops.pending, sh->ops.ack, sh->ops.complete); 2625 2626 spin_lock(&sh->lock); 2627 clear_bit(STRIPE_HANDLE, &sh->state); 2628 clear_bit(STRIPE_DELAYED, &sh->state); 2629 2630 s.syncing = test_bit(STRIPE_SYNCING, &sh->state); 2631 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); 2632 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); 2633 /* Now to look around and see what can be done */ 2634 2635 rcu_read_lock(); 2636 for (i=disks; i--; ) { 2637 mdk_rdev_t *rdev; 2638 struct r5dev *dev = &sh->dev[i]; 2639 clear_bit(R5_Insync, &dev->flags); 2640 2641 pr_debug("check %d: state 0x%lx toread %p read %p write %p " 2642 "written %p\n", i, dev->flags, dev->toread, dev->read, 2643 dev->towrite, dev->written); 2644 2645 /* maybe we can request a biofill operation 2646 * 2647 * new wantfill requests are only permitted while 2648 * STRIPE_OP_BIOFILL is clear 2649 */ 2650 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread && 2651 !test_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)) 2652 set_bit(R5_Wantfill, &dev->flags); 2653 2654 /* now count some things */ 2655 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++; 2656 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++; 2657 if (test_bit(R5_Wantcompute, &dev->flags)) s.compute++; 2658 2659 if (test_bit(R5_Wantfill, &dev->flags)) 2660 s.to_fill++; 2661 else if (dev->toread) 2662 s.to_read++; 2663 if (dev->towrite) { 2664 s.to_write++; 2665 if (!test_bit(R5_OVERWRITE, &dev->flags)) 2666 s.non_overwrite++; 2667 } 2668 if (dev->written) 2669 s.written++; 2670 rdev = rcu_dereference(conf->disks[i].rdev); 2671 if (!rdev || !test_bit(In_sync, &rdev->flags)) { 2672 /* The ReadError flag will just be confusing now */ 2673 clear_bit(R5_ReadError, &dev->flags); 2674 clear_bit(R5_ReWrite, &dev->flags); 2675 } 2676 if (!rdev || !test_bit(In_sync, &rdev->flags) 2677 || test_bit(R5_ReadError, &dev->flags)) { 2678 s.failed++; 2679 s.failed_num = i; 2680 } else 2681 set_bit(R5_Insync, &dev->flags); 2682 } 2683 rcu_read_unlock(); 2684 2685 if (s.to_fill && !test_and_set_bit(STRIPE_OP_BIOFILL, &sh->ops.pending)) 2686 sh->ops.count++; 2687 2688 pr_debug("locked=%d uptodate=%d to_read=%d" 2689 " to_write=%d failed=%d failed_num=%d\n", 2690 s.locked, s.uptodate, s.to_read, s.to_write, 2691 s.failed, s.failed_num); 2692 /* check if the array has lost two devices and, if so, some requests might 2693 * need to be failed 2694 */ 2695 if (s.failed > 1 && s.to_read+s.to_write+s.written) 2696 handle_requests_to_failed_array(conf, sh, &s, disks, 2697 &return_bi); 2698 if (s.failed > 1 && s.syncing) { 2699 md_done_sync(conf->mddev, STRIPE_SECTORS,0); 2700 clear_bit(STRIPE_SYNCING, &sh->state); 2701 s.syncing = 0; 2702 } 2703 2704 /* might be able to return some write requests if the parity block 2705 * is safe, or on a failed drive 2706 */ 2707 dev = &sh->dev[sh->pd_idx]; 2708 if ( s.written && 2709 ((test_bit(R5_Insync, &dev->flags) && 2710 !test_bit(R5_LOCKED, &dev->flags) && 2711 test_bit(R5_UPTODATE, &dev->flags)) || 2712 (s.failed == 1 && s.failed_num == sh->pd_idx))) 2713 
handle_completed_write_requests(conf, sh, disks, &return_bi); 2714 2715 /* Now we might consider reading some blocks, either to check/generate 2716 * parity, or to satisfy requests 2717 * or to load a block that is being partially written. 2718 */ 2719 if (s.to_read || s.non_overwrite || 2720 (s.syncing && (s.uptodate + s.compute < disks)) || s.expanding || 2721 test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending)) 2722 handle_issuing_new_read_requests5(sh, &s, disks); 2723 2724 /* Now we check to see if any write operations have recently 2725 * completed 2726 */ 2727 2728 /* leave prexor set until postxor is done, allows us to distinguish 2729 * a rmw from a rcw during biodrain 2730 */ 2731 if (test_bit(STRIPE_OP_PREXOR, &sh->ops.complete) && 2732 test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete)) { 2733 2734 clear_bit(STRIPE_OP_PREXOR, &sh->ops.complete); 2735 clear_bit(STRIPE_OP_PREXOR, &sh->ops.ack); 2736 clear_bit(STRIPE_OP_PREXOR, &sh->ops.pending); 2737 2738 for (i = disks; i--; ) 2739 clear_bit(R5_Wantprexor, &sh->dev[i].flags); 2740 } 2741 2742 /* if only POSTXOR is set then this is an 'expand' postxor */ 2743 if (test_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete) && 2744 test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete)) { 2745 2746 clear_bit(STRIPE_OP_BIODRAIN, &sh->ops.complete); 2747 clear_bit(STRIPE_OP_BIODRAIN, &sh->ops.ack); 2748 clear_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending); 2749 2750 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.complete); 2751 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.ack); 2752 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.pending); 2753 2754 /* All the 'written' buffers and the parity block are ready to 2755 * be written back to disk 2756 */ 2757 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags)); 2758 for (i = disks; i--; ) { 2759 dev = &sh->dev[i]; 2760 if (test_bit(R5_LOCKED, &dev->flags) && 2761 (i == sh->pd_idx || dev->written)) { 2762 pr_debug("Writing block %d\n", i); 2763 set_bit(R5_Wantwrite, &dev->flags); 2764 if (!test_and_set_bit( 2765 STRIPE_OP_IO, &sh->ops.pending)) 2766 sh->ops.count++; 2767 if (!test_bit(R5_Insync, &dev->flags) || 2768 (i == sh->pd_idx && s.failed == 0)) 2769 set_bit(STRIPE_INSYNC, &sh->state); 2770 } 2771 } 2772 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) { 2773 atomic_dec(&conf->preread_active_stripes); 2774 if (atomic_read(&conf->preread_active_stripes) < 2775 IO_THRESHOLD) 2776 md_wakeup_thread(conf->mddev->thread); 2777 } 2778 } 2779 2780 /* Now to consider new write requests and what else, if anything 2781 * should be read. We do not handle new writes when: 2782 * 1/ A 'write' operation (copy+xor) is already in flight. 2783 * 2/ A 'check' operation is in flight, as it may clobber the parity 2784 * block. 2785 */ 2786 if (s.to_write && !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending) && 2787 !test_bit(STRIPE_OP_CHECK, &sh->ops.pending)) 2788 handle_issuing_new_write_requests5(conf, sh, &s, disks); 2789 2790 /* maybe we need to check and possibly fix the parity for this stripe 2791 * Any reads will already have been scheduled, so we just see if enough 2792 * data is available. The parity check is held off while parity 2793 * dependent operations are in flight. 
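	 * We also re-enter handle_parity_checks5() while STRIPE_OP_CHECK or
	 * STRIPE_OP_MOD_REPAIR_PD is still pending, so an operation already
	 * in flight can be completed.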
2794 */ 2795 if ((s.syncing && s.locked == 0 && 2796 !test_bit(STRIPE_OP_COMPUTE_BLK, &sh->ops.pending) && 2797 !test_bit(STRIPE_INSYNC, &sh->state)) || 2798 test_bit(STRIPE_OP_CHECK, &sh->ops.pending) || 2799 test_bit(STRIPE_OP_MOD_REPAIR_PD, &sh->ops.pending)) 2800 handle_parity_checks5(conf, sh, &s, disks); 2801 2802 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { 2803 md_done_sync(conf->mddev, STRIPE_SECTORS,1); 2804 clear_bit(STRIPE_SYNCING, &sh->state); 2805 } 2806 2807 /* If the failed drive is just a ReadError, then we might need to progress 2808 * the repair/check process 2809 */ 2810 if (s.failed == 1 && !conf->mddev->ro && 2811 test_bit(R5_ReadError, &sh->dev[s.failed_num].flags) 2812 && !test_bit(R5_LOCKED, &sh->dev[s.failed_num].flags) 2813 && test_bit(R5_UPTODATE, &sh->dev[s.failed_num].flags) 2814 ) { 2815 dev = &sh->dev[s.failed_num]; 2816 if (!test_bit(R5_ReWrite, &dev->flags)) { 2817 set_bit(R5_Wantwrite, &dev->flags); 2818 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending)) 2819 sh->ops.count++; 2820 set_bit(R5_ReWrite, &dev->flags); 2821 set_bit(R5_LOCKED, &dev->flags); 2822 s.locked++; 2823 } else { 2824 /* let's read it back */ 2825 set_bit(R5_Wantread, &dev->flags); 2826 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending)) 2827 sh->ops.count++; 2828 set_bit(R5_LOCKED, &dev->flags); 2829 s.locked++; 2830 } 2831 } 2832 2833 /* Finish postxor operations initiated by the expansion 2834 * process 2835 */ 2836 if (test_bit(STRIPE_OP_POSTXOR, &sh->ops.complete) && 2837 !test_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending)) { 2838 2839 clear_bit(STRIPE_EXPANDING, &sh->state); 2840 2841 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.pending); 2842 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.ack); 2843 clear_bit(STRIPE_OP_POSTXOR, &sh->ops.complete); 2844 2845 for (i = conf->raid_disks; i--; ) { 2846 set_bit(R5_Wantwrite, &sh->dev[i].flags); 2847 if (!test_and_set_bit(STRIPE_OP_IO, &sh->ops.pending)) 2848 sh->ops.count++; 2849 } 2850 } 2851 2852 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && 2853 !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) { 2854 /* Need to write out all blocks after computing parity */ 2855 sh->disks = conf->raid_disks; 2856 sh->pd_idx = stripe_to_pdidx(sh->sector, conf, 2857 conf->raid_disks); 2858 s.locked += handle_write_operations5(sh, 0, 1); 2859 } else if (s.expanded && 2860 !test_bit(STRIPE_OP_POSTXOR, &sh->ops.pending)) { 2861 clear_bit(STRIPE_EXPAND_READY, &sh->state); 2862 atomic_dec(&conf->reshape_stripes); 2863 wake_up(&conf->wait_for_overlap); 2864 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 2865 } 2866 2867 if (s.expanding && s.locked == 0) 2868 handle_stripe_expansion(conf, sh, NULL); 2869 2870 if (sh->ops.count) 2871 pending = get_stripe_work(sh); 2872 2873 spin_unlock(&sh->lock); 2874 2875 if (pending) 2876 raid5_run_ops(sh, pending); 2877 2878 return_io(return_bi); 2879 2880 } 2881 2882 static void handle_stripe6(struct stripe_head *sh, struct page *tmp_page) 2883 { 2884 raid6_conf_t *conf = sh->raid_conf; 2885 int disks = sh->disks; 2886 struct bio *return_bi = NULL; 2887 int i, pd_idx = sh->pd_idx; 2888 struct stripe_head_state s; 2889 struct r6_state r6s; 2890 struct r5dev *dev, *pdev, *qdev; 2891 2892 r6s.qd_idx = raid6_next_disk(pd_idx, disks); 2893 pr_debug("handling stripe %llu, state=%#lx cnt=%d, " 2894 "pd_idx=%d, qd_idx=%d\n", 2895 (unsigned long long)sh->sector, sh->state, 2896 atomic_read(&sh->count), pd_idx, r6s.qd_idx); 2897 memset(&s, 0, sizeof(s)); 2898 2899 spin_lock(&sh->lock); 2900 
clear_bit(STRIPE_HANDLE, &sh->state); 2901 clear_bit(STRIPE_DELAYED, &sh->state); 2902 2903 s.syncing = test_bit(STRIPE_SYNCING, &sh->state); 2904 s.expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state); 2905 s.expanded = test_bit(STRIPE_EXPAND_READY, &sh->state); 2906 /* Now to look around and see what can be done */ 2907 2908 rcu_read_lock(); 2909 for (i=disks; i--; ) { 2910 mdk_rdev_t *rdev; 2911 dev = &sh->dev[i]; 2912 clear_bit(R5_Insync, &dev->flags); 2913 2914 pr_debug("check %d: state 0x%lx read %p write %p written %p\n", 2915 i, dev->flags, dev->toread, dev->towrite, dev->written); 2916 /* maybe we can reply to a read */ 2917 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread) { 2918 struct bio *rbi, *rbi2; 2919 pr_debug("Return read for disc %d\n", i); 2920 spin_lock_irq(&conf->device_lock); 2921 rbi = dev->toread; 2922 dev->toread = NULL; 2923 if (test_and_clear_bit(R5_Overlap, &dev->flags)) 2924 wake_up(&conf->wait_for_overlap); 2925 spin_unlock_irq(&conf->device_lock); 2926 while (rbi && rbi->bi_sector < dev->sector + STRIPE_SECTORS) { 2927 copy_data(0, rbi, dev->page, dev->sector); 2928 rbi2 = r5_next_bio(rbi, dev->sector); 2929 spin_lock_irq(&conf->device_lock); 2930 if (--rbi->bi_phys_segments == 0) { 2931 rbi->bi_next = return_bi; 2932 return_bi = rbi; 2933 } 2934 spin_unlock_irq(&conf->device_lock); 2935 rbi = rbi2; 2936 } 2937 } 2938 2939 /* now count some things */ 2940 if (test_bit(R5_LOCKED, &dev->flags)) s.locked++; 2941 if (test_bit(R5_UPTODATE, &dev->flags)) s.uptodate++; 2942 2943 2944 if (dev->toread) 2945 s.to_read++; 2946 if (dev->towrite) { 2947 s.to_write++; 2948 if (!test_bit(R5_OVERWRITE, &dev->flags)) 2949 s.non_overwrite++; 2950 } 2951 if (dev->written) 2952 s.written++; 2953 rdev = rcu_dereference(conf->disks[i].rdev); 2954 if (!rdev || !test_bit(In_sync, &rdev->flags)) { 2955 /* The ReadError flag will just be confusing now */ 2956 clear_bit(R5_ReadError, &dev->flags); 2957 clear_bit(R5_ReWrite, &dev->flags); 2958 } 2959 if (!rdev || !test_bit(In_sync, &rdev->flags) 2960 || test_bit(R5_ReadError, &dev->flags)) { 2961 if (s.failed < 2) 2962 r6s.failed_num[s.failed] = i; 2963 s.failed++; 2964 } else 2965 set_bit(R5_Insync, &dev->flags); 2966 } 2967 rcu_read_unlock(); 2968 pr_debug("locked=%d uptodate=%d to_read=%d" 2969 " to_write=%d failed=%d failed_num=%d,%d\n", 2970 s.locked, s.uptodate, s.to_read, s.to_write, s.failed, 2971 r6s.failed_num[0], r6s.failed_num[1]); 2972 /* check if the array has lost >2 devices and, if so, some requests 2973 * might need to be failed 2974 */ 2975 if (s.failed > 2 && s.to_read+s.to_write+s.written) 2976 handle_requests_to_failed_array(conf, sh, &s, disks, 2977 &return_bi); 2978 if (s.failed > 2 && s.syncing) { 2979 md_done_sync(conf->mddev, STRIPE_SECTORS,0); 2980 clear_bit(STRIPE_SYNCING, &sh->state); 2981 s.syncing = 0; 2982 } 2983 2984 /* 2985 * might be able to return some write requests if the parity blocks 2986 * are safe, or on a failed drive 2987 */ 2988 pdev = &sh->dev[pd_idx]; 2989 r6s.p_failed = (s.failed >= 1 && r6s.failed_num[0] == pd_idx) 2990 || (s.failed >= 2 && r6s.failed_num[1] == pd_idx); 2991 qdev = &sh->dev[r6s.qd_idx]; 2992 r6s.q_failed = (s.failed >= 1 && r6s.failed_num[0] == r6s.qd_idx) 2993 || (s.failed >= 2 && r6s.failed_num[1] == r6s.qd_idx); 2994 2995 if ( s.written && 2996 ( r6s.p_failed || ((test_bit(R5_Insync, &pdev->flags) 2997 && !test_bit(R5_LOCKED, &pdev->flags) 2998 && test_bit(R5_UPTODATE, &pdev->flags)))) && 2999 ( r6s.q_failed || ((test_bit(R5_Insync, &qdev->flags) 3000 && 
!test_bit(R5_LOCKED, &qdev->flags) 3001 && test_bit(R5_UPTODATE, &qdev->flags))))) 3002 handle_completed_write_requests(conf, sh, disks, &return_bi); 3003 3004 /* Now we might consider reading some blocks, either to check/generate 3005 * parity, or to satisfy requests 3006 * or to load a block that is being partially written. 3007 */ 3008 if (s.to_read || s.non_overwrite || (s.to_write && s.failed) || 3009 (s.syncing && (s.uptodate < disks)) || s.expanding) 3010 handle_issuing_new_read_requests6(sh, &s, &r6s, disks); 3011 3012 /* now to consider writing and what else, if anything should be read */ 3013 if (s.to_write) 3014 handle_issuing_new_write_requests6(conf, sh, &s, &r6s, disks); 3015 3016 /* maybe we need to check and possibly fix the parity for this stripe 3017 * Any reads will already have been scheduled, so we just see if enough 3018 * data is available 3019 */ 3020 if (s.syncing && s.locked == 0 && !test_bit(STRIPE_INSYNC, &sh->state)) 3021 handle_parity_checks6(conf, sh, &s, &r6s, tmp_page, disks); 3022 3023 if (s.syncing && s.locked == 0 && test_bit(STRIPE_INSYNC, &sh->state)) { 3024 md_done_sync(conf->mddev, STRIPE_SECTORS,1); 3025 clear_bit(STRIPE_SYNCING, &sh->state); 3026 } 3027 3028 /* If the failed drives are just a ReadError, then we might need 3029 * to progress the repair/check process 3030 */ 3031 if (s.failed <= 2 && !conf->mddev->ro) 3032 for (i = 0; i < s.failed; i++) { 3033 dev = &sh->dev[r6s.failed_num[i]]; 3034 if (test_bit(R5_ReadError, &dev->flags) 3035 && !test_bit(R5_LOCKED, &dev->flags) 3036 && test_bit(R5_UPTODATE, &dev->flags) 3037 ) { 3038 if (!test_bit(R5_ReWrite, &dev->flags)) { 3039 set_bit(R5_Wantwrite, &dev->flags); 3040 set_bit(R5_ReWrite, &dev->flags); 3041 set_bit(R5_LOCKED, &dev->flags); 3042 } else { 3043 /* let's read it back */ 3044 set_bit(R5_Wantread, &dev->flags); 3045 set_bit(R5_LOCKED, &dev->flags); 3046 } 3047 } 3048 } 3049 3050 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state)) { 3051 /* Need to write out all blocks after computing P&Q */ 3052 sh->disks = conf->raid_disks; 3053 sh->pd_idx = stripe_to_pdidx(sh->sector, conf, 3054 conf->raid_disks); 3055 compute_parity6(sh, RECONSTRUCT_WRITE); 3056 for (i = conf->raid_disks ; i-- ; ) { 3057 set_bit(R5_LOCKED, &sh->dev[i].flags); 3058 s.locked++; 3059 set_bit(R5_Wantwrite, &sh->dev[i].flags); 3060 } 3061 clear_bit(STRIPE_EXPANDING, &sh->state); 3062 } else if (s.expanded) { 3063 clear_bit(STRIPE_EXPAND_READY, &sh->state); 3064 atomic_dec(&conf->reshape_stripes); 3065 wake_up(&conf->wait_for_overlap); 3066 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 3067 } 3068 3069 if (s.expanding && s.locked == 0) 3070 handle_stripe_expansion(conf, sh, &r6s); 3071 3072 spin_unlock(&sh->lock); 3073 3074 return_io(return_bi); 3075 3076 for (i=disks; i-- ;) { 3077 int rw; 3078 struct bio *bi; 3079 mdk_rdev_t *rdev; 3080 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) 3081 rw = WRITE; 3082 else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 3083 rw = READ; 3084 else 3085 continue; 3086 3087 bi = &sh->dev[i].req; 3088 3089 bi->bi_rw = rw; 3090 if (rw == WRITE) 3091 bi->bi_end_io = raid5_end_write_request; 3092 else 3093 bi->bi_end_io = raid5_end_read_request; 3094 3095 rcu_read_lock(); 3096 rdev = rcu_dereference(conf->disks[i].rdev); 3097 if (rdev && test_bit(Faulty, &rdev->flags)) 3098 rdev = NULL; 3099 if (rdev) 3100 atomic_inc(&rdev->nr_pending); 3101 rcu_read_unlock(); 3102 3103 if (rdev) { 3104 if (s.syncing || s.expanding || s.expanded) 3105 md_sync_acct(rdev->bdev, 
STRIPE_SECTORS); 3106 3107 bi->bi_bdev = rdev->bdev; 3108 pr_debug("for %llu schedule op %ld on disc %d\n", 3109 (unsigned long long)sh->sector, bi->bi_rw, i); 3110 atomic_inc(&sh->count); 3111 bi->bi_sector = sh->sector + rdev->data_offset; 3112 bi->bi_flags = 1 << BIO_UPTODATE; 3113 bi->bi_vcnt = 1; 3114 bi->bi_max_vecs = 1; 3115 bi->bi_idx = 0; 3116 bi->bi_io_vec = &sh->dev[i].vec; 3117 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 3118 bi->bi_io_vec[0].bv_offset = 0; 3119 bi->bi_size = STRIPE_SIZE; 3120 bi->bi_next = NULL; 3121 if (rw == WRITE && 3122 test_bit(R5_ReWrite, &sh->dev[i].flags)) 3123 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); 3124 generic_make_request(bi); 3125 } else { 3126 if (rw == WRITE) 3127 set_bit(STRIPE_DEGRADED, &sh->state); 3128 pr_debug("skip op %ld on disc %d for sector %llu\n", 3129 bi->bi_rw, i, (unsigned long long)sh->sector); 3130 clear_bit(R5_LOCKED, &sh->dev[i].flags); 3131 set_bit(STRIPE_HANDLE, &sh->state); 3132 } 3133 } 3134 } 3135 3136 static void handle_stripe(struct stripe_head *sh, struct page *tmp_page) 3137 { 3138 if (sh->raid_conf->level == 6) 3139 handle_stripe6(sh, tmp_page); 3140 else 3141 handle_stripe5(sh); 3142 } 3143 3144 3145 3146 static void raid5_activate_delayed(raid5_conf_t *conf) 3147 { 3148 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { 3149 while (!list_empty(&conf->delayed_list)) { 3150 struct list_head *l = conf->delayed_list.next; 3151 struct stripe_head *sh; 3152 sh = list_entry(l, struct stripe_head, lru); 3153 list_del_init(l); 3154 clear_bit(STRIPE_DELAYED, &sh->state); 3155 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3156 atomic_inc(&conf->preread_active_stripes); 3157 list_add_tail(&sh->lru, &conf->handle_list); 3158 } 3159 } 3160 } 3161 3162 static void activate_bit_delay(raid5_conf_t *conf) 3163 { 3164 /* device_lock is held */ 3165 struct list_head head; 3166 list_add(&head, &conf->bitmap_list); 3167 list_del_init(&conf->bitmap_list); 3168 while (!list_empty(&head)) { 3169 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); 3170 list_del_init(&sh->lru); 3171 atomic_inc(&sh->count); 3172 __release_stripe(conf, sh); 3173 } 3174 } 3175 3176 static void unplug_slaves(mddev_t *mddev) 3177 { 3178 raid5_conf_t *conf = mddev_to_conf(mddev); 3179 int i; 3180 3181 rcu_read_lock(); 3182 for (i=0; i<mddev->raid_disks; i++) { 3183 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); 3184 if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) { 3185 request_queue_t *r_queue = bdev_get_queue(rdev->bdev); 3186 3187 atomic_inc(&rdev->nr_pending); 3188 rcu_read_unlock(); 3189 3190 if (r_queue->unplug_fn) 3191 r_queue->unplug_fn(r_queue); 3192 3193 rdev_dec_pending(rdev, mddev); 3194 rcu_read_lock(); 3195 } 3196 } 3197 rcu_read_unlock(); 3198 } 3199 3200 static void raid5_unplug_device(request_queue_t *q) 3201 { 3202 mddev_t *mddev = q->queuedata; 3203 raid5_conf_t *conf = mddev_to_conf(mddev); 3204 unsigned long flags; 3205 3206 spin_lock_irqsave(&conf->device_lock, flags); 3207 3208 if (blk_remove_plug(q)) { 3209 conf->seq_flush++; 3210 raid5_activate_delayed(conf); 3211 } 3212 md_wakeup_thread(mddev->thread); 3213 3214 spin_unlock_irqrestore(&conf->device_lock, flags); 3215 3216 unplug_slaves(mddev); 3217 } 3218 3219 static int raid5_issue_flush(request_queue_t *q, struct gendisk *disk, 3220 sector_t *error_sector) 3221 { 3222 mddev_t *mddev = q->queuedata; 3223 raid5_conf_t *conf = mddev_to_conf(mddev); 3224 int i, ret = 0; 3225 3226 rcu_read_lock(); 
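	/* forward the flush to every working member device, stopping at the
	 * first error
	 */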
3227 for (i=0; i<mddev->raid_disks && ret == 0; i++) { 3228 mdk_rdev_t *rdev = rcu_dereference(conf->disks[i].rdev); 3229 if (rdev && !test_bit(Faulty, &rdev->flags)) { 3230 struct block_device *bdev = rdev->bdev; 3231 request_queue_t *r_queue = bdev_get_queue(bdev); 3232 3233 if (!r_queue->issue_flush_fn) 3234 ret = -EOPNOTSUPP; 3235 else { 3236 atomic_inc(&rdev->nr_pending); 3237 rcu_read_unlock(); 3238 ret = r_queue->issue_flush_fn(r_queue, bdev->bd_disk, 3239 error_sector); 3240 rdev_dec_pending(rdev, mddev); 3241 rcu_read_lock(); 3242 } 3243 } 3244 } 3245 rcu_read_unlock(); 3246 return ret; 3247 } 3248 3249 static int raid5_congested(void *data, int bits) 3250 { 3251 mddev_t *mddev = data; 3252 raid5_conf_t *conf = mddev_to_conf(mddev); 3253 3254 /* No difference between reads and writes. Just check 3255 * how busy the stripe_cache is 3256 */ 3257 if (conf->inactive_blocked) 3258 return 1; 3259 if (conf->quiesce) 3260 return 1; 3261 if (list_empty_careful(&conf->inactive_list)) 3262 return 1; 3263 3264 return 0; 3265 } 3266 3267 /* We want read requests to align with chunks where possible, 3268 * but write requests don't need to. 3269 */ 3270 static int raid5_mergeable_bvec(request_queue_t *q, struct bio *bio, struct bio_vec *biovec) 3271 { 3272 mddev_t *mddev = q->queuedata; 3273 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); 3274 int max; 3275 unsigned int chunk_sectors = mddev->chunk_size >> 9; 3276 unsigned int bio_sectors = bio->bi_size >> 9; 3277 3278 if (bio_data_dir(bio) == WRITE) 3279 return biovec->bv_len; /* always allow writes to be mergeable */ 3280 3281 max = (chunk_sectors - ((sector & (chunk_sectors - 1)) + bio_sectors)) << 9; 3282 if (max < 0) max = 0; 3283 if (max <= biovec->bv_len && bio_sectors == 0) 3284 return biovec->bv_len; 3285 else 3286 return max; 3287 } 3288 3289 3290 static int in_chunk_boundary(mddev_t *mddev, struct bio *bio) 3291 { 3292 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); 3293 unsigned int chunk_sectors = mddev->chunk_size >> 9; 3294 unsigned int bio_sectors = bio->bi_size >> 9; 3295 3296 return chunk_sectors >= 3297 ((sector & (chunk_sectors - 1)) + bio_sectors); 3298 } 3299 3300 /* 3301 * add bio to the retry LIFO ( in O(1) ... we are in interrupt ) 3302 * later sampled by raid5d. 3303 */ 3304 static void add_bio_to_retry(struct bio *bi,raid5_conf_t *conf) 3305 { 3306 unsigned long flags; 3307 3308 spin_lock_irqsave(&conf->device_lock, flags); 3309 3310 bi->bi_next = conf->retry_read_aligned_list; 3311 conf->retry_read_aligned_list = bi; 3312 3313 spin_unlock_irqrestore(&conf->device_lock, flags); 3314 md_wakeup_thread(conf->mddev->thread); 3315 } 3316 3317 3318 static struct bio *remove_bio_from_retry(raid5_conf_t *conf) 3319 { 3320 struct bio *bi; 3321 3322 bi = conf->retry_read_aligned; 3323 if (bi) { 3324 conf->retry_read_aligned = NULL; 3325 return bi; 3326 } 3327 bi = conf->retry_read_aligned_list; 3328 if(bi) { 3329 conf->retry_read_aligned_list = bi->bi_next; 3330 bi->bi_next = NULL; 3331 bi->bi_phys_segments = 1; /* biased count of active stripes */ 3332 bi->bi_hw_segments = 0; /* count of processed stripes */ 3333 } 3334 3335 return bi; 3336 } 3337 3338 3339 /* 3340 * The "raid5_align_endio" should check if the read succeeded and if it 3341 * did, call bio_endio on the original bio (having bio_put the new bio 3342 * first). 3343 * If the read failed.. 
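 * .. the original bio is queued on the retry list (add_bio_to_retry) and
 * raid5d resubmits it through the stripe cache via retry_aligned_read().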
 */
static int raid5_align_endio(struct bio *bi, unsigned int bytes, int error)
{
	struct bio* raid_bi  = bi->bi_private;
	mddev_t *mddev;
	raid5_conf_t *conf;
	int uptodate = test_bit(BIO_UPTODATE, &bi->bi_flags);
	mdk_rdev_t *rdev;

	if (bi->bi_size)
		return 1;
	bio_put(bi);

	mddev = raid_bi->bi_bdev->bd_disk->queue->queuedata;
	conf = mddev_to_conf(mddev);
	rdev = (void*)raid_bi->bi_next;
	raid_bi->bi_next = NULL;

	rdev_dec_pending(rdev, conf->mddev);

	if (!error && uptodate) {
		bio_endio(raid_bi, bytes, 0);
		if (atomic_dec_and_test(&conf->active_aligned_reads))
			wake_up(&conf->wait_for_stripe);
		return 0;
	}


	pr_debug("raid5_align_endio : io error...handing IO for a retry\n");

	add_bio_to_retry(raid_bi, conf);
	return 0;
}

static int bio_fits_rdev(struct bio *bi)
{
	request_queue_t *q = bdev_get_queue(bi->bi_bdev);

	if ((bi->bi_size>>9) > q->max_sectors)
		return 0;
	blk_recount_segments(q, bi);
	if (bi->bi_phys_segments > q->max_phys_segments ||
	    bi->bi_hw_segments > q->max_hw_segments)
		return 0;

	if (q->merge_bvec_fn)
		/* it's too hard to apply the merge_bvec_fn at this stage,
		 * just give up
		 */
		return 0;

	return 1;
}


static int chunk_aligned_read(request_queue_t *q, struct bio * raid_bio)
{
	mddev_t *mddev = q->queuedata;
	raid5_conf_t *conf = mddev_to_conf(mddev);
	const unsigned int raid_disks = conf->raid_disks;
	const unsigned int data_disks = raid_disks - conf->max_degraded;
	unsigned int dd_idx, pd_idx;
	struct bio* align_bi;
	mdk_rdev_t *rdev;

	if (!in_chunk_boundary(mddev, raid_bio)) {
		pr_debug("chunk_aligned_read : non aligned\n");
		return 0;
	}
	/*
	 * use bio_clone to make a copy of the bio
	 */
	align_bi = bio_clone(raid_bio, GFP_NOIO);
	if (!align_bi)
		return 0;
	/*
	 * set bi_end_io to a new function, and set bi_private to the
	 * original bio.
3422 */ 3423 align_bi->bi_end_io = raid5_align_endio; 3424 align_bi->bi_private = raid_bio; 3425 /* 3426 * compute position 3427 */ 3428 align_bi->bi_sector = raid5_compute_sector(raid_bio->bi_sector, 3429 raid_disks, 3430 data_disks, 3431 &dd_idx, 3432 &pd_idx, 3433 conf); 3434 3435 rcu_read_lock(); 3436 rdev = rcu_dereference(conf->disks[dd_idx].rdev); 3437 if (rdev && test_bit(In_sync, &rdev->flags)) { 3438 atomic_inc(&rdev->nr_pending); 3439 rcu_read_unlock(); 3440 raid_bio->bi_next = (void*)rdev; 3441 align_bi->bi_bdev = rdev->bdev; 3442 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID); 3443 align_bi->bi_sector += rdev->data_offset; 3444 3445 if (!bio_fits_rdev(align_bi)) { 3446 /* too big in some way */ 3447 bio_put(align_bi); 3448 rdev_dec_pending(rdev, mddev); 3449 return 0; 3450 } 3451 3452 spin_lock_irq(&conf->device_lock); 3453 wait_event_lock_irq(conf->wait_for_stripe, 3454 conf->quiesce == 0, 3455 conf->device_lock, /* nothing */); 3456 atomic_inc(&conf->active_aligned_reads); 3457 spin_unlock_irq(&conf->device_lock); 3458 3459 generic_make_request(align_bi); 3460 return 1; 3461 } else { 3462 rcu_read_unlock(); 3463 bio_put(align_bi); 3464 return 0; 3465 } 3466 } 3467 3468 3469 static int make_request(request_queue_t *q, struct bio * bi) 3470 { 3471 mddev_t *mddev = q->queuedata; 3472 raid5_conf_t *conf = mddev_to_conf(mddev); 3473 unsigned int dd_idx, pd_idx; 3474 sector_t new_sector; 3475 sector_t logical_sector, last_sector; 3476 struct stripe_head *sh; 3477 const int rw = bio_data_dir(bi); 3478 int remaining; 3479 3480 if (unlikely(bio_barrier(bi))) { 3481 bio_endio(bi, bi->bi_size, -EOPNOTSUPP); 3482 return 0; 3483 } 3484 3485 md_write_start(mddev, bi); 3486 3487 disk_stat_inc(mddev->gendisk, ios[rw]); 3488 disk_stat_add(mddev->gendisk, sectors[rw], bio_sectors(bi)); 3489 3490 if (rw == READ && 3491 mddev->reshape_position == MaxSector && 3492 chunk_aligned_read(q,bi)) 3493 return 0; 3494 3495 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 3496 last_sector = bi->bi_sector + (bi->bi_size>>9); 3497 bi->bi_next = NULL; 3498 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ 3499 3500 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { 3501 DEFINE_WAIT(w); 3502 int disks, data_disks; 3503 3504 retry: 3505 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); 3506 if (likely(conf->expand_progress == MaxSector)) 3507 disks = conf->raid_disks; 3508 else { 3509 /* spinlock is needed as expand_progress may be 3510 * 64bit on a 32bit platform, and so it might be 3511 * possible to see a half-updated value 3512 * Ofcourse expand_progress could change after 3513 * the lock is dropped, so once we get a reference 3514 * to the stripe that we think it is, we will have 3515 * to check again. 
3516 */ 3517 spin_lock_irq(&conf->device_lock); 3518 disks = conf->raid_disks; 3519 if (logical_sector >= conf->expand_progress) 3520 disks = conf->previous_raid_disks; 3521 else { 3522 if (logical_sector >= conf->expand_lo) { 3523 spin_unlock_irq(&conf->device_lock); 3524 schedule(); 3525 goto retry; 3526 } 3527 } 3528 spin_unlock_irq(&conf->device_lock); 3529 } 3530 data_disks = disks - conf->max_degraded; 3531 3532 new_sector = raid5_compute_sector(logical_sector, disks, data_disks, 3533 &dd_idx, &pd_idx, conf); 3534 pr_debug("raid5: make_request, sector %llu logical %llu\n", 3535 (unsigned long long)new_sector, 3536 (unsigned long long)logical_sector); 3537 3538 sh = get_active_stripe(conf, new_sector, disks, pd_idx, (bi->bi_rw&RWA_MASK)); 3539 if (sh) { 3540 if (unlikely(conf->expand_progress != MaxSector)) { 3541 /* expansion might have moved on while waiting for a 3542 * stripe, so we must do the range check again. 3543 * Expansion could still move past after this 3544 * test, but as we are holding a reference to 3545 * 'sh', we know that if that happens, 3546 * STRIPE_EXPANDING will get set and the expansion 3547 * won't proceed until we finish with the stripe. 3548 */ 3549 int must_retry = 0; 3550 spin_lock_irq(&conf->device_lock); 3551 if (logical_sector < conf->expand_progress && 3552 disks == conf->previous_raid_disks) 3553 /* mismatch, need to try again */ 3554 must_retry = 1; 3555 spin_unlock_irq(&conf->device_lock); 3556 if (must_retry) { 3557 release_stripe(sh); 3558 goto retry; 3559 } 3560 } 3561 /* FIXME what if we get a false positive because these 3562 * are being updated. 3563 */ 3564 if (logical_sector >= mddev->suspend_lo && 3565 logical_sector < mddev->suspend_hi) { 3566 release_stripe(sh); 3567 schedule(); 3568 goto retry; 3569 } 3570 3571 if (test_bit(STRIPE_EXPANDING, &sh->state) || 3572 !add_stripe_bio(sh, bi, dd_idx, (bi->bi_rw&RW_MASK))) { 3573 /* Stripe is busy expanding or 3574 * add failed due to overlap. Flush everything 3575 * and wait a while 3576 */ 3577 raid5_unplug_device(mddev->queue); 3578 release_stripe(sh); 3579 schedule(); 3580 goto retry; 3581 } 3582 finish_wait(&conf->wait_for_overlap, &w); 3583 handle_stripe(sh, NULL); 3584 release_stripe(sh); 3585 } else { 3586 /* cannot get stripe for read-ahead, just give-up */ 3587 clear_bit(BIO_UPTODATE, &bi->bi_flags); 3588 finish_wait(&conf->wait_for_overlap, &w); 3589 break; 3590 } 3591 3592 } 3593 spin_lock_irq(&conf->device_lock); 3594 remaining = --bi->bi_phys_segments; 3595 spin_unlock_irq(&conf->device_lock); 3596 if (remaining == 0) { 3597 int bytes = bi->bi_size; 3598 3599 if ( rw == WRITE ) 3600 md_write_end(mddev); 3601 bi->bi_size = 0; 3602 bi->bi_end_io(bi, bytes, 3603 test_bit(BIO_UPTODATE, &bi->bi_flags) 3604 ? 0 : -EIO); 3605 } 3606 return 0; 3607 } 3608 3609 static sector_t reshape_request(mddev_t *mddev, sector_t sector_nr, int *skipped) 3610 { 3611 /* reshaping is quite different to recovery/resync so it is 3612 * handled quite separately ... here. 3613 * 3614 * On each call to sync_request, we gather one chunk worth of 3615 * destination stripes and flag them as expanding. 3616 * Then we find all the source stripes and request reads. 3617 * As the reads complete, handle_stripe will copy the data 3618 * into the destination stripe and release that stripe. 
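	 * Once a destination stripe has received all of its blocks it is
	 * flagged STRIPE_EXPAND_READY; handle_stripe then recomputes parity
	 * for the new geometry and writes the whole stripe out.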
3619 */ 3620 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 3621 struct stripe_head *sh; 3622 int pd_idx; 3623 sector_t first_sector, last_sector; 3624 int raid_disks = conf->previous_raid_disks; 3625 int data_disks = raid_disks - conf->max_degraded; 3626 int new_data_disks = conf->raid_disks - conf->max_degraded; 3627 int i; 3628 int dd_idx; 3629 sector_t writepos, safepos, gap; 3630 3631 if (sector_nr == 0 && 3632 conf->expand_progress != 0) { 3633 /* restarting in the middle, skip the initial sectors */ 3634 sector_nr = conf->expand_progress; 3635 sector_div(sector_nr, new_data_disks); 3636 *skipped = 1; 3637 return sector_nr; 3638 } 3639 3640 /* we update the metadata when there is more than 3Meg 3641 * in the block range (that is rather arbitrary, should 3642 * probably be time based) or when the data about to be 3643 * copied would over-write the source of the data at 3644 * the front of the range. 3645 * i.e. one new_stripe forward from expand_progress new_maps 3646 * to after where expand_lo old_maps to 3647 */ 3648 writepos = conf->expand_progress + 3649 conf->chunk_size/512*(new_data_disks); 3650 sector_div(writepos, new_data_disks); 3651 safepos = conf->expand_lo; 3652 sector_div(safepos, data_disks); 3653 gap = conf->expand_progress - conf->expand_lo; 3654 3655 if (writepos >= safepos || 3656 gap > (new_data_disks)*3000*2 /*3Meg*/) { 3657 /* Cannot proceed until we've updated the superblock... */ 3658 wait_event(conf->wait_for_overlap, 3659 atomic_read(&conf->reshape_stripes)==0); 3660 mddev->reshape_position = conf->expand_progress; 3661 set_bit(MD_CHANGE_DEVS, &mddev->flags); 3662 md_wakeup_thread(mddev->thread); 3663 wait_event(mddev->sb_wait, mddev->flags == 0 || 3664 kthread_should_stop()); 3665 spin_lock_irq(&conf->device_lock); 3666 conf->expand_lo = mddev->reshape_position; 3667 spin_unlock_irq(&conf->device_lock); 3668 wake_up(&conf->wait_for_overlap); 3669 } 3670 3671 for (i=0; i < conf->chunk_size/512; i+= STRIPE_SECTORS) { 3672 int j; 3673 int skipped = 0; 3674 pd_idx = stripe_to_pdidx(sector_nr+i, conf, conf->raid_disks); 3675 sh = get_active_stripe(conf, sector_nr+i, 3676 conf->raid_disks, pd_idx, 0); 3677 set_bit(STRIPE_EXPANDING, &sh->state); 3678 atomic_inc(&conf->reshape_stripes); 3679 /* If any of this stripe is beyond the end of the old 3680 * array, then we need to zero those blocks 3681 */ 3682 for (j=sh->disks; j--;) { 3683 sector_t s; 3684 if (j == sh->pd_idx) 3685 continue; 3686 if (conf->level == 6 && 3687 j == raid6_next_disk(sh->pd_idx, sh->disks)) 3688 continue; 3689 s = compute_blocknr(sh, j); 3690 if (s < (mddev->array_size<<1)) { 3691 skipped = 1; 3692 continue; 3693 } 3694 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); 3695 set_bit(R5_Expanded, &sh->dev[j].flags); 3696 set_bit(R5_UPTODATE, &sh->dev[j].flags); 3697 } 3698 if (!skipped) { 3699 set_bit(STRIPE_EXPAND_READY, &sh->state); 3700 set_bit(STRIPE_HANDLE, &sh->state); 3701 } 3702 release_stripe(sh); 3703 } 3704 spin_lock_irq(&conf->device_lock); 3705 conf->expand_progress = (sector_nr + i) * new_data_disks; 3706 spin_unlock_irq(&conf->device_lock); 3707 /* Ok, those stripe are ready. We can start scheduling 3708 * reads on the source stripes. 3709 * The source stripes are determined by mapping the first and last 3710 * block on the destination stripes. 
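	 * i.e. logical sectors sector_nr*new_data_disks up to
	 * (sector_nr + chunk_size/512)*new_data_disks - 1 are fed back
	 * through raid5_compute_sector() using the old (previous_raid_disks)
	 * geometry.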
3711 */ 3712 first_sector = 3713 raid5_compute_sector(sector_nr*(new_data_disks), 3714 raid_disks, data_disks, 3715 &dd_idx, &pd_idx, conf); 3716 last_sector = 3717 raid5_compute_sector((sector_nr+conf->chunk_size/512) 3718 *(new_data_disks) -1, 3719 raid_disks, data_disks, 3720 &dd_idx, &pd_idx, conf); 3721 if (last_sector >= (mddev->size<<1)) 3722 last_sector = (mddev->size<<1)-1; 3723 while (first_sector <= last_sector) { 3724 pd_idx = stripe_to_pdidx(first_sector, conf, 3725 conf->previous_raid_disks); 3726 sh = get_active_stripe(conf, first_sector, 3727 conf->previous_raid_disks, pd_idx, 0); 3728 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); 3729 set_bit(STRIPE_HANDLE, &sh->state); 3730 release_stripe(sh); 3731 first_sector += STRIPE_SECTORS; 3732 } 3733 return conf->chunk_size>>9; 3734 } 3735 3736 /* FIXME go_faster isn't used */ 3737 static inline sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster) 3738 { 3739 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 3740 struct stripe_head *sh; 3741 int pd_idx; 3742 int raid_disks = conf->raid_disks; 3743 sector_t max_sector = mddev->size << 1; 3744 int sync_blocks; 3745 int still_degraded = 0; 3746 int i; 3747 3748 if (sector_nr >= max_sector) { 3749 /* just being told to finish up .. nothing much to do */ 3750 unplug_slaves(mddev); 3751 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 3752 end_reshape(conf); 3753 return 0; 3754 } 3755 3756 if (mddev->curr_resync < max_sector) /* aborted */ 3757 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 3758 &sync_blocks, 1); 3759 else /* completed sync */ 3760 conf->fullsync = 0; 3761 bitmap_close_sync(mddev->bitmap); 3762 3763 return 0; 3764 } 3765 3766 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 3767 return reshape_request(mddev, sector_nr, skipped); 3768 3769 /* if there is too many failed drives and we are trying 3770 * to resync, then assert that we are finished, because there is 3771 * nothing we can do. 3772 */ 3773 if (mddev->degraded >= conf->max_degraded && 3774 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 3775 sector_t rv = (mddev->size << 1) - sector_nr; 3776 *skipped = 1; 3777 return rv; 3778 } 3779 if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && 3780 !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 3781 !conf->fullsync && sync_blocks >= STRIPE_SECTORS) { 3782 /* we can skip this block, and probably more */ 3783 sync_blocks /= STRIPE_SECTORS; 3784 *skipped = 1; 3785 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */ 3786 } 3787 3788 pd_idx = stripe_to_pdidx(sector_nr, conf, raid_disks); 3789 sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 1); 3790 if (sh == NULL) { 3791 sh = get_active_stripe(conf, sector_nr, raid_disks, pd_idx, 0); 3792 /* make sure we don't swamp the stripe cache if someone else 3793 * is trying to get access 3794 */ 3795 schedule_timeout_uninterruptible(1); 3796 } 3797 /* Need to check if array will still be degraded after recovery/resync 3798 * We don't need to check the 'failed' flag as when that gets set, 3799 * recovery aborts. 
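	 * If it will be, we pass that to bitmap_start_sync() below so the
	 * relevant bitmap bits are not cleared prematurely.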
3800 */ 3801 for (i=0; i<mddev->raid_disks; i++) 3802 if (conf->disks[i].rdev == NULL) 3803 still_degraded = 1; 3804 3805 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); 3806 3807 spin_lock(&sh->lock); 3808 set_bit(STRIPE_SYNCING, &sh->state); 3809 clear_bit(STRIPE_INSYNC, &sh->state); 3810 spin_unlock(&sh->lock); 3811 3812 handle_stripe(sh, NULL); 3813 release_stripe(sh); 3814 3815 return STRIPE_SECTORS; 3816 } 3817 3818 static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio) 3819 { 3820 /* We may not be able to submit a whole bio at once as there 3821 * may not be enough stripe_heads available. 3822 * We cannot pre-allocate enough stripe_heads as we may need 3823 * more than exist in the cache (if we allow ever large chunks). 3824 * So we do one stripe head at a time and record in 3825 * ->bi_hw_segments how many have been done. 3826 * 3827 * We *know* that this entire raid_bio is in one chunk, so 3828 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector. 3829 */ 3830 struct stripe_head *sh; 3831 int dd_idx, pd_idx; 3832 sector_t sector, logical_sector, last_sector; 3833 int scnt = 0; 3834 int remaining; 3835 int handled = 0; 3836 3837 logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 3838 sector = raid5_compute_sector( logical_sector, 3839 conf->raid_disks, 3840 conf->raid_disks - conf->max_degraded, 3841 &dd_idx, 3842 &pd_idx, 3843 conf); 3844 last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9); 3845 3846 for (; logical_sector < last_sector; 3847 logical_sector += STRIPE_SECTORS, 3848 sector += STRIPE_SECTORS, 3849 scnt++) { 3850 3851 if (scnt < raid_bio->bi_hw_segments) 3852 /* already done this stripe */ 3853 continue; 3854 3855 sh = get_active_stripe(conf, sector, conf->raid_disks, pd_idx, 1); 3856 3857 if (!sh) { 3858 /* failed to get a stripe - must wait */ 3859 raid_bio->bi_hw_segments = scnt; 3860 conf->retry_read_aligned = raid_bio; 3861 return handled; 3862 } 3863 3864 set_bit(R5_ReadError, &sh->dev[dd_idx].flags); 3865 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) { 3866 release_stripe(sh); 3867 raid_bio->bi_hw_segments = scnt; 3868 conf->retry_read_aligned = raid_bio; 3869 return handled; 3870 } 3871 3872 handle_stripe(sh, NULL); 3873 release_stripe(sh); 3874 handled++; 3875 } 3876 spin_lock_irq(&conf->device_lock); 3877 remaining = --raid_bio->bi_phys_segments; 3878 spin_unlock_irq(&conf->device_lock); 3879 if (remaining == 0) { 3880 int bytes = raid_bio->bi_size; 3881 3882 raid_bio->bi_size = 0; 3883 raid_bio->bi_end_io(raid_bio, bytes, 3884 test_bit(BIO_UPTODATE, &raid_bio->bi_flags) 3885 ? 0 : -EIO); 3886 } 3887 if (atomic_dec_and_test(&conf->active_aligned_reads)) 3888 wake_up(&conf->wait_for_stripe); 3889 return handled; 3890 } 3891 3892 3893 3894 /* 3895 * This is our raid5 kernel thread. 3896 * 3897 * We scan the hash table for stripes which can be handled now. 3898 * During the scan, completed stripes are saved for us by the interrupt 3899 * handler, so that they will not have to wait for our next wakeup. 
3900 */ 3901 static void raid5d (mddev_t *mddev) 3902 { 3903 struct stripe_head *sh; 3904 raid5_conf_t *conf = mddev_to_conf(mddev); 3905 int handled; 3906 3907 pr_debug("+++ raid5d active\n"); 3908 3909 md_check_recovery(mddev); 3910 3911 handled = 0; 3912 spin_lock_irq(&conf->device_lock); 3913 while (1) { 3914 struct list_head *first; 3915 struct bio *bio; 3916 3917 if (conf->seq_flush != conf->seq_write) { 3918 int seq = conf->seq_flush; 3919 spin_unlock_irq(&conf->device_lock); 3920 bitmap_unplug(mddev->bitmap); 3921 spin_lock_irq(&conf->device_lock); 3922 conf->seq_write = seq; 3923 activate_bit_delay(conf); 3924 } 3925 3926 if (list_empty(&conf->handle_list) && 3927 atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD && 3928 !blk_queue_plugged(mddev->queue) && 3929 !list_empty(&conf->delayed_list)) 3930 raid5_activate_delayed(conf); 3931 3932 while ((bio = remove_bio_from_retry(conf))) { 3933 int ok; 3934 spin_unlock_irq(&conf->device_lock); 3935 ok = retry_aligned_read(conf, bio); 3936 spin_lock_irq(&conf->device_lock); 3937 if (!ok) 3938 break; 3939 handled++; 3940 } 3941 3942 if (list_empty(&conf->handle_list)) { 3943 async_tx_issue_pending_all(); 3944 break; 3945 } 3946 3947 first = conf->handle_list.next; 3948 sh = list_entry(first, struct stripe_head, lru); 3949 3950 list_del_init(first); 3951 atomic_inc(&sh->count); 3952 BUG_ON(atomic_read(&sh->count)!= 1); 3953 spin_unlock_irq(&conf->device_lock); 3954 3955 handled++; 3956 handle_stripe(sh, conf->spare_page); 3957 release_stripe(sh); 3958 3959 spin_lock_irq(&conf->device_lock); 3960 } 3961 pr_debug("%d stripes handled\n", handled); 3962 3963 spin_unlock_irq(&conf->device_lock); 3964 3965 unplug_slaves(mddev); 3966 3967 pr_debug("--- raid5d inactive\n"); 3968 } 3969 3970 static ssize_t 3971 raid5_show_stripe_cache_size(mddev_t *mddev, char *page) 3972 { 3973 raid5_conf_t *conf = mddev_to_conf(mddev); 3974 if (conf) 3975 return sprintf(page, "%d\n", conf->max_nr_stripes); 3976 else 3977 return 0; 3978 } 3979 3980 static ssize_t 3981 raid5_store_stripe_cache_size(mddev_t *mddev, const char *page, size_t len) 3982 { 3983 raid5_conf_t *conf = mddev_to_conf(mddev); 3984 char *end; 3985 int new; 3986 if (len >= PAGE_SIZE) 3987 return -EINVAL; 3988 if (!conf) 3989 return -ENODEV; 3990 3991 new = simple_strtoul(page, &end, 10); 3992 if (!*page || (*end && *end != '\n') ) 3993 return -EINVAL; 3994 if (new <= 16 || new > 32768) 3995 return -EINVAL; 3996 while (new < conf->max_nr_stripes) { 3997 if (drop_one_stripe(conf)) 3998 conf->max_nr_stripes--; 3999 else 4000 break; 4001 } 4002 md_allow_write(mddev); 4003 while (new > conf->max_nr_stripes) { 4004 if (grow_one_stripe(conf)) 4005 conf->max_nr_stripes++; 4006 else break; 4007 } 4008 return len; 4009 } 4010 4011 static struct md_sysfs_entry 4012 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, 4013 raid5_show_stripe_cache_size, 4014 raid5_store_stripe_cache_size); 4015 4016 static ssize_t 4017 stripe_cache_active_show(mddev_t *mddev, char *page) 4018 { 4019 raid5_conf_t *conf = mddev_to_conf(mddev); 4020 if (conf) 4021 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); 4022 else 4023 return 0; 4024 } 4025 4026 static struct md_sysfs_entry 4027 raid5_stripecache_active = __ATTR_RO(stripe_cache_active); 4028 4029 static struct attribute *raid5_attrs[] = { 4030 &raid5_stripecache_size.attr, 4031 &raid5_stripecache_active.attr, 4032 NULL, 4033 }; 4034 static struct attribute_group raid5_attrs_group = { 4035 .name = NULL, 4036 .attrs = raid5_attrs, 
4037 }; 4038 4039 static int run(mddev_t *mddev) 4040 { 4041 raid5_conf_t *conf; 4042 int raid_disk, memory; 4043 mdk_rdev_t *rdev; 4044 struct disk_info *disk; 4045 struct list_head *tmp; 4046 int working_disks = 0; 4047 4048 if (mddev->level != 5 && mddev->level != 4 && mddev->level != 6) { 4049 printk(KERN_ERR "raid5: %s: raid level not set to 4/5/6 (%d)\n", 4050 mdname(mddev), mddev->level); 4051 return -EIO; 4052 } 4053 4054 if (mddev->reshape_position != MaxSector) { 4055 /* Check that we can continue the reshape. 4056 * Currently only disks can change, it must 4057 * increase, and we must be past the point where 4058 * a stripe over-writes itself 4059 */ 4060 sector_t here_new, here_old; 4061 int old_disks; 4062 int max_degraded = (mddev->level == 5 ? 1 : 2); 4063 4064 if (mddev->new_level != mddev->level || 4065 mddev->new_layout != mddev->layout || 4066 mddev->new_chunk != mddev->chunk_size) { 4067 printk(KERN_ERR "raid5: %s: unsupported reshape " 4068 "required - aborting.\n", 4069 mdname(mddev)); 4070 return -EINVAL; 4071 } 4072 if (mddev->delta_disks <= 0) { 4073 printk(KERN_ERR "raid5: %s: unsupported reshape " 4074 "(reduce disks) required - aborting.\n", 4075 mdname(mddev)); 4076 return -EINVAL; 4077 } 4078 old_disks = mddev->raid_disks - mddev->delta_disks; 4079 /* reshape_position must be on a new-stripe boundary, and one 4080 * further up in new geometry must map after here in old 4081 * geometry. 4082 */ 4083 here_new = mddev->reshape_position; 4084 if (sector_div(here_new, (mddev->chunk_size>>9)* 4085 (mddev->raid_disks - max_degraded))) { 4086 printk(KERN_ERR "raid5: reshape_position not " 4087 "on a stripe boundary\n"); 4088 return -EINVAL; 4089 } 4090 /* here_new is the stripe we will write to */ 4091 here_old = mddev->reshape_position; 4092 sector_div(here_old, (mddev->chunk_size>>9)* 4093 (old_disks-max_degraded)); 4094 /* here_old is the first stripe that we might need to read 4095 * from */ 4096 if (here_new >= here_old) { 4097 /* Reading from the same stripe as writing to - bad */ 4098 printk(KERN_ERR "raid5: reshape_position too early for " 4099 "auto-recovery - aborting.\n"); 4100 return -EINVAL; 4101 } 4102 printk(KERN_INFO "raid5: reshape will continue\n"); 4103 /* OK, we should be able to continue; */ 4104 } 4105 4106 4107 mddev->private = kzalloc(sizeof (raid5_conf_t), GFP_KERNEL); 4108 if ((conf = mddev->private) == NULL) 4109 goto abort; 4110 if (mddev->reshape_position == MaxSector) { 4111 conf->previous_raid_disks = conf->raid_disks = mddev->raid_disks; 4112 } else { 4113 conf->raid_disks = mddev->raid_disks; 4114 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; 4115 } 4116 4117 conf->disks = kzalloc(conf->raid_disks * sizeof(struct disk_info), 4118 GFP_KERNEL); 4119 if (!conf->disks) 4120 goto abort; 4121 4122 conf->mddev = mddev; 4123 4124 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) 4125 goto abort; 4126 4127 if (mddev->level == 6) { 4128 conf->spare_page = alloc_page(GFP_KERNEL); 4129 if (!conf->spare_page) 4130 goto abort; 4131 } 4132 spin_lock_init(&conf->device_lock); 4133 init_waitqueue_head(&conf->wait_for_stripe); 4134 init_waitqueue_head(&conf->wait_for_overlap); 4135 INIT_LIST_HEAD(&conf->handle_list); 4136 INIT_LIST_HEAD(&conf->delayed_list); 4137 INIT_LIST_HEAD(&conf->bitmap_list); 4138 INIT_LIST_HEAD(&conf->inactive_list); 4139 atomic_set(&conf->active_stripes, 0); 4140 atomic_set(&conf->preread_active_stripes, 0); 4141 atomic_set(&conf->active_aligned_reads, 0); 4142 4143 pr_debug("raid5: 
run(%s) called.\n", mdname(mddev)); 4144 4145 ITERATE_RDEV(mddev,rdev,tmp) { 4146 raid_disk = rdev->raid_disk; 4147 if (raid_disk >= conf->raid_disks 4148 || raid_disk < 0) 4149 continue; 4150 disk = conf->disks + raid_disk; 4151 4152 disk->rdev = rdev; 4153 4154 if (test_bit(In_sync, &rdev->flags)) { 4155 char b[BDEVNAME_SIZE]; 4156 printk(KERN_INFO "raid5: device %s operational as raid" 4157 " disk %d\n", bdevname(rdev->bdev,b), 4158 raid_disk); 4159 working_disks++; 4160 } 4161 } 4162 4163 /* 4164 * 0 for a fully functional array, 1 or 2 for a degraded array. 4165 */ 4166 mddev->degraded = conf->raid_disks - working_disks; 4167 conf->mddev = mddev; 4168 conf->chunk_size = mddev->chunk_size; 4169 conf->level = mddev->level; 4170 if (conf->level == 6) 4171 conf->max_degraded = 2; 4172 else 4173 conf->max_degraded = 1; 4174 conf->algorithm = mddev->layout; 4175 conf->max_nr_stripes = NR_STRIPES; 4176 conf->expand_progress = mddev->reshape_position; 4177 4178 /* device size must be a multiple of chunk size */ 4179 mddev->size &= ~(mddev->chunk_size/1024 -1); 4180 mddev->resync_max_sectors = mddev->size << 1; 4181 4182 if (conf->level == 6 && conf->raid_disks < 4) { 4183 printk(KERN_ERR "raid6: not enough configured devices for %s (%d, minimum 4)\n", 4184 mdname(mddev), conf->raid_disks); 4185 goto abort; 4186 } 4187 if (!conf->chunk_size || conf->chunk_size % 4) { 4188 printk(KERN_ERR "raid5: invalid chunk size %d for %s\n", 4189 conf->chunk_size, mdname(mddev)); 4190 goto abort; 4191 } 4192 if (conf->algorithm > ALGORITHM_RIGHT_SYMMETRIC) { 4193 printk(KERN_ERR 4194 "raid5: unsupported parity algorithm %d for %s\n", 4195 conf->algorithm, mdname(mddev)); 4196 goto abort; 4197 } 4198 if (mddev->degraded > conf->max_degraded) { 4199 printk(KERN_ERR "raid5: not enough operational devices for %s" 4200 " (%d/%d failed)\n", 4201 mdname(mddev), mddev->degraded, conf->raid_disks); 4202 goto abort; 4203 } 4204 4205 if (mddev->degraded > 0 && 4206 mddev->recovery_cp != MaxSector) { 4207 if (mddev->ok_start_degraded) 4208 printk(KERN_WARNING 4209 "raid5: starting dirty degraded array: %s" 4210 "- data corruption possible.\n", 4211 mdname(mddev)); 4212 else { 4213 printk(KERN_ERR 4214 "raid5: cannot start dirty degraded array for %s\n", 4215 mdname(mddev)); 4216 goto abort; 4217 } 4218 } 4219 4220 { 4221 mddev->thread = md_register_thread(raid5d, mddev, "%s_raid5"); 4222 if (!mddev->thread) { 4223 printk(KERN_ERR 4224 "raid5: couldn't allocate thread for %s\n", 4225 mdname(mddev)); 4226 goto abort; 4227 } 4228 } 4229 memory = conf->max_nr_stripes * (sizeof(struct stripe_head) + 4230 conf->raid_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; 4231 if (grow_stripes(conf, conf->max_nr_stripes)) { 4232 printk(KERN_ERR 4233 "raid5: couldn't allocate %dkB for buffers\n", memory); 4234 shrink_stripes(conf); 4235 md_unregister_thread(mddev->thread); 4236 goto abort; 4237 } else 4238 printk(KERN_INFO "raid5: allocated %dkB for %s\n", 4239 memory, mdname(mddev)); 4240 4241 if (mddev->degraded == 0) 4242 printk("raid5: raid level %d set %s active with %d out of %d" 4243 " devices, algorithm %d\n", conf->level, mdname(mddev), 4244 mddev->raid_disks-mddev->degraded, mddev->raid_disks, 4245 conf->algorithm); 4246 else 4247 printk(KERN_ALERT "raid5: raid level %d set %s active with %d" 4248 " out of %d devices, algorithm %d\n", conf->level, 4249 mdname(mddev), mddev->raid_disks - mddev->degraded, 4250 mddev->raid_disks, conf->algorithm); 4251 4252 print_raid5_conf(conf); 4253 4254 if (conf->expand_progress != 
MaxSector) { 4255 printk("...ok start reshape thread\n"); 4256 conf->expand_lo = conf->expand_progress; 4257 atomic_set(&conf->reshape_stripes, 0); 4258 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4259 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4260 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 4261 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 4262 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 4263 "%s_reshape"); 4264 } 4265 4266 /* read-ahead size must cover two whole stripes, which is 4267 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices 4268 */ 4269 { 4270 int data_disks = conf->previous_raid_disks - conf->max_degraded; 4271 int stripe = data_disks * 4272 (mddev->chunk_size / PAGE_SIZE); 4273 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 4274 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 4275 } 4276 4277 /* Ok, everything is just fine now */ 4278 if (sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) 4279 printk(KERN_WARNING 4280 "raid5: failed to create sysfs attributes for %s\n", 4281 mdname(mddev)); 4282 4283 mddev->queue->unplug_fn = raid5_unplug_device; 4284 mddev->queue->issue_flush_fn = raid5_issue_flush; 4285 mddev->queue->backing_dev_info.congested_data = mddev; 4286 mddev->queue->backing_dev_info.congested_fn = raid5_congested; 4287 4288 mddev->array_size = mddev->size * (conf->previous_raid_disks - 4289 conf->max_degraded); 4290 4291 blk_queue_merge_bvec(mddev->queue, raid5_mergeable_bvec); 4292 4293 return 0; 4294 abort: 4295 if (conf) { 4296 print_raid5_conf(conf); 4297 safe_put_page(conf->spare_page); 4298 kfree(conf->disks); 4299 kfree(conf->stripe_hashtbl); 4300 kfree(conf); 4301 } 4302 mddev->private = NULL; 4303 printk(KERN_ALERT "raid5: failed to run raid set %s\n", mdname(mddev)); 4304 return -EIO; 4305 } 4306 4307 4308 4309 static int stop(mddev_t *mddev) 4310 { 4311 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 4312 4313 md_unregister_thread(mddev->thread); 4314 mddev->thread = NULL; 4315 shrink_stripes(conf); 4316 kfree(conf->stripe_hashtbl); 4317 mddev->queue->backing_dev_info.congested_fn = NULL; 4318 blk_sync_queue(mddev->queue); /* the unplug fn references 'conf'*/ 4319 sysfs_remove_group(&mddev->kobj, &raid5_attrs_group); 4320 kfree(conf->disks); 4321 kfree(conf); 4322 mddev->private = NULL; 4323 return 0; 4324 } 4325 4326 #ifdef DEBUG 4327 static void print_sh (struct seq_file *seq, struct stripe_head *sh) 4328 { 4329 int i; 4330 4331 seq_printf(seq, "sh %llu, pd_idx %d, state %ld.\n", 4332 (unsigned long long)sh->sector, sh->pd_idx, sh->state); 4333 seq_printf(seq, "sh %llu, count %d.\n", 4334 (unsigned long long)sh->sector, atomic_read(&sh->count)); 4335 seq_printf(seq, "sh %llu, ", (unsigned long long)sh->sector); 4336 for (i = 0; i < sh->disks; i++) { 4337 seq_printf(seq, "(cache%d: %p %ld) ", 4338 i, sh->dev[i].page, sh->dev[i].flags); 4339 } 4340 seq_printf(seq, "\n"); 4341 } 4342 4343 static void printall (struct seq_file *seq, raid5_conf_t *conf) 4344 { 4345 struct stripe_head *sh; 4346 struct hlist_node *hn; 4347 int i; 4348 4349 spin_lock_irq(&conf->device_lock); 4350 for (i = 0; i < NR_HASH; i++) { 4351 hlist_for_each_entry(sh, hn, &conf->stripe_hashtbl[i], hash) { 4352 if (sh->raid_conf != conf) 4353 continue; 4354 print_sh(seq, sh); 4355 } 4356 } 4357 spin_unlock_irq(&conf->device_lock); 4358 } 4359 #endif 4360 4361 static void status (struct seq_file *seq, mddev_t *mddev) 4362 { 4363 raid5_conf_t *conf = (raid5_conf_t *) mddev->private; 4364 int i; 4365 4366 
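	/* Emits the raid-specific part of the /proc/mdstat line; e.g.
	 * with one failed device out of four this prints something like
	 * " level 5, 64k chunk, algorithm 2 [4/3] [UUU_]".
	 */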
seq_printf (seq, " level %d, %dk chunk, algorithm %d", mddev->level, mddev->chunk_size >> 10, mddev->layout); 4367 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); 4368 for (i = 0; i < conf->raid_disks; i++) 4369 seq_printf (seq, "%s", 4370 conf->disks[i].rdev && 4371 test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_"); 4372 seq_printf (seq, "]"); 4373 #ifdef DEBUG 4374 seq_printf (seq, "\n"); 4375 printall(seq, conf); 4376 #endif 4377 } 4378 4379 static void print_raid5_conf (raid5_conf_t *conf) 4380 { 4381 int i; 4382 struct disk_info *tmp; 4383 4384 printk("RAID5 conf printout:\n"); 4385 if (!conf) { 4386 printk("(conf==NULL)\n"); 4387 return; 4388 } 4389 printk(" --- rd:%d wd:%d\n", conf->raid_disks, 4390 conf->raid_disks - conf->mddev->degraded); 4391 4392 for (i = 0; i < conf->raid_disks; i++) { 4393 char b[BDEVNAME_SIZE]; 4394 tmp = conf->disks + i; 4395 if (tmp->rdev) 4396 printk(" disk %d, o:%d, dev:%s\n", 4397 i, !test_bit(Faulty, &tmp->rdev->flags), 4398 bdevname(tmp->rdev->bdev,b)); 4399 } 4400 } 4401 4402 static int raid5_spare_active(mddev_t *mddev) 4403 { 4404 int i; 4405 raid5_conf_t *conf = mddev->private; 4406 struct disk_info *tmp; 4407 4408 for (i = 0; i < conf->raid_disks; i++) { 4409 tmp = conf->disks + i; 4410 if (tmp->rdev 4411 && !test_bit(Faulty, &tmp->rdev->flags) 4412 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 4413 unsigned long flags; 4414 spin_lock_irqsave(&conf->device_lock, flags); 4415 mddev->degraded--; 4416 spin_unlock_irqrestore(&conf->device_lock, flags); 4417 } 4418 } 4419 print_raid5_conf(conf); 4420 return 0; 4421 } 4422 4423 static int raid5_remove_disk(mddev_t *mddev, int number) 4424 { 4425 raid5_conf_t *conf = mddev->private; 4426 int err = 0; 4427 mdk_rdev_t *rdev; 4428 struct disk_info *p = conf->disks + number; 4429 4430 print_raid5_conf(conf); 4431 rdev = p->rdev; 4432 if (rdev) { 4433 if (test_bit(In_sync, &rdev->flags) || 4434 atomic_read(&rdev->nr_pending)) { 4435 err = -EBUSY; 4436 goto abort; 4437 } 4438 p->rdev = NULL; 4439 synchronize_rcu(); 4440 if (atomic_read(&rdev->nr_pending)) { 4441 /* lost the race, try later */ 4442 err = -EBUSY; 4443 p->rdev = rdev; 4444 } 4445 } 4446 abort: 4447 4448 print_raid5_conf(conf); 4449 return err; 4450 } 4451 4452 static int raid5_add_disk(mddev_t *mddev, mdk_rdev_t *rdev) 4453 { 4454 raid5_conf_t *conf = mddev->private; 4455 int found = 0; 4456 int disk; 4457 struct disk_info *p; 4458 4459 if (mddev->degraded > conf->max_degraded) 4460 /* no point adding a device */ 4461 return 0; 4462 4463 /* 4464 * find the disk ... but prefer rdev->saved_raid_disk 4465 * if possible. 4466 */ 4467 if (rdev->saved_raid_disk >= 0 && 4468 conf->disks[rdev->saved_raid_disk].rdev == NULL) 4469 disk = rdev->saved_raid_disk; 4470 else 4471 disk = 0; 4472 for ( ; disk < conf->raid_disks; disk++) 4473 if ((p=conf->disks + disk)->rdev == NULL) { 4474 clear_bit(In_sync, &rdev->flags); 4475 rdev->raid_disk = disk; 4476 found = 1; 4477 if (rdev->saved_raid_disk != disk) 4478 conf->fullsync = 1; 4479 rcu_assign_pointer(p->rdev, rdev); 4480 break; 4481 } 4482 print_raid5_conf(conf); 4483 return found; 4484 } 4485 4486 static int raid5_resize(mddev_t *mddev, sector_t sectors) 4487 { 4488 /* no resync is happening, and there is enough space 4489 * on all devices, so we can resize. 4490 * We need to make sure resync covers any new space. 
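 * As a worked example (illustrative numbers): with 64KiB chunks
 * (128 sectors), RAID5 over four devices and a new per-device size
 * of 100000 sectors, the code below rounds sectors down to 99968 and
 * exports 99968 * 3 = 299904 sectors of array space (array_size is
 * kept in KiB, hence the >>1).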
4491 * If the array is shrinking we should possibly wait until 4492 * any io in the removed space completes, but it hardly seems 4493 * worth it. 4494 */ 4495 raid5_conf_t *conf = mddev_to_conf(mddev); 4496 4497 sectors &= ~((sector_t)mddev->chunk_size/512 - 1); 4498 mddev->array_size = (sectors * (mddev->raid_disks-conf->max_degraded))>>1; 4499 set_capacity(mddev->gendisk, mddev->array_size << 1); 4500 mddev->changed = 1; 4501 if (sectors/2 > mddev->size && mddev->recovery_cp == MaxSector) { 4502 mddev->recovery_cp = mddev->size << 1; 4503 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 4504 } 4505 mddev->size = sectors /2; 4506 mddev->resync_max_sectors = sectors; 4507 return 0; 4508 } 4509 4510 #ifdef CONFIG_MD_RAID5_RESHAPE 4511 static int raid5_check_reshape(mddev_t *mddev) 4512 { 4513 raid5_conf_t *conf = mddev_to_conf(mddev); 4514 int err; 4515 4516 if (mddev->delta_disks < 0 || 4517 mddev->new_level != mddev->level) 4518 return -EINVAL; /* Cannot shrink array or change level yet */ 4519 if (mddev->delta_disks == 0) 4520 return 0; /* nothing to do */ 4521 4522 /* Can only proceed if there are plenty of stripe_heads. 4523 * We need a minimum of one full stripe,, and for sensible progress 4524 * it is best to have about 4 times that. 4525 * If we require 4 times, then the default 256 4K stripe_heads will 4526 * allow for chunk sizes up to 256K, which is probably OK. 4527 * If the chunk size is greater, user-space should request more 4528 * stripe_heads first. 4529 */ 4530 if ((mddev->chunk_size / STRIPE_SIZE) * 4 > conf->max_nr_stripes || 4531 (mddev->new_chunk / STRIPE_SIZE) * 4 > conf->max_nr_stripes) { 4532 printk(KERN_WARNING "raid5: reshape: not enough stripes. Needed %lu\n", 4533 (mddev->chunk_size / STRIPE_SIZE)*4); 4534 return -ENOSPC; 4535 } 4536 4537 err = resize_stripes(conf, conf->raid_disks + mddev->delta_disks); 4538 if (err) 4539 return err; 4540 4541 if (mddev->degraded > conf->max_degraded) 4542 return -EINVAL; 4543 /* looks like we might be able to manage this */ 4544 return 0; 4545 } 4546 4547 static int raid5_start_reshape(mddev_t *mddev) 4548 { 4549 raid5_conf_t *conf = mddev_to_conf(mddev); 4550 mdk_rdev_t *rdev; 4551 struct list_head *rtmp; 4552 int spares = 0; 4553 int added_devices = 0; 4554 unsigned long flags; 4555 4556 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 4557 return -EBUSY; 4558 4559 ITERATE_RDEV(mddev, rdev, rtmp) 4560 if (rdev->raid_disk < 0 && 4561 !test_bit(Faulty, &rdev->flags)) 4562 spares++; 4563 4564 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) 4565 /* Not enough devices even to make a degraded array 4566 * of that size 4567 */ 4568 return -EINVAL; 4569 4570 atomic_set(&conf->reshape_stripes, 0); 4571 spin_lock_irq(&conf->device_lock); 4572 conf->previous_raid_disks = conf->raid_disks; 4573 conf->raid_disks += mddev->delta_disks; 4574 conf->expand_progress = 0; 4575 conf->expand_lo = 0; 4576 spin_unlock_irq(&conf->device_lock); 4577 4578 /* Add some new drives, as many as will fit. 4579 * We know there are enough to make the newly sized array work. 
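 * Note: each device that is added is marked In_sync immediately with
 * recovery_offset 0, because the reshape pass itself writes valid
 * data onto it; degraded is then recomputed below as the number of
 * new slots that could not be filled.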
4580 */ 4581 ITERATE_RDEV(mddev, rdev, rtmp) 4582 if (rdev->raid_disk < 0 && 4583 !test_bit(Faulty, &rdev->flags)) { 4584 if (raid5_add_disk(mddev, rdev)) { 4585 char nm[20]; 4586 set_bit(In_sync, &rdev->flags); 4587 added_devices++; 4588 rdev->recovery_offset = 0; 4589 sprintf(nm, "rd%d", rdev->raid_disk); 4590 if (sysfs_create_link(&mddev->kobj, 4591 &rdev->kobj, nm)) 4592 printk(KERN_WARNING 4593 "raid5: failed to create " 4594 " link %s for %s\n", 4595 nm, mdname(mddev)); 4596 } else 4597 break; 4598 } 4599 4600 spin_lock_irqsave(&conf->device_lock, flags); 4601 mddev->degraded = (conf->raid_disks - conf->previous_raid_disks) - added_devices; 4602 spin_unlock_irqrestore(&conf->device_lock, flags); 4603 mddev->raid_disks = conf->raid_disks; 4604 mddev->reshape_position = 0; 4605 set_bit(MD_CHANGE_DEVS, &mddev->flags); 4606 4607 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 4608 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 4609 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 4610 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 4611 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 4612 "%s_reshape"); 4613 if (!mddev->sync_thread) { 4614 mddev->recovery = 0; 4615 spin_lock_irq(&conf->device_lock); 4616 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; 4617 conf->expand_progress = MaxSector; 4618 spin_unlock_irq(&conf->device_lock); 4619 return -EAGAIN; 4620 } 4621 md_wakeup_thread(mddev->sync_thread); 4622 md_new_event(mddev); 4623 return 0; 4624 } 4625 #endif 4626 4627 static void end_reshape(raid5_conf_t *conf) 4628 { 4629 struct block_device *bdev; 4630 4631 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { 4632 conf->mddev->array_size = conf->mddev->size * 4633 (conf->raid_disks - conf->max_degraded); 4634 set_capacity(conf->mddev->gendisk, conf->mddev->array_size << 1); 4635 conf->mddev->changed = 1; 4636 4637 bdev = bdget_disk(conf->mddev->gendisk, 0); 4638 if (bdev) { 4639 mutex_lock(&bdev->bd_inode->i_mutex); 4640 i_size_write(bdev->bd_inode, (loff_t)conf->mddev->array_size << 10); 4641 mutex_unlock(&bdev->bd_inode->i_mutex); 4642 bdput(bdev); 4643 } 4644 spin_lock_irq(&conf->device_lock); 4645 conf->expand_progress = MaxSector; 4646 spin_unlock_irq(&conf->device_lock); 4647 conf->mddev->reshape_position = MaxSector; 4648 4649 /* read-ahead size must cover two whole stripes, which is 4650 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices 4651 */ 4652 { 4653 int data_disks = conf->previous_raid_disks - conf->max_degraded; 4654 int stripe = data_disks * 4655 (conf->mddev->chunk_size / PAGE_SIZE); 4656 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 4657 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 4658 } 4659 } 4660 } 4661 4662 static void raid5_quiesce(mddev_t *mddev, int state) 4663 { 4664 raid5_conf_t *conf = mddev_to_conf(mddev); 4665 4666 switch(state) { 4667 case 2: /* resume for a suspend */ 4668 wake_up(&conf->wait_for_overlap); 4669 break; 4670 4671 case 1: /* stop all writes */ 4672 spin_lock_irq(&conf->device_lock); 4673 conf->quiesce = 1; 4674 wait_event_lock_irq(conf->wait_for_stripe, 4675 atomic_read(&conf->active_stripes) == 0 && 4676 atomic_read(&conf->active_aligned_reads) == 0, 4677 conf->device_lock, /* nothing */); 4678 spin_unlock_irq(&conf->device_lock); 4679 break; 4680 4681 case 0: /* re-enable writes */ 4682 spin_lock_irq(&conf->device_lock); 4683 conf->quiesce = 0; 4684 wake_up(&conf->wait_for_stripe); 4685 wake_up(&conf->wait_for_overlap); 4686 
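		/* Wake both queues: get_active_stripe() sleepers wait on
		 * wait_for_stripe, while writers that hit an overlapping
		 * bio wait on wait_for_overlap.
		 */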
spin_unlock_irq(&conf->device_lock); 4687 break; 4688 } 4689 } 4690 4691 static struct mdk_personality raid6_personality = 4692 { 4693 .name = "raid6", 4694 .level = 6, 4695 .owner = THIS_MODULE, 4696 .make_request = make_request, 4697 .run = run, 4698 .stop = stop, 4699 .status = status, 4700 .error_handler = error, 4701 .hot_add_disk = raid5_add_disk, 4702 .hot_remove_disk= raid5_remove_disk, 4703 .spare_active = raid5_spare_active, 4704 .sync_request = sync_request, 4705 .resize = raid5_resize, 4706 #ifdef CONFIG_MD_RAID5_RESHAPE 4707 .check_reshape = raid5_check_reshape, 4708 .start_reshape = raid5_start_reshape, 4709 #endif 4710 .quiesce = raid5_quiesce, 4711 }; 4712 static struct mdk_personality raid5_personality = 4713 { 4714 .name = "raid5", 4715 .level = 5, 4716 .owner = THIS_MODULE, 4717 .make_request = make_request, 4718 .run = run, 4719 .stop = stop, 4720 .status = status, 4721 .error_handler = error, 4722 .hot_add_disk = raid5_add_disk, 4723 .hot_remove_disk= raid5_remove_disk, 4724 .spare_active = raid5_spare_active, 4725 .sync_request = sync_request, 4726 .resize = raid5_resize, 4727 #ifdef CONFIG_MD_RAID5_RESHAPE 4728 .check_reshape = raid5_check_reshape, 4729 .start_reshape = raid5_start_reshape, 4730 #endif 4731 .quiesce = raid5_quiesce, 4732 }; 4733 4734 static struct mdk_personality raid4_personality = 4735 { 4736 .name = "raid4", 4737 .level = 4, 4738 .owner = THIS_MODULE, 4739 .make_request = make_request, 4740 .run = run, 4741 .stop = stop, 4742 .status = status, 4743 .error_handler = error, 4744 .hot_add_disk = raid5_add_disk, 4745 .hot_remove_disk= raid5_remove_disk, 4746 .spare_active = raid5_spare_active, 4747 .sync_request = sync_request, 4748 .resize = raid5_resize, 4749 #ifdef CONFIG_MD_RAID5_RESHAPE 4750 .check_reshape = raid5_check_reshape, 4751 .start_reshape = raid5_start_reshape, 4752 #endif 4753 .quiesce = raid5_quiesce, 4754 }; 4755 4756 static int __init raid5_init(void) 4757 { 4758 int e; 4759 4760 e = raid6_select_algo(); 4761 if ( e ) 4762 return e; 4763 register_md_personality(&raid6_personality); 4764 register_md_personality(&raid5_personality); 4765 register_md_personality(&raid4_personality); 4766 return 0; 4767 } 4768 4769 static void raid5_exit(void) 4770 { 4771 unregister_md_personality(&raid6_personality); 4772 unregister_md_personality(&raid5_personality); 4773 unregister_md_personality(&raid4_personality); 4774 } 4775 4776 module_init(raid5_init); 4777 module_exit(raid5_exit); 4778 MODULE_LICENSE("GPL"); 4779 MODULE_ALIAS("md-personality-4"); /* RAID5 */ 4780 MODULE_ALIAS("md-raid5"); 4781 MODULE_ALIAS("md-raid4"); 4782 MODULE_ALIAS("md-level-5"); 4783 MODULE_ALIAS("md-level-4"); 4784 MODULE_ALIAS("md-personality-8"); /* RAID6 */ 4785 MODULE_ALIAS("md-raid6"); 4786 MODULE_ALIAS("md-level-6"); 4787 4788 /* This used to be two separate modules, they were: */ 4789 MODULE_ALIAS("raid5"); 4790 MODULE_ALIAS("raid6"); 4791