/*
 * raid5.c : Multiple Devices driver for Linux
 *	   Copyright (C) 1996, 1997 Ingo Molnar, Miguel de Icaza, Gadi Oxman
 *	   Copyright (C) 1999, 2000 Ingo Molnar
 *	   Copyright (C) 2002, 2003 H. Peter Anvin
 *
 * RAID-4/5/6 management functions.
 * Thanks to Penguin Computing for making the RAID-6 development possible
 * by donating a test server!
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * You should have received a copy of the GNU General Public License
 * (for example /usr/src/linux/COPYING); if not, write to the Free
 * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * BITMAP UNPLUGGING:
 *
 * The sequencing for updating the bitmap reliably is a little
 * subtle (and I got it wrong the first time) so it deserves some
 * explanation.
 *
 * We group bitmap updates into batches.  Each batch has a number.
 * We may write out several batches at once, but that isn't very important.
 * conf->seq_write is the number of the last batch successfully written.
 * conf->seq_flush is the number of the last batch that was closed to
 *    new additions.
 * When we discover that we will need to write to any block in a stripe
 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
 * the number of the batch it will be in. This is seq_flush+1.
 * When we are ready to do a write, if that batch hasn't been written yet,
 *   we plug the array and queue the stripe for later.
 * When an unplug happens, we increment seq_flush, thus closing the current
 *   batch.
 * When we notice that seq_flush > seq_write, we write out all pending updates
 * to the bitmap, and advance seq_write to where seq_flush was.
 * This may occasionally write a bit out twice, but is sure never to
 * miss any bits.
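 *
 * As a rough, purely illustrative sketch of that gating (the real checks
 * live in add_stripe_bio() and do_release_stripe() below; nothing here is
 * new code, it only restates the rules above):
 *
 *	queueing a write into a stripe:
 *		sh->bm_seq = conf->seq_flush + 1;   // batch this update joins
 *	releasing a stripe that wants handling:
 *		if (sh->bm_seq - conf->seq_write > 0)
 *			// batch not yet on stable storage: park the stripe on
 *			// conf->bitmap_list until an unplug closes and writes
 *			// the batch
 *		else
 *			// safe to let the data write proceed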
44 */ 45 46 #include <linux/blkdev.h> 47 #include <linux/kthread.h> 48 #include <linux/raid/pq.h> 49 #include <linux/async_tx.h> 50 #include <linux/module.h> 51 #include <linux/async.h> 52 #include <linux/seq_file.h> 53 #include <linux/cpu.h> 54 #include <linux/slab.h> 55 #include <linux/ratelimit.h> 56 #include <linux/nodemask.h> 57 #include <linux/flex_array.h> 58 #include <trace/events/block.h> 59 60 #include "md.h" 61 #include "raid5.h" 62 #include "raid0.h" 63 #include "bitmap.h" 64 65 #define cpu_to_group(cpu) cpu_to_node(cpu) 66 #define ANY_GROUP NUMA_NO_NODE 67 68 static bool devices_handle_discard_safely = false; 69 module_param(devices_handle_discard_safely, bool, 0644); 70 MODULE_PARM_DESC(devices_handle_discard_safely, 71 "Set to Y if all devices in each array reliably return zeroes on reads from discarded regions"); 72 static struct workqueue_struct *raid5_wq; 73 /* 74 * Stripe cache 75 */ 76 77 #define NR_STRIPES 256 78 #define STRIPE_SIZE PAGE_SIZE 79 #define STRIPE_SHIFT (PAGE_SHIFT - 9) 80 #define STRIPE_SECTORS (STRIPE_SIZE>>9) 81 #define IO_THRESHOLD 1 82 #define BYPASS_THRESHOLD 1 83 #define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head)) 84 #define HASH_MASK (NR_HASH - 1) 85 #define MAX_STRIPE_BATCH 8 86 87 static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect) 88 { 89 int hash = (sect >> STRIPE_SHIFT) & HASH_MASK; 90 return &conf->stripe_hashtbl[hash]; 91 } 92 93 static inline int stripe_hash_locks_hash(sector_t sect) 94 { 95 return (sect >> STRIPE_SHIFT) & STRIPE_HASH_LOCKS_MASK; 96 } 97 98 static inline void lock_device_hash_lock(struct r5conf *conf, int hash) 99 { 100 spin_lock_irq(conf->hash_locks + hash); 101 spin_lock(&conf->device_lock); 102 } 103 104 static inline void unlock_device_hash_lock(struct r5conf *conf, int hash) 105 { 106 spin_unlock(&conf->device_lock); 107 spin_unlock_irq(conf->hash_locks + hash); 108 } 109 110 static inline void lock_all_device_hash_locks_irq(struct r5conf *conf) 111 { 112 int i; 113 local_irq_disable(); 114 spin_lock(conf->hash_locks); 115 for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) 116 spin_lock_nest_lock(conf->hash_locks + i, conf->hash_locks); 117 spin_lock(&conf->device_lock); 118 } 119 120 static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf) 121 { 122 int i; 123 spin_unlock(&conf->device_lock); 124 for (i = NR_STRIPE_HASH_LOCKS; i; i--) 125 spin_unlock(conf->hash_locks + i - 1); 126 local_irq_enable(); 127 } 128 129 /* bio's attached to a stripe+device for I/O are linked together in bi_sector 130 * order without overlap. There may be several bio's per stripe+device, and 131 * a bio could span several devices. 132 * When walking this list for a particular stripe+device, we must never proceed 133 * beyond a bio that extends past this device, as the next bio might no longer 134 * be valid. 
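 *
 * Purely as an illustration of that rule (the real consumers are the loops
 * in ops_run_biofill() and ops_complete_biofill() further down; this is not
 * new code), a walk over the bios attached to one stripe+device looks like:
 *
 *	for (bio = dev->toread;
 *	     bio && bio->bi_iter.bi_sector < dev->sector + STRIPE_SECTORS;
 *	     bio = r5_next_bio(bio, dev->sector))
 *		;	// every bio visited here overlaps this stripe+device
 *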
 * This function is used to determine the 'next' bio in the list, given the
 * sector of the current stripe+device
 */
static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
{
	int sectors = bio_sectors(bio);
	if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
		return bio->bi_next;
	else
		return NULL;
}

/*
 * We maintain a biased count of active stripes in the bottom 16 bits of
 * bi_phys_segments, and a count of processed stripes in the upper 16 bits
 */
static inline int raid5_bi_processed_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	return (atomic_read(segments) >> 16) & 0xffff;
}

static inline int raid5_dec_bi_active_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	return atomic_sub_return(1, segments) & 0xffff;
}

static inline void raid5_inc_bi_active_stripes(struct bio *bio)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	atomic_inc(segments);
}

static inline void raid5_set_bi_processed_stripes(struct bio *bio,
	unsigned int cnt)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	int old, new;

	do {
		old = atomic_read(segments);
		new = (old & 0xffff) | (cnt << 16);
	} while (atomic_cmpxchg(segments, old, new) != old);
}

static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
{
	atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
	atomic_set(segments, cnt);
}

/* Find first data disk in a raid6 stripe */
static inline int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		/* ddf always starts from first device */
		return 0;
	/* md starts just after Q block */
	if (sh->qd_idx == sh->disks - 1)
		return 0;
	else
		return sh->qd_idx + 1;
}
static inline int raid6_next_disk(int disk, int raid_disks)
{
	disk++;
	return (disk < raid_disks) ? disk : 0;
}

/* When walking through the disks in a raid5, starting at raid6_d0,
 * we need to map each disk to a 'slot', where the data disks are slot
 * 0 .. raid_disks-3, the parity disk is raid_disks-2 and the Q disk
 * is raid_disks-1.  This helper does that mapping.
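 *
 * Worked example (hypothetical numbers, only to illustrate
 * raid6_idx_to_slot() below): with disks = 5, pd_idx = 3 and qd_idx = 4,
 * raid6_d0() starts the walk at disk 0 and raid6_next_disk() yields
 *
 *	disk 0 -> slot 0,  disk 1 -> slot 1,  disk 2 -> slot 2   (data)
 *	disk 3 -> slot 3  (syndrome_disks, the P slot)
 *	disk 4 -> slot 4  (syndrome_disks + 1, the Q slot)
 *
 * For a ddf layout the running count advances on every disk, so data blocks
 * keep their physical positions and unused slots are left where P and Q sit.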
209 */ 210 static int raid6_idx_to_slot(int idx, struct stripe_head *sh, 211 int *count, int syndrome_disks) 212 { 213 int slot = *count; 214 215 if (sh->ddf_layout) 216 (*count)++; 217 if (idx == sh->pd_idx) 218 return syndrome_disks; 219 if (idx == sh->qd_idx) 220 return syndrome_disks + 1; 221 if (!sh->ddf_layout) 222 (*count)++; 223 return slot; 224 } 225 226 static void return_io(struct bio *return_bi) 227 { 228 struct bio *bi = return_bi; 229 while (bi) { 230 231 return_bi = bi->bi_next; 232 bi->bi_next = NULL; 233 bi->bi_iter.bi_size = 0; 234 trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), 235 bi, 0); 236 bio_endio(bi); 237 bi = return_bi; 238 } 239 } 240 241 static void print_raid5_conf (struct r5conf *conf); 242 243 static int stripe_operations_active(struct stripe_head *sh) 244 { 245 return sh->check_state || sh->reconstruct_state || 246 test_bit(STRIPE_BIOFILL_RUN, &sh->state) || 247 test_bit(STRIPE_COMPUTE_RUN, &sh->state); 248 } 249 250 static void raid5_wakeup_stripe_thread(struct stripe_head *sh) 251 { 252 struct r5conf *conf = sh->raid_conf; 253 struct r5worker_group *group; 254 int thread_cnt; 255 int i, cpu = sh->cpu; 256 257 if (!cpu_online(cpu)) { 258 cpu = cpumask_any(cpu_online_mask); 259 sh->cpu = cpu; 260 } 261 262 if (list_empty(&sh->lru)) { 263 struct r5worker_group *group; 264 group = conf->worker_groups + cpu_to_group(cpu); 265 list_add_tail(&sh->lru, &group->handle_list); 266 group->stripes_cnt++; 267 sh->group = group; 268 } 269 270 if (conf->worker_cnt_per_group == 0) { 271 md_wakeup_thread(conf->mddev->thread); 272 return; 273 } 274 275 group = conf->worker_groups + cpu_to_group(sh->cpu); 276 277 group->workers[0].working = true; 278 /* at least one worker should run to avoid race */ 279 queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work); 280 281 thread_cnt = group->stripes_cnt / MAX_STRIPE_BATCH - 1; 282 /* wakeup more workers */ 283 for (i = 1; i < conf->worker_cnt_per_group && thread_cnt > 0; i++) { 284 if (group->workers[i].working == false) { 285 group->workers[i].working = true; 286 queue_work_on(sh->cpu, raid5_wq, 287 &group->workers[i].work); 288 thread_cnt--; 289 } 290 } 291 } 292 293 static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, 294 struct list_head *temp_inactive_list) 295 { 296 BUG_ON(!list_empty(&sh->lru)); 297 BUG_ON(atomic_read(&conf->active_stripes)==0); 298 if (test_bit(STRIPE_HANDLE, &sh->state)) { 299 if (test_bit(STRIPE_DELAYED, &sh->state) && 300 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 301 list_add_tail(&sh->lru, &conf->delayed_list); 302 else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && 303 sh->bm_seq - conf->seq_write > 0) 304 list_add_tail(&sh->lru, &conf->bitmap_list); 305 else { 306 clear_bit(STRIPE_DELAYED, &sh->state); 307 clear_bit(STRIPE_BIT_DELAY, &sh->state); 308 if (conf->worker_cnt_per_group == 0) { 309 list_add_tail(&sh->lru, &conf->handle_list); 310 } else { 311 raid5_wakeup_stripe_thread(sh); 312 return; 313 } 314 } 315 md_wakeup_thread(conf->mddev->thread); 316 } else { 317 BUG_ON(stripe_operations_active(sh)); 318 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 319 if (atomic_dec_return(&conf->preread_active_stripes) 320 < IO_THRESHOLD) 321 md_wakeup_thread(conf->mddev->thread); 322 atomic_dec(&conf->active_stripes); 323 if (!test_bit(STRIPE_EXPANDING, &sh->state)) 324 list_add_tail(&sh->lru, temp_inactive_list); 325 } 326 } 327 328 static void __release_stripe(struct r5conf *conf, struct stripe_head *sh, 329 struct list_head *temp_inactive_list) 330 { 331 if 
(atomic_dec_and_test(&sh->count))
		do_release_stripe(conf, sh, temp_inactive_list);
}

/*
 * @hash could be NR_STRIPE_HASH_LOCKS, then we have a list of inactive_list
 *
 * Be careful: Only one task can add/delete stripes from temp_inactive_list at
 * any given time. Adding stripes only takes the device lock, while deleting
 * stripes only takes the hash lock.
 */
static void release_inactive_stripe_list(struct r5conf *conf,
					 struct list_head *temp_inactive_list,
					 int hash)
{
	int size;
	unsigned long do_wakeup = 0;
	int i = 0;
	unsigned long flags;

	if (hash == NR_STRIPE_HASH_LOCKS) {
		size = NR_STRIPE_HASH_LOCKS;
		hash = NR_STRIPE_HASH_LOCKS - 1;
	} else
		size = 1;
	while (size) {
		struct list_head *list = &temp_inactive_list[size - 1];

		/*
		 * We don't hold any lock here yet, get_active_stripe() might
		 * remove stripes from the list
		 */
		if (!list_empty_careful(list)) {
			spin_lock_irqsave(conf->hash_locks + hash, flags);
			if (list_empty(conf->inactive_list + hash) &&
			    !list_empty(list))
				atomic_dec(&conf->empty_inactive_list_nr);
			list_splice_tail_init(list, conf->inactive_list + hash);
			do_wakeup |= 1 << hash;
			spin_unlock_irqrestore(conf->hash_locks + hash, flags);
		}
		size--;
		hash--;
	}

	for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
		if (do_wakeup & (1 << i))
			wake_up(&conf->wait_for_stripe[i]);
	}

	if (do_wakeup) {
		if (atomic_read(&conf->active_stripes) == 0)
			wake_up(&conf->wait_for_quiescent);
		if (conf->retry_read_aligned)
			md_wakeup_thread(conf->mddev->thread);
	}
}

/* should hold conf->device_lock already */
static int release_stripe_list(struct r5conf *conf,
			       struct list_head *temp_inactive_list)
{
	struct stripe_head *sh;
	int count = 0;
	struct llist_node *head;

	head = llist_del_all(&conf->released_stripes);
	head = llist_reverse_order(head);
	while (head) {
		int hash;

		sh = llist_entry(head, struct stripe_head, release_list);
		head = llist_next(head);
		/* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */
		smp_mb();
		clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state);
		/*
		 * Don't worry if the bit is still set here: if it is set
		 * again, the count is always > 1.  The same is true for the
		 * STRIPE_ON_UNPLUG_LIST bit.
		 */
		hash = sh->hash_lock_index;
		__release_stripe(conf, sh, &temp_inactive_list[hash]);
		count++;
	}

	return count;
}

static void release_stripe(struct stripe_head *sh)
{
	struct r5conf *conf = sh->raid_conf;
	unsigned long flags;
	struct list_head list;
	int hash;
	bool wakeup;

	/* Avoid release_list until the last reference.
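	 * (Informational note on the code below, not a behaviour change: if
	 * this is not the last reference, atomic_add_unless() just drops the
	 * count and we are done.  Otherwise, when the md thread exists and
	 * the stripe is not already queued, it is handed over through the
	 * lock-free conf->released_stripes llist and the md thread later
	 * drains it in release_stripe_list() under conf->device_lock, so the
	 * caller never takes device_lock itself.  The slow path below only
	 * runs when there is no md thread or STRIPE_ON_RELEASE_LIST was
	 * already set.)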
429 */ 430 if (atomic_add_unless(&sh->count, -1, 1)) 431 return; 432 433 if (unlikely(!conf->mddev->thread) || 434 test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state)) 435 goto slow_path; 436 wakeup = llist_add(&sh->release_list, &conf->released_stripes); 437 if (wakeup) 438 md_wakeup_thread(conf->mddev->thread); 439 return; 440 slow_path: 441 local_irq_save(flags); 442 /* we are ok here if STRIPE_ON_RELEASE_LIST is set or not */ 443 if (atomic_dec_and_lock(&sh->count, &conf->device_lock)) { 444 INIT_LIST_HEAD(&list); 445 hash = sh->hash_lock_index; 446 do_release_stripe(conf, sh, &list); 447 spin_unlock(&conf->device_lock); 448 release_inactive_stripe_list(conf, &list, hash); 449 } 450 local_irq_restore(flags); 451 } 452 453 static inline void remove_hash(struct stripe_head *sh) 454 { 455 pr_debug("remove_hash(), stripe %llu\n", 456 (unsigned long long)sh->sector); 457 458 hlist_del_init(&sh->hash); 459 } 460 461 static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) 462 { 463 struct hlist_head *hp = stripe_hash(conf, sh->sector); 464 465 pr_debug("insert_hash(), stripe %llu\n", 466 (unsigned long long)sh->sector); 467 468 hlist_add_head(&sh->hash, hp); 469 } 470 471 /* find an idle stripe, make sure it is unhashed, and return it. */ 472 static struct stripe_head *get_free_stripe(struct r5conf *conf, int hash) 473 { 474 struct stripe_head *sh = NULL; 475 struct list_head *first; 476 477 if (list_empty(conf->inactive_list + hash)) 478 goto out; 479 first = (conf->inactive_list + hash)->next; 480 sh = list_entry(first, struct stripe_head, lru); 481 list_del_init(first); 482 remove_hash(sh); 483 atomic_inc(&conf->active_stripes); 484 BUG_ON(hash != sh->hash_lock_index); 485 if (list_empty(conf->inactive_list + hash)) 486 atomic_inc(&conf->empty_inactive_list_nr); 487 out: 488 return sh; 489 } 490 491 static void shrink_buffers(struct stripe_head *sh) 492 { 493 struct page *p; 494 int i; 495 int num = sh->raid_conf->pool_size; 496 497 for (i = 0; i < num ; i++) { 498 WARN_ON(sh->dev[i].page != sh->dev[i].orig_page); 499 p = sh->dev[i].page; 500 if (!p) 501 continue; 502 sh->dev[i].page = NULL; 503 put_page(p); 504 } 505 } 506 507 static int grow_buffers(struct stripe_head *sh, gfp_t gfp) 508 { 509 int i; 510 int num = sh->raid_conf->pool_size; 511 512 for (i = 0; i < num; i++) { 513 struct page *page; 514 515 if (!(page = alloc_page(gfp))) { 516 return 1; 517 } 518 sh->dev[i].page = page; 519 sh->dev[i].orig_page = page; 520 } 521 return 0; 522 } 523 524 static void raid5_build_block(struct stripe_head *sh, int i, int previous); 525 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, 526 struct stripe_head *sh); 527 528 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) 529 { 530 struct r5conf *conf = sh->raid_conf; 531 int i, seq; 532 533 BUG_ON(atomic_read(&sh->count) != 0); 534 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); 535 BUG_ON(stripe_operations_active(sh)); 536 BUG_ON(sh->batch_head); 537 538 pr_debug("init_stripe called, stripe %llu\n", 539 (unsigned long long)sector); 540 retry: 541 seq = read_seqcount_begin(&conf->gen_lock); 542 sh->generation = conf->generation - previous; 543 sh->disks = previous ? 
conf->previous_raid_disks : conf->raid_disks; 544 sh->sector = sector; 545 stripe_set_idx(sector, conf, previous, sh); 546 sh->state = 0; 547 548 for (i = sh->disks; i--; ) { 549 struct r5dev *dev = &sh->dev[i]; 550 551 if (dev->toread || dev->read || dev->towrite || dev->written || 552 test_bit(R5_LOCKED, &dev->flags)) { 553 printk(KERN_ERR "sector=%llx i=%d %p %p %p %p %d\n", 554 (unsigned long long)sh->sector, i, dev->toread, 555 dev->read, dev->towrite, dev->written, 556 test_bit(R5_LOCKED, &dev->flags)); 557 WARN_ON(1); 558 } 559 dev->flags = 0; 560 raid5_build_block(sh, i, previous); 561 } 562 if (read_seqcount_retry(&conf->gen_lock, seq)) 563 goto retry; 564 sh->overwrite_disks = 0; 565 insert_hash(conf, sh); 566 sh->cpu = smp_processor_id(); 567 set_bit(STRIPE_BATCH_READY, &sh->state); 568 } 569 570 static struct stripe_head *__find_stripe(struct r5conf *conf, sector_t sector, 571 short generation) 572 { 573 struct stripe_head *sh; 574 575 pr_debug("__find_stripe, sector %llu\n", (unsigned long long)sector); 576 hlist_for_each_entry(sh, stripe_hash(conf, sector), hash) 577 if (sh->sector == sector && sh->generation == generation) 578 return sh; 579 pr_debug("__stripe %llu not in cache\n", (unsigned long long)sector); 580 return NULL; 581 } 582 583 /* 584 * Need to check if array has failed when deciding whether to: 585 * - start an array 586 * - remove non-faulty devices 587 * - add a spare 588 * - allow a reshape 589 * This determination is simple when no reshape is happening. 590 * However if there is a reshape, we need to carefully check 591 * both the before and after sections. 592 * This is because some failed devices may only affect one 593 * of the two sections, and some non-in_sync devices may 594 * be insync in the section most affected by failed devices. 595 */ 596 static int calc_degraded(struct r5conf *conf) 597 { 598 int degraded, degraded2; 599 int i; 600 601 rcu_read_lock(); 602 degraded = 0; 603 for (i = 0; i < conf->previous_raid_disks; i++) { 604 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); 605 if (rdev && test_bit(Faulty, &rdev->flags)) 606 rdev = rcu_dereference(conf->disks[i].replacement); 607 if (!rdev || test_bit(Faulty, &rdev->flags)) 608 degraded++; 609 else if (test_bit(In_sync, &rdev->flags)) 610 ; 611 else 612 /* not in-sync or faulty. 613 * If the reshape increases the number of devices, 614 * this is being recovered by the reshape, so 615 * this 'previous' section is not in_sync. 616 * If the number of devices is being reduced however, 617 * the device can only be part of the array if 618 * we are reverting a reshape, so this section will 619 * be in-sync. 620 */ 621 if (conf->raid_disks >= conf->previous_raid_disks) 622 degraded++; 623 } 624 rcu_read_unlock(); 625 if (conf->raid_disks == conf->previous_raid_disks) 626 return degraded; 627 rcu_read_lock(); 628 degraded2 = 0; 629 for (i = 0; i < conf->raid_disks; i++) { 630 struct md_rdev *rdev = rcu_dereference(conf->disks[i].rdev); 631 if (rdev && test_bit(Faulty, &rdev->flags)) 632 rdev = rcu_dereference(conf->disks[i].replacement); 633 if (!rdev || test_bit(Faulty, &rdev->flags)) 634 degraded2++; 635 else if (test_bit(In_sync, &rdev->flags)) 636 ; 637 else 638 /* not in-sync or faulty. 639 * If reshape increases the number of devices, this 640 * section has already been recovered, else it 641 * almost certainly hasn't. 
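			 * (Worked example, purely illustrative: when growing
			 * from 4 to 5 devices, a not-yet-in_sync disk in this
			 * new section is being filled in by the reshape as it
			 * progresses, so it is not counted as degraded here;
			 * when shrinking from 5 to 4, a not-in_sync disk in
			 * this section really is missing data, so it is.)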
642 */ 643 if (conf->raid_disks <= conf->previous_raid_disks) 644 degraded2++; 645 } 646 rcu_read_unlock(); 647 if (degraded2 > degraded) 648 return degraded2; 649 return degraded; 650 } 651 652 static int has_failed(struct r5conf *conf) 653 { 654 int degraded; 655 656 if (conf->mddev->reshape_position == MaxSector) 657 return conf->mddev->degraded > conf->max_degraded; 658 659 degraded = calc_degraded(conf); 660 if (degraded > conf->max_degraded) 661 return 1; 662 return 0; 663 } 664 665 static struct stripe_head * 666 get_active_stripe(struct r5conf *conf, sector_t sector, 667 int previous, int noblock, int noquiesce) 668 { 669 struct stripe_head *sh; 670 int hash = stripe_hash_locks_hash(sector); 671 672 pr_debug("get_stripe, sector %llu\n", (unsigned long long)sector); 673 674 spin_lock_irq(conf->hash_locks + hash); 675 676 do { 677 wait_event_lock_irq(conf->wait_for_quiescent, 678 conf->quiesce == 0 || noquiesce, 679 *(conf->hash_locks + hash)); 680 sh = __find_stripe(conf, sector, conf->generation - previous); 681 if (!sh) { 682 if (!test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) { 683 sh = get_free_stripe(conf, hash); 684 if (!sh && !test_bit(R5_DID_ALLOC, 685 &conf->cache_state)) 686 set_bit(R5_ALLOC_MORE, 687 &conf->cache_state); 688 } 689 if (noblock && sh == NULL) 690 break; 691 if (!sh) { 692 set_bit(R5_INACTIVE_BLOCKED, 693 &conf->cache_state); 694 wait_event_exclusive_cmd( 695 conf->wait_for_stripe[hash], 696 !list_empty(conf->inactive_list + hash) && 697 (atomic_read(&conf->active_stripes) 698 < (conf->max_nr_stripes * 3 / 4) 699 || !test_bit(R5_INACTIVE_BLOCKED, 700 &conf->cache_state)), 701 spin_unlock_irq(conf->hash_locks + hash), 702 spin_lock_irq(conf->hash_locks + hash)); 703 clear_bit(R5_INACTIVE_BLOCKED, 704 &conf->cache_state); 705 } else { 706 init_stripe(sh, sector, previous); 707 atomic_inc(&sh->count); 708 } 709 } else if (!atomic_inc_not_zero(&sh->count)) { 710 spin_lock(&conf->device_lock); 711 if (!atomic_read(&sh->count)) { 712 if (!test_bit(STRIPE_HANDLE, &sh->state)) 713 atomic_inc(&conf->active_stripes); 714 BUG_ON(list_empty(&sh->lru) && 715 !test_bit(STRIPE_EXPANDING, &sh->state)); 716 list_del_init(&sh->lru); 717 if (sh->group) { 718 sh->group->stripes_cnt--; 719 sh->group = NULL; 720 } 721 } 722 atomic_inc(&sh->count); 723 spin_unlock(&conf->device_lock); 724 } 725 } while (sh == NULL); 726 727 if (!list_empty(conf->inactive_list + hash)) 728 wake_up(&conf->wait_for_stripe[hash]); 729 730 spin_unlock_irq(conf->hash_locks + hash); 731 return sh; 732 } 733 734 static bool is_full_stripe_write(struct stripe_head *sh) 735 { 736 BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded)); 737 return sh->overwrite_disks == (sh->disks - sh->raid_conf->max_degraded); 738 } 739 740 static void lock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) 741 { 742 local_irq_disable(); 743 if (sh1 > sh2) { 744 spin_lock(&sh2->stripe_lock); 745 spin_lock_nested(&sh1->stripe_lock, 1); 746 } else { 747 spin_lock(&sh1->stripe_lock); 748 spin_lock_nested(&sh2->stripe_lock, 1); 749 } 750 } 751 752 static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2) 753 { 754 spin_unlock(&sh1->stripe_lock); 755 spin_unlock(&sh2->stripe_lock); 756 local_irq_enable(); 757 } 758 759 /* Only freshly new full stripe normal write stripe can be added to a batch list */ 760 static bool stripe_can_batch(struct stripe_head *sh) 761 { 762 return test_bit(STRIPE_BATCH_READY, &sh->state) && 763 !test_bit(STRIPE_BITMAP_PENDING, &sh->state) && 764 
is_full_stripe_write(sh); 765 } 766 767 /* we only do back search */ 768 static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh) 769 { 770 struct stripe_head *head; 771 sector_t head_sector, tmp_sec; 772 int hash; 773 int dd_idx; 774 775 if (!stripe_can_batch(sh)) 776 return; 777 /* Don't cross chunks, so stripe pd_idx/qd_idx is the same */ 778 tmp_sec = sh->sector; 779 if (!sector_div(tmp_sec, conf->chunk_sectors)) 780 return; 781 head_sector = sh->sector - STRIPE_SECTORS; 782 783 hash = stripe_hash_locks_hash(head_sector); 784 spin_lock_irq(conf->hash_locks + hash); 785 head = __find_stripe(conf, head_sector, conf->generation); 786 if (head && !atomic_inc_not_zero(&head->count)) { 787 spin_lock(&conf->device_lock); 788 if (!atomic_read(&head->count)) { 789 if (!test_bit(STRIPE_HANDLE, &head->state)) 790 atomic_inc(&conf->active_stripes); 791 BUG_ON(list_empty(&head->lru) && 792 !test_bit(STRIPE_EXPANDING, &head->state)); 793 list_del_init(&head->lru); 794 if (head->group) { 795 head->group->stripes_cnt--; 796 head->group = NULL; 797 } 798 } 799 atomic_inc(&head->count); 800 spin_unlock(&conf->device_lock); 801 } 802 spin_unlock_irq(conf->hash_locks + hash); 803 804 if (!head) 805 return; 806 if (!stripe_can_batch(head)) 807 goto out; 808 809 lock_two_stripes(head, sh); 810 /* clear_batch_ready clear the flag */ 811 if (!stripe_can_batch(head) || !stripe_can_batch(sh)) 812 goto unlock_out; 813 814 if (sh->batch_head) 815 goto unlock_out; 816 817 dd_idx = 0; 818 while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) 819 dd_idx++; 820 if (head->dev[dd_idx].towrite->bi_rw != sh->dev[dd_idx].towrite->bi_rw) 821 goto unlock_out; 822 823 if (head->batch_head) { 824 spin_lock(&head->batch_head->batch_lock); 825 /* This batch list is already running */ 826 if (!stripe_can_batch(head)) { 827 spin_unlock(&head->batch_head->batch_lock); 828 goto unlock_out; 829 } 830 831 /* 832 * at this point, head's BATCH_READY could be cleared, but we 833 * can still add the stripe to batch list 834 */ 835 list_add(&sh->batch_list, &head->batch_list); 836 spin_unlock(&head->batch_head->batch_lock); 837 838 sh->batch_head = head->batch_head; 839 } else { 840 head->batch_head = head; 841 sh->batch_head = head->batch_head; 842 spin_lock(&head->batch_lock); 843 list_add_tail(&sh->batch_list, &head->batch_list); 844 spin_unlock(&head->batch_lock); 845 } 846 847 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 848 if (atomic_dec_return(&conf->preread_active_stripes) 849 < IO_THRESHOLD) 850 md_wakeup_thread(conf->mddev->thread); 851 852 if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) { 853 int seq = sh->bm_seq; 854 if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) && 855 sh->batch_head->bm_seq > seq) 856 seq = sh->batch_head->bm_seq; 857 set_bit(STRIPE_BIT_DELAY, &sh->batch_head->state); 858 sh->batch_head->bm_seq = seq; 859 } 860 861 atomic_inc(&sh->count); 862 unlock_out: 863 unlock_two_stripes(head, sh); 864 out: 865 release_stripe(head); 866 } 867 868 /* Determine if 'data_offset' or 'new_data_offset' should be used 869 * in this stripe_head. 870 */ 871 static int use_new_offset(struct r5conf *conf, struct stripe_head *sh) 872 { 873 sector_t progress = conf->reshape_progress; 874 /* Need a memory barrier to make sure we see the value 875 * of conf->generation, or ->data_offset that was set before 876 * reshape_progress was updated. 
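	 * (For orientation, describing the checks just below rather than new
	 * behaviour: init_stripe() stores conf->generation - previous in
	 * sh->generation, so a stripe set up in the old geometry carries
	 * generation - 1 and keeps using ->data_offset, while, during a
	 * reshape, a current-generation stripe uses ->new_data_offset.)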
877 */ 878 smp_rmb(); 879 if (progress == MaxSector) 880 return 0; 881 if (sh->generation == conf->generation - 1) 882 return 0; 883 /* We are in a reshape, and this is a new-generation stripe, 884 * so use new_data_offset. 885 */ 886 return 1; 887 } 888 889 static void 890 raid5_end_read_request(struct bio *bi); 891 static void 892 raid5_end_write_request(struct bio *bi); 893 894 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) 895 { 896 struct r5conf *conf = sh->raid_conf; 897 int i, disks = sh->disks; 898 struct stripe_head *head_sh = sh; 899 900 might_sleep(); 901 902 for (i = disks; i--; ) { 903 int rw; 904 int replace_only = 0; 905 struct bio *bi, *rbi; 906 struct md_rdev *rdev, *rrdev = NULL; 907 908 sh = head_sh; 909 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { 910 if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) 911 rw = WRITE_FUA; 912 else 913 rw = WRITE; 914 if (test_bit(R5_Discard, &sh->dev[i].flags)) 915 rw |= REQ_DISCARD; 916 } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) 917 rw = READ; 918 else if (test_and_clear_bit(R5_WantReplace, 919 &sh->dev[i].flags)) { 920 rw = WRITE; 921 replace_only = 1; 922 } else 923 continue; 924 if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags)) 925 rw |= REQ_SYNC; 926 927 again: 928 bi = &sh->dev[i].req; 929 rbi = &sh->dev[i].rreq; /* For writing to replacement */ 930 931 rcu_read_lock(); 932 rrdev = rcu_dereference(conf->disks[i].replacement); 933 smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */ 934 rdev = rcu_dereference(conf->disks[i].rdev); 935 if (!rdev) { 936 rdev = rrdev; 937 rrdev = NULL; 938 } 939 if (rw & WRITE) { 940 if (replace_only) 941 rdev = NULL; 942 if (rdev == rrdev) 943 /* We raced and saw duplicates */ 944 rrdev = NULL; 945 } else { 946 if (test_bit(R5_ReadRepl, &head_sh->dev[i].flags) && rrdev) 947 rdev = rrdev; 948 rrdev = NULL; 949 } 950 951 if (rdev && test_bit(Faulty, &rdev->flags)) 952 rdev = NULL; 953 if (rdev) 954 atomic_inc(&rdev->nr_pending); 955 if (rrdev && test_bit(Faulty, &rrdev->flags)) 956 rrdev = NULL; 957 if (rrdev) 958 atomic_inc(&rrdev->nr_pending); 959 rcu_read_unlock(); 960 961 /* We have already checked bad blocks for reads. Now 962 * need to check for writes. We never accept write errors 963 * on the replacement, so we don't to check rrdev. 964 */ 965 while ((rw & WRITE) && rdev && 966 test_bit(WriteErrorSeen, &rdev->flags)) { 967 sector_t first_bad; 968 int bad_sectors; 969 int bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, 970 &first_bad, &bad_sectors); 971 if (!bad) 972 break; 973 974 if (bad < 0) { 975 set_bit(BlockedBadBlocks, &rdev->flags); 976 if (!conf->mddev->external && 977 conf->mddev->flags) { 978 /* It is very unlikely, but we might 979 * still need to write out the 980 * bad block log - better give it 981 * a chance*/ 982 md_check_recovery(conf->mddev); 983 } 984 /* 985 * Because md_wait_for_blocked_rdev 986 * will dec nr_pending, we must 987 * increment it first. 988 */ 989 atomic_inc(&rdev->nr_pending); 990 md_wait_for_blocked_rdev(rdev, conf->mddev); 991 } else { 992 /* Acknowledged bad block - skip the write */ 993 rdev_dec_pending(rdev, conf->mddev); 994 rdev = NULL; 995 } 996 } 997 998 if (rdev) { 999 if (s->syncing || s->expanding || s->expanded 1000 || s->replacing) 1001 md_sync_acct(rdev->bdev, STRIPE_SECTORS); 1002 1003 set_bit(STRIPE_IO_STARTED, &sh->state); 1004 1005 bio_reset(bi); 1006 bi->bi_bdev = rdev->bdev; 1007 bi->bi_rw = rw; 1008 bi->bi_end_io = (rw & WRITE) 1009 ? 
raid5_end_write_request 1010 : raid5_end_read_request; 1011 bi->bi_private = sh; 1012 1013 pr_debug("%s: for %llu schedule op %ld on disc %d\n", 1014 __func__, (unsigned long long)sh->sector, 1015 bi->bi_rw, i); 1016 atomic_inc(&sh->count); 1017 if (sh != head_sh) 1018 atomic_inc(&head_sh->count); 1019 if (use_new_offset(conf, sh)) 1020 bi->bi_iter.bi_sector = (sh->sector 1021 + rdev->new_data_offset); 1022 else 1023 bi->bi_iter.bi_sector = (sh->sector 1024 + rdev->data_offset); 1025 if (test_bit(R5_ReadNoMerge, &head_sh->dev[i].flags)) 1026 bi->bi_rw |= REQ_NOMERGE; 1027 1028 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) 1029 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); 1030 sh->dev[i].vec.bv_page = sh->dev[i].page; 1031 bi->bi_vcnt = 1; 1032 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 1033 bi->bi_io_vec[0].bv_offset = 0; 1034 bi->bi_iter.bi_size = STRIPE_SIZE; 1035 /* 1036 * If this is discard request, set bi_vcnt 0. We don't 1037 * want to confuse SCSI because SCSI will replace payload 1038 */ 1039 if (rw & REQ_DISCARD) 1040 bi->bi_vcnt = 0; 1041 if (rrdev) 1042 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); 1043 1044 if (conf->mddev->gendisk) 1045 trace_block_bio_remap(bdev_get_queue(bi->bi_bdev), 1046 bi, disk_devt(conf->mddev->gendisk), 1047 sh->dev[i].sector); 1048 generic_make_request(bi); 1049 } 1050 if (rrdev) { 1051 if (s->syncing || s->expanding || s->expanded 1052 || s->replacing) 1053 md_sync_acct(rrdev->bdev, STRIPE_SECTORS); 1054 1055 set_bit(STRIPE_IO_STARTED, &sh->state); 1056 1057 bio_reset(rbi); 1058 rbi->bi_bdev = rrdev->bdev; 1059 rbi->bi_rw = rw; 1060 BUG_ON(!(rw & WRITE)); 1061 rbi->bi_end_io = raid5_end_write_request; 1062 rbi->bi_private = sh; 1063 1064 pr_debug("%s: for %llu schedule op %ld on " 1065 "replacement disc %d\n", 1066 __func__, (unsigned long long)sh->sector, 1067 rbi->bi_rw, i); 1068 atomic_inc(&sh->count); 1069 if (sh != head_sh) 1070 atomic_inc(&head_sh->count); 1071 if (use_new_offset(conf, sh)) 1072 rbi->bi_iter.bi_sector = (sh->sector 1073 + rrdev->new_data_offset); 1074 else 1075 rbi->bi_iter.bi_sector = (sh->sector 1076 + rrdev->data_offset); 1077 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) 1078 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); 1079 sh->dev[i].rvec.bv_page = sh->dev[i].page; 1080 rbi->bi_vcnt = 1; 1081 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE; 1082 rbi->bi_io_vec[0].bv_offset = 0; 1083 rbi->bi_iter.bi_size = STRIPE_SIZE; 1084 /* 1085 * If this is discard request, set bi_vcnt 0. 
We don't 1086 * want to confuse SCSI because SCSI will replace payload 1087 */ 1088 if (rw & REQ_DISCARD) 1089 rbi->bi_vcnt = 0; 1090 if (conf->mddev->gendisk) 1091 trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev), 1092 rbi, disk_devt(conf->mddev->gendisk), 1093 sh->dev[i].sector); 1094 generic_make_request(rbi); 1095 } 1096 if (!rdev && !rrdev) { 1097 if (rw & WRITE) 1098 set_bit(STRIPE_DEGRADED, &sh->state); 1099 pr_debug("skip op %ld on disc %d for sector %llu\n", 1100 bi->bi_rw, i, (unsigned long long)sh->sector); 1101 clear_bit(R5_LOCKED, &sh->dev[i].flags); 1102 set_bit(STRIPE_HANDLE, &sh->state); 1103 } 1104 1105 if (!head_sh->batch_head) 1106 continue; 1107 sh = list_first_entry(&sh->batch_list, struct stripe_head, 1108 batch_list); 1109 if (sh != head_sh) 1110 goto again; 1111 } 1112 } 1113 1114 static struct dma_async_tx_descriptor * 1115 async_copy_data(int frombio, struct bio *bio, struct page **page, 1116 sector_t sector, struct dma_async_tx_descriptor *tx, 1117 struct stripe_head *sh) 1118 { 1119 struct bio_vec bvl; 1120 struct bvec_iter iter; 1121 struct page *bio_page; 1122 int page_offset; 1123 struct async_submit_ctl submit; 1124 enum async_tx_flags flags = 0; 1125 1126 if (bio->bi_iter.bi_sector >= sector) 1127 page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512; 1128 else 1129 page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512; 1130 1131 if (frombio) 1132 flags |= ASYNC_TX_FENCE; 1133 init_async_submit(&submit, flags, tx, NULL, NULL, NULL); 1134 1135 bio_for_each_segment(bvl, bio, iter) { 1136 int len = bvl.bv_len; 1137 int clen; 1138 int b_offset = 0; 1139 1140 if (page_offset < 0) { 1141 b_offset = -page_offset; 1142 page_offset += b_offset; 1143 len -= b_offset; 1144 } 1145 1146 if (len > 0 && page_offset + len > STRIPE_SIZE) 1147 clen = STRIPE_SIZE - page_offset; 1148 else 1149 clen = len; 1150 1151 if (clen > 0) { 1152 b_offset += bvl.bv_offset; 1153 bio_page = bvl.bv_page; 1154 if (frombio) { 1155 if (sh->raid_conf->skip_copy && 1156 b_offset == 0 && page_offset == 0 && 1157 clen == STRIPE_SIZE) 1158 *page = bio_page; 1159 else 1160 tx = async_memcpy(*page, bio_page, page_offset, 1161 b_offset, clen, &submit); 1162 } else 1163 tx = async_memcpy(bio_page, *page, b_offset, 1164 page_offset, clen, &submit); 1165 } 1166 /* chain the operations */ 1167 submit.depend_tx = tx; 1168 1169 if (clen < len) /* hit end of page */ 1170 break; 1171 page_offset += len; 1172 } 1173 1174 return tx; 1175 } 1176 1177 static void ops_complete_biofill(void *stripe_head_ref) 1178 { 1179 struct stripe_head *sh = stripe_head_ref; 1180 struct bio *return_bi = NULL; 1181 int i; 1182 1183 pr_debug("%s: stripe %llu\n", __func__, 1184 (unsigned long long)sh->sector); 1185 1186 /* clear completed biofills */ 1187 for (i = sh->disks; i--; ) { 1188 struct r5dev *dev = &sh->dev[i]; 1189 1190 /* acknowledge completion of a biofill operation */ 1191 /* and check if we need to reply to a read request, 1192 * new R5_Wantfill requests are held off until 1193 * !STRIPE_BIOFILL_RUN 1194 */ 1195 if (test_and_clear_bit(R5_Wantfill, &dev->flags)) { 1196 struct bio *rbi, *rbi2; 1197 1198 BUG_ON(!dev->read); 1199 rbi = dev->read; 1200 dev->read = NULL; 1201 while (rbi && rbi->bi_iter.bi_sector < 1202 dev->sector + STRIPE_SECTORS) { 1203 rbi2 = r5_next_bio(rbi, dev->sector); 1204 if (!raid5_dec_bi_active_stripes(rbi)) { 1205 rbi->bi_next = return_bi; 1206 return_bi = rbi; 1207 } 1208 rbi = rbi2; 1209 } 1210 } 1211 } 1212 clear_bit(STRIPE_BIOFILL_RUN, &sh->state); 1213 1214 
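	/*
	 * Note: STRIPE_BIOFILL_RUN was cleared just above before the
	 * completed reads are returned; per the comment earlier in this
	 * function, new R5_Wantfill work is only held off while that flag is
	 * set, so handle_stripe may queue fresh biofill operations for this
	 * stripe from here on.
	 */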
return_io(return_bi); 1215 1216 set_bit(STRIPE_HANDLE, &sh->state); 1217 release_stripe(sh); 1218 } 1219 1220 static void ops_run_biofill(struct stripe_head *sh) 1221 { 1222 struct dma_async_tx_descriptor *tx = NULL; 1223 struct async_submit_ctl submit; 1224 int i; 1225 1226 BUG_ON(sh->batch_head); 1227 pr_debug("%s: stripe %llu\n", __func__, 1228 (unsigned long long)sh->sector); 1229 1230 for (i = sh->disks; i--; ) { 1231 struct r5dev *dev = &sh->dev[i]; 1232 if (test_bit(R5_Wantfill, &dev->flags)) { 1233 struct bio *rbi; 1234 spin_lock_irq(&sh->stripe_lock); 1235 dev->read = rbi = dev->toread; 1236 dev->toread = NULL; 1237 spin_unlock_irq(&sh->stripe_lock); 1238 while (rbi && rbi->bi_iter.bi_sector < 1239 dev->sector + STRIPE_SECTORS) { 1240 tx = async_copy_data(0, rbi, &dev->page, 1241 dev->sector, tx, sh); 1242 rbi = r5_next_bio(rbi, dev->sector); 1243 } 1244 } 1245 } 1246 1247 atomic_inc(&sh->count); 1248 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL); 1249 async_trigger_callback(&submit); 1250 } 1251 1252 static void mark_target_uptodate(struct stripe_head *sh, int target) 1253 { 1254 struct r5dev *tgt; 1255 1256 if (target < 0) 1257 return; 1258 1259 tgt = &sh->dev[target]; 1260 set_bit(R5_UPTODATE, &tgt->flags); 1261 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 1262 clear_bit(R5_Wantcompute, &tgt->flags); 1263 } 1264 1265 static void ops_complete_compute(void *stripe_head_ref) 1266 { 1267 struct stripe_head *sh = stripe_head_ref; 1268 1269 pr_debug("%s: stripe %llu\n", __func__, 1270 (unsigned long long)sh->sector); 1271 1272 /* mark the computed target(s) as uptodate */ 1273 mark_target_uptodate(sh, sh->ops.target); 1274 mark_target_uptodate(sh, sh->ops.target2); 1275 1276 clear_bit(STRIPE_COMPUTE_RUN, &sh->state); 1277 if (sh->check_state == check_state_compute_run) 1278 sh->check_state = check_state_compute_result; 1279 set_bit(STRIPE_HANDLE, &sh->state); 1280 release_stripe(sh); 1281 } 1282 1283 /* return a pointer to the address conversion region of the scribble buffer */ 1284 static addr_conv_t *to_addr_conv(struct stripe_head *sh, 1285 struct raid5_percpu *percpu, int i) 1286 { 1287 void *addr; 1288 1289 addr = flex_array_get(percpu->scribble, i); 1290 return addr + sizeof(struct page *) * (sh->disks + 2); 1291 } 1292 1293 /* return a pointer to the address conversion region of the scribble buffer */ 1294 static struct page **to_addr_page(struct raid5_percpu *percpu, int i) 1295 { 1296 void *addr; 1297 1298 addr = flex_array_get(percpu->scribble, i); 1299 return addr; 1300 } 1301 1302 static struct dma_async_tx_descriptor * 1303 ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) 1304 { 1305 int disks = sh->disks; 1306 struct page **xor_srcs = to_addr_page(percpu, 0); 1307 int target = sh->ops.target; 1308 struct r5dev *tgt = &sh->dev[target]; 1309 struct page *xor_dest = tgt->page; 1310 int count = 0; 1311 struct dma_async_tx_descriptor *tx; 1312 struct async_submit_ctl submit; 1313 int i; 1314 1315 BUG_ON(sh->batch_head); 1316 1317 pr_debug("%s: stripe %llu block: %d\n", 1318 __func__, (unsigned long long)sh->sector, target); 1319 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 1320 1321 for (i = disks; i--; ) 1322 if (i != target) 1323 xor_srcs[count++] = sh->dev[i].page; 1324 1325 atomic_inc(&sh->count); 1326 1327 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, NULL, 1328 ops_complete_compute, sh, to_addr_conv(sh, percpu, 0)); 1329 if (unlikely(count == 1)) 1330 tx = async_memcpy(xor_dest, xor_srcs[0], 
0, 0, STRIPE_SIZE, &submit); 1331 else 1332 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); 1333 1334 return tx; 1335 } 1336 1337 /* set_syndrome_sources - populate source buffers for gen_syndrome 1338 * @srcs - (struct page *) array of size sh->disks 1339 * @sh - stripe_head to parse 1340 * 1341 * Populates srcs in proper layout order for the stripe and returns the 1342 * 'count' of sources to be used in a call to async_gen_syndrome. The P 1343 * destination buffer is recorded in srcs[count] and the Q destination 1344 * is recorded in srcs[count+1]]. 1345 */ 1346 static int set_syndrome_sources(struct page **srcs, 1347 struct stripe_head *sh, 1348 int srctype) 1349 { 1350 int disks = sh->disks; 1351 int syndrome_disks = sh->ddf_layout ? disks : (disks - 2); 1352 int d0_idx = raid6_d0(sh); 1353 int count; 1354 int i; 1355 1356 for (i = 0; i < disks; i++) 1357 srcs[i] = NULL; 1358 1359 count = 0; 1360 i = d0_idx; 1361 do { 1362 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); 1363 struct r5dev *dev = &sh->dev[i]; 1364 1365 if (i == sh->qd_idx || i == sh->pd_idx || 1366 (srctype == SYNDROME_SRC_ALL) || 1367 (srctype == SYNDROME_SRC_WANT_DRAIN && 1368 test_bit(R5_Wantdrain, &dev->flags)) || 1369 (srctype == SYNDROME_SRC_WRITTEN && 1370 dev->written)) 1371 srcs[slot] = sh->dev[i].page; 1372 i = raid6_next_disk(i, disks); 1373 } while (i != d0_idx); 1374 1375 return syndrome_disks; 1376 } 1377 1378 static struct dma_async_tx_descriptor * 1379 ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) 1380 { 1381 int disks = sh->disks; 1382 struct page **blocks = to_addr_page(percpu, 0); 1383 int target; 1384 int qd_idx = sh->qd_idx; 1385 struct dma_async_tx_descriptor *tx; 1386 struct async_submit_ctl submit; 1387 struct r5dev *tgt; 1388 struct page *dest; 1389 int i; 1390 int count; 1391 1392 BUG_ON(sh->batch_head); 1393 if (sh->ops.target < 0) 1394 target = sh->ops.target2; 1395 else if (sh->ops.target2 < 0) 1396 target = sh->ops.target; 1397 else 1398 /* we should only have one valid target */ 1399 BUG(); 1400 BUG_ON(target < 0); 1401 pr_debug("%s: stripe %llu block: %d\n", 1402 __func__, (unsigned long long)sh->sector, target); 1403 1404 tgt = &sh->dev[target]; 1405 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 1406 dest = tgt->page; 1407 1408 atomic_inc(&sh->count); 1409 1410 if (target == qd_idx) { 1411 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL); 1412 blocks[count] = NULL; /* regenerating p is not necessary */ 1413 BUG_ON(blocks[count+1] != dest); /* q should already be set */ 1414 init_async_submit(&submit, ASYNC_TX_FENCE, NULL, 1415 ops_complete_compute, sh, 1416 to_addr_conv(sh, percpu, 0)); 1417 tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); 1418 } else { 1419 /* Compute any data- or p-drive using XOR */ 1420 count = 0; 1421 for (i = disks; i-- ; ) { 1422 if (i == target || i == qd_idx) 1423 continue; 1424 blocks[count++] = sh->dev[i].page; 1425 } 1426 1427 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, 1428 NULL, ops_complete_compute, sh, 1429 to_addr_conv(sh, percpu, 0)); 1430 tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, &submit); 1431 } 1432 1433 return tx; 1434 } 1435 1436 static struct dma_async_tx_descriptor * 1437 ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) 1438 { 1439 int i, count, disks = sh->disks; 1440 int syndrome_disks = sh->ddf_layout ? 
disks : disks-2; 1441 int d0_idx = raid6_d0(sh); 1442 int faila = -1, failb = -1; 1443 int target = sh->ops.target; 1444 int target2 = sh->ops.target2; 1445 struct r5dev *tgt = &sh->dev[target]; 1446 struct r5dev *tgt2 = &sh->dev[target2]; 1447 struct dma_async_tx_descriptor *tx; 1448 struct page **blocks = to_addr_page(percpu, 0); 1449 struct async_submit_ctl submit; 1450 1451 BUG_ON(sh->batch_head); 1452 pr_debug("%s: stripe %llu block1: %d block2: %d\n", 1453 __func__, (unsigned long long)sh->sector, target, target2); 1454 BUG_ON(target < 0 || target2 < 0); 1455 BUG_ON(!test_bit(R5_Wantcompute, &tgt->flags)); 1456 BUG_ON(!test_bit(R5_Wantcompute, &tgt2->flags)); 1457 1458 /* we need to open-code set_syndrome_sources to handle the 1459 * slot number conversion for 'faila' and 'failb' 1460 */ 1461 for (i = 0; i < disks ; i++) 1462 blocks[i] = NULL; 1463 count = 0; 1464 i = d0_idx; 1465 do { 1466 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); 1467 1468 blocks[slot] = sh->dev[i].page; 1469 1470 if (i == target) 1471 faila = slot; 1472 if (i == target2) 1473 failb = slot; 1474 i = raid6_next_disk(i, disks); 1475 } while (i != d0_idx); 1476 1477 BUG_ON(faila == failb); 1478 if (failb < faila) 1479 swap(faila, failb); 1480 pr_debug("%s: stripe: %llu faila: %d failb: %d\n", 1481 __func__, (unsigned long long)sh->sector, faila, failb); 1482 1483 atomic_inc(&sh->count); 1484 1485 if (failb == syndrome_disks+1) { 1486 /* Q disk is one of the missing disks */ 1487 if (faila == syndrome_disks) { 1488 /* Missing P+Q, just recompute */ 1489 init_async_submit(&submit, ASYNC_TX_FENCE, NULL, 1490 ops_complete_compute, sh, 1491 to_addr_conv(sh, percpu, 0)); 1492 return async_gen_syndrome(blocks, 0, syndrome_disks+2, 1493 STRIPE_SIZE, &submit); 1494 } else { 1495 struct page *dest; 1496 int data_target; 1497 int qd_idx = sh->qd_idx; 1498 1499 /* Missing D+Q: recompute D from P, then recompute Q */ 1500 if (target == qd_idx) 1501 data_target = target2; 1502 else 1503 data_target = target; 1504 1505 count = 0; 1506 for (i = disks; i-- ; ) { 1507 if (i == data_target || i == qd_idx) 1508 continue; 1509 blocks[count++] = sh->dev[i].page; 1510 } 1511 dest = sh->dev[data_target].page; 1512 init_async_submit(&submit, 1513 ASYNC_TX_FENCE|ASYNC_TX_XOR_ZERO_DST, 1514 NULL, NULL, NULL, 1515 to_addr_conv(sh, percpu, 0)); 1516 tx = async_xor(dest, blocks, 0, count, STRIPE_SIZE, 1517 &submit); 1518 1519 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_ALL); 1520 init_async_submit(&submit, ASYNC_TX_FENCE, tx, 1521 ops_complete_compute, sh, 1522 to_addr_conv(sh, percpu, 0)); 1523 return async_gen_syndrome(blocks, 0, count+2, 1524 STRIPE_SIZE, &submit); 1525 } 1526 } else { 1527 init_async_submit(&submit, ASYNC_TX_FENCE, NULL, 1528 ops_complete_compute, sh, 1529 to_addr_conv(sh, percpu, 0)); 1530 if (failb == syndrome_disks) { 1531 /* We're missing D+P. */ 1532 return async_raid6_datap_recov(syndrome_disks+2, 1533 STRIPE_SIZE, faila, 1534 blocks, &submit); 1535 } else { 1536 /* We're missing D+D. 
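			 *
			 * (Summary of the dispatch in this function, for
			 * reference only: P+Q missing is regenerated with
			 * async_gen_syndrome(); D+Q missing rebuilds D by XOR
			 * against P and then regenerates Q; D+P missing uses
			 * async_raid6_datap_recov(); and this branch, with two
			 * data blocks missing, uses async_raid6_2data_recov().)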
*/ 1537 return async_raid6_2data_recov(syndrome_disks+2, 1538 STRIPE_SIZE, faila, failb, 1539 blocks, &submit); 1540 } 1541 } 1542 } 1543 1544 static void ops_complete_prexor(void *stripe_head_ref) 1545 { 1546 struct stripe_head *sh = stripe_head_ref; 1547 1548 pr_debug("%s: stripe %llu\n", __func__, 1549 (unsigned long long)sh->sector); 1550 } 1551 1552 static struct dma_async_tx_descriptor * 1553 ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu, 1554 struct dma_async_tx_descriptor *tx) 1555 { 1556 int disks = sh->disks; 1557 struct page **xor_srcs = to_addr_page(percpu, 0); 1558 int count = 0, pd_idx = sh->pd_idx, i; 1559 struct async_submit_ctl submit; 1560 1561 /* existing parity data subtracted */ 1562 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 1563 1564 BUG_ON(sh->batch_head); 1565 pr_debug("%s: stripe %llu\n", __func__, 1566 (unsigned long long)sh->sector); 1567 1568 for (i = disks; i--; ) { 1569 struct r5dev *dev = &sh->dev[i]; 1570 /* Only process blocks that are known to be uptodate */ 1571 if (test_bit(R5_Wantdrain, &dev->flags)) 1572 xor_srcs[count++] = dev->page; 1573 } 1574 1575 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_XOR_DROP_DST, tx, 1576 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); 1577 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); 1578 1579 return tx; 1580 } 1581 1582 static struct dma_async_tx_descriptor * 1583 ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu, 1584 struct dma_async_tx_descriptor *tx) 1585 { 1586 struct page **blocks = to_addr_page(percpu, 0); 1587 int count; 1588 struct async_submit_ctl submit; 1589 1590 pr_debug("%s: stripe %llu\n", __func__, 1591 (unsigned long long)sh->sector); 1592 1593 count = set_syndrome_sources(blocks, sh, SYNDROME_SRC_WANT_DRAIN); 1594 1595 init_async_submit(&submit, ASYNC_TX_FENCE|ASYNC_TX_PQ_XOR_DST, tx, 1596 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); 1597 tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); 1598 1599 return tx; 1600 } 1601 1602 static struct dma_async_tx_descriptor * 1603 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) 1604 { 1605 int disks = sh->disks; 1606 int i; 1607 struct stripe_head *head_sh = sh; 1608 1609 pr_debug("%s: stripe %llu\n", __func__, 1610 (unsigned long long)sh->sector); 1611 1612 for (i = disks; i--; ) { 1613 struct r5dev *dev; 1614 struct bio *chosen; 1615 1616 sh = head_sh; 1617 if (test_and_clear_bit(R5_Wantdrain, &head_sh->dev[i].flags)) { 1618 struct bio *wbi; 1619 1620 again: 1621 dev = &sh->dev[i]; 1622 spin_lock_irq(&sh->stripe_lock); 1623 chosen = dev->towrite; 1624 dev->towrite = NULL; 1625 sh->overwrite_disks = 0; 1626 BUG_ON(dev->written); 1627 wbi = dev->written = chosen; 1628 spin_unlock_irq(&sh->stripe_lock); 1629 WARN_ON(dev->page != dev->orig_page); 1630 1631 while (wbi && wbi->bi_iter.bi_sector < 1632 dev->sector + STRIPE_SECTORS) { 1633 if (wbi->bi_rw & REQ_FUA) 1634 set_bit(R5_WantFUA, &dev->flags); 1635 if (wbi->bi_rw & REQ_SYNC) 1636 set_bit(R5_SyncIO, &dev->flags); 1637 if (wbi->bi_rw & REQ_DISCARD) 1638 set_bit(R5_Discard, &dev->flags); 1639 else { 1640 tx = async_copy_data(1, wbi, &dev->page, 1641 dev->sector, tx, sh); 1642 if (dev->page != dev->orig_page) { 1643 set_bit(R5_SkipCopy, &dev->flags); 1644 clear_bit(R5_UPTODATE, &dev->flags); 1645 clear_bit(R5_OVERWRITE, &dev->flags); 1646 } 1647 } 1648 wbi = r5_next_bio(wbi, dev->sector); 1649 } 1650 1651 if (head_sh->batch_head) { 1652 sh = 
list_first_entry(&sh->batch_list, 1653 struct stripe_head, 1654 batch_list); 1655 if (sh == head_sh) 1656 continue; 1657 goto again; 1658 } 1659 } 1660 } 1661 1662 return tx; 1663 } 1664 1665 static void ops_complete_reconstruct(void *stripe_head_ref) 1666 { 1667 struct stripe_head *sh = stripe_head_ref; 1668 int disks = sh->disks; 1669 int pd_idx = sh->pd_idx; 1670 int qd_idx = sh->qd_idx; 1671 int i; 1672 bool fua = false, sync = false, discard = false; 1673 1674 pr_debug("%s: stripe %llu\n", __func__, 1675 (unsigned long long)sh->sector); 1676 1677 for (i = disks; i--; ) { 1678 fua |= test_bit(R5_WantFUA, &sh->dev[i].flags); 1679 sync |= test_bit(R5_SyncIO, &sh->dev[i].flags); 1680 discard |= test_bit(R5_Discard, &sh->dev[i].flags); 1681 } 1682 1683 for (i = disks; i--; ) { 1684 struct r5dev *dev = &sh->dev[i]; 1685 1686 if (dev->written || i == pd_idx || i == qd_idx) { 1687 if (!discard && !test_bit(R5_SkipCopy, &dev->flags)) 1688 set_bit(R5_UPTODATE, &dev->flags); 1689 if (fua) 1690 set_bit(R5_WantFUA, &dev->flags); 1691 if (sync) 1692 set_bit(R5_SyncIO, &dev->flags); 1693 } 1694 } 1695 1696 if (sh->reconstruct_state == reconstruct_state_drain_run) 1697 sh->reconstruct_state = reconstruct_state_drain_result; 1698 else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) 1699 sh->reconstruct_state = reconstruct_state_prexor_drain_result; 1700 else { 1701 BUG_ON(sh->reconstruct_state != reconstruct_state_run); 1702 sh->reconstruct_state = reconstruct_state_result; 1703 } 1704 1705 set_bit(STRIPE_HANDLE, &sh->state); 1706 release_stripe(sh); 1707 } 1708 1709 static void 1710 ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu, 1711 struct dma_async_tx_descriptor *tx) 1712 { 1713 int disks = sh->disks; 1714 struct page **xor_srcs; 1715 struct async_submit_ctl submit; 1716 int count, pd_idx = sh->pd_idx, i; 1717 struct page *xor_dest; 1718 int prexor = 0; 1719 unsigned long flags; 1720 int j = 0; 1721 struct stripe_head *head_sh = sh; 1722 int last_stripe; 1723 1724 pr_debug("%s: stripe %llu\n", __func__, 1725 (unsigned long long)sh->sector); 1726 1727 for (i = 0; i < sh->disks; i++) { 1728 if (pd_idx == i) 1729 continue; 1730 if (!test_bit(R5_Discard, &sh->dev[i].flags)) 1731 break; 1732 } 1733 if (i >= sh->disks) { 1734 atomic_inc(&sh->count); 1735 set_bit(R5_Discard, &sh->dev[pd_idx].flags); 1736 ops_complete_reconstruct(sh); 1737 return; 1738 } 1739 again: 1740 count = 0; 1741 xor_srcs = to_addr_page(percpu, j); 1742 /* check if prexor is active which means only process blocks 1743 * that are part of a read-modify-write (written) 1744 */ 1745 if (head_sh->reconstruct_state == reconstruct_state_prexor_drain_run) { 1746 prexor = 1; 1747 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; 1748 for (i = disks; i--; ) { 1749 struct r5dev *dev = &sh->dev[i]; 1750 if (head_sh->dev[i].written) 1751 xor_srcs[count++] = dev->page; 1752 } 1753 } else { 1754 xor_dest = sh->dev[pd_idx].page; 1755 for (i = disks; i--; ) { 1756 struct r5dev *dev = &sh->dev[i]; 1757 if (i != pd_idx) 1758 xor_srcs[count++] = dev->page; 1759 } 1760 } 1761 1762 /* 1/ if we prexor'd then the dest is reused as a source 1763 * 2/ if we did not prexor then we are redoing the parity 1764 * set ASYNC_TX_XOR_DROP_DST and ASYNC_TX_XOR_ZERO_DST 1765 * for the synchronous xor case 1766 */ 1767 last_stripe = !head_sh->batch_head || 1768 list_first_entry(&sh->batch_list, 1769 struct stripe_head, batch_list) == head_sh; 1770 if (last_stripe) { 1771 flags = ASYNC_TX_ACK | 1772 (prexor ? 
ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST); 1773 1774 atomic_inc(&head_sh->count); 1775 init_async_submit(&submit, flags, tx, ops_complete_reconstruct, head_sh, 1776 to_addr_conv(sh, percpu, j)); 1777 } else { 1778 flags = prexor ? ASYNC_TX_XOR_DROP_DST : ASYNC_TX_XOR_ZERO_DST; 1779 init_async_submit(&submit, flags, tx, NULL, NULL, 1780 to_addr_conv(sh, percpu, j)); 1781 } 1782 1783 if (unlikely(count == 1)) 1784 tx = async_memcpy(xor_dest, xor_srcs[0], 0, 0, STRIPE_SIZE, &submit); 1785 else 1786 tx = async_xor(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, &submit); 1787 if (!last_stripe) { 1788 j++; 1789 sh = list_first_entry(&sh->batch_list, struct stripe_head, 1790 batch_list); 1791 goto again; 1792 } 1793 } 1794 1795 static void 1796 ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu, 1797 struct dma_async_tx_descriptor *tx) 1798 { 1799 struct async_submit_ctl submit; 1800 struct page **blocks; 1801 int count, i, j = 0; 1802 struct stripe_head *head_sh = sh; 1803 int last_stripe; 1804 int synflags; 1805 unsigned long txflags; 1806 1807 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); 1808 1809 for (i = 0; i < sh->disks; i++) { 1810 if (sh->pd_idx == i || sh->qd_idx == i) 1811 continue; 1812 if (!test_bit(R5_Discard, &sh->dev[i].flags)) 1813 break; 1814 } 1815 if (i >= sh->disks) { 1816 atomic_inc(&sh->count); 1817 set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); 1818 set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); 1819 ops_complete_reconstruct(sh); 1820 return; 1821 } 1822 1823 again: 1824 blocks = to_addr_page(percpu, j); 1825 1826 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { 1827 synflags = SYNDROME_SRC_WRITTEN; 1828 txflags = ASYNC_TX_ACK | ASYNC_TX_PQ_XOR_DST; 1829 } else { 1830 synflags = SYNDROME_SRC_ALL; 1831 txflags = ASYNC_TX_ACK; 1832 } 1833 1834 count = set_syndrome_sources(blocks, sh, synflags); 1835 last_stripe = !head_sh->batch_head || 1836 list_first_entry(&sh->batch_list, 1837 struct stripe_head, batch_list) == head_sh; 1838 1839 if (last_stripe) { 1840 atomic_inc(&head_sh->count); 1841 init_async_submit(&submit, txflags, tx, ops_complete_reconstruct, 1842 head_sh, to_addr_conv(sh, percpu, j)); 1843 } else 1844 init_async_submit(&submit, 0, tx, NULL, NULL, 1845 to_addr_conv(sh, percpu, j)); 1846 tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE, &submit); 1847 if (!last_stripe) { 1848 j++; 1849 sh = list_first_entry(&sh->batch_list, struct stripe_head, 1850 batch_list); 1851 goto again; 1852 } 1853 } 1854 1855 static void ops_complete_check(void *stripe_head_ref) 1856 { 1857 struct stripe_head *sh = stripe_head_ref; 1858 1859 pr_debug("%s: stripe %llu\n", __func__, 1860 (unsigned long long)sh->sector); 1861 1862 sh->check_state = check_state_check_result; 1863 set_bit(STRIPE_HANDLE, &sh->state); 1864 release_stripe(sh); 1865 } 1866 1867 static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu) 1868 { 1869 int disks = sh->disks; 1870 int pd_idx = sh->pd_idx; 1871 int qd_idx = sh->qd_idx; 1872 struct page *xor_dest; 1873 struct page **xor_srcs = to_addr_page(percpu, 0); 1874 struct dma_async_tx_descriptor *tx; 1875 struct async_submit_ctl submit; 1876 int count; 1877 int i; 1878 1879 pr_debug("%s: stripe %llu\n", __func__, 1880 (unsigned long long)sh->sector); 1881 1882 BUG_ON(sh->batch_head); 1883 count = 0; 1884 xor_dest = sh->dev[pd_idx].page; 1885 xor_srcs[count++] = xor_dest; 1886 for (i = disks; i--; ) { 1887 if (i == pd_idx || i == qd_idx) 1888 continue; 1889 
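		/*
		 * Every data block is gathered as an XOR-validation source;
		 * the parity page was already added as the first source (and
		 * doubles as xor_dest), so a zero zero_sum_result from
		 * async_xor_val() below indicates that data XOR P is
		 * consistent.
		 */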
xor_srcs[count++] = sh->dev[i].page; 1890 } 1891 1892 init_async_submit(&submit, 0, NULL, NULL, NULL, 1893 to_addr_conv(sh, percpu, 0)); 1894 tx = async_xor_val(xor_dest, xor_srcs, 0, count, STRIPE_SIZE, 1895 &sh->ops.zero_sum_result, &submit); 1896 1897 atomic_inc(&sh->count); 1898 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL); 1899 tx = async_trigger_callback(&submit); 1900 } 1901 1902 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp) 1903 { 1904 struct page **srcs = to_addr_page(percpu, 0); 1905 struct async_submit_ctl submit; 1906 int count; 1907 1908 pr_debug("%s: stripe %llu checkp: %d\n", __func__, 1909 (unsigned long long)sh->sector, checkp); 1910 1911 BUG_ON(sh->batch_head); 1912 count = set_syndrome_sources(srcs, sh, SYNDROME_SRC_ALL); 1913 if (!checkp) 1914 srcs[count] = NULL; 1915 1916 atomic_inc(&sh->count); 1917 init_async_submit(&submit, ASYNC_TX_ACK, NULL, ops_complete_check, 1918 sh, to_addr_conv(sh, percpu, 0)); 1919 async_syndrome_val(srcs, 0, count+2, STRIPE_SIZE, 1920 &sh->ops.zero_sum_result, percpu->spare_page, &submit); 1921 } 1922 1923 static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) 1924 { 1925 int overlap_clear = 0, i, disks = sh->disks; 1926 struct dma_async_tx_descriptor *tx = NULL; 1927 struct r5conf *conf = sh->raid_conf; 1928 int level = conf->level; 1929 struct raid5_percpu *percpu; 1930 unsigned long cpu; 1931 1932 cpu = get_cpu(); 1933 percpu = per_cpu_ptr(conf->percpu, cpu); 1934 if (test_bit(STRIPE_OP_BIOFILL, &ops_request)) { 1935 ops_run_biofill(sh); 1936 overlap_clear++; 1937 } 1938 1939 if (test_bit(STRIPE_OP_COMPUTE_BLK, &ops_request)) { 1940 if (level < 6) 1941 tx = ops_run_compute5(sh, percpu); 1942 else { 1943 if (sh->ops.target2 < 0 || sh->ops.target < 0) 1944 tx = ops_run_compute6_1(sh, percpu); 1945 else 1946 tx = ops_run_compute6_2(sh, percpu); 1947 } 1948 /* terminate the chain if reconstruct is not set to be run */ 1949 if (tx && !test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) 1950 async_tx_ack(tx); 1951 } 1952 1953 if (test_bit(STRIPE_OP_PREXOR, &ops_request)) { 1954 if (level < 6) 1955 tx = ops_run_prexor5(sh, percpu, tx); 1956 else 1957 tx = ops_run_prexor6(sh, percpu, tx); 1958 } 1959 1960 if (test_bit(STRIPE_OP_BIODRAIN, &ops_request)) { 1961 tx = ops_run_biodrain(sh, tx); 1962 overlap_clear++; 1963 } 1964 1965 if (test_bit(STRIPE_OP_RECONSTRUCT, &ops_request)) { 1966 if (level < 6) 1967 ops_run_reconstruct5(sh, percpu, tx); 1968 else 1969 ops_run_reconstruct6(sh, percpu, tx); 1970 } 1971 1972 if (test_bit(STRIPE_OP_CHECK, &ops_request)) { 1973 if (sh->check_state == check_state_run) 1974 ops_run_check_p(sh, percpu); 1975 else if (sh->check_state == check_state_run_q) 1976 ops_run_check_pq(sh, percpu, 0); 1977 else if (sh->check_state == check_state_run_pq) 1978 ops_run_check_pq(sh, percpu, 1); 1979 else 1980 BUG(); 1981 } 1982 1983 if (overlap_clear && !sh->batch_head) 1984 for (i = disks; i--; ) { 1985 struct r5dev *dev = &sh->dev[i]; 1986 if (test_and_clear_bit(R5_Overlap, &dev->flags)) 1987 wake_up(&sh->raid_conf->wait_for_overlap); 1988 } 1989 put_cpu(); 1990 } 1991 1992 static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp) 1993 { 1994 struct stripe_head *sh; 1995 1996 sh = kmem_cache_zalloc(sc, gfp); 1997 if (sh) { 1998 spin_lock_init(&sh->stripe_lock); 1999 spin_lock_init(&sh->batch_lock); 2000 INIT_LIST_HEAD(&sh->batch_list); 2001 INIT_LIST_HEAD(&sh->lru); 2002 atomic_set(&sh->count, 1); 2003 } 2004 return 
sh; 2005 } 2006 static int grow_one_stripe(struct r5conf *conf, gfp_t gfp) 2007 { 2008 struct stripe_head *sh; 2009 2010 sh = alloc_stripe(conf->slab_cache, gfp); 2011 if (!sh) 2012 return 0; 2013 2014 sh->raid_conf = conf; 2015 2016 if (grow_buffers(sh, gfp)) { 2017 shrink_buffers(sh); 2018 kmem_cache_free(conf->slab_cache, sh); 2019 return 0; 2020 } 2021 sh->hash_lock_index = 2022 conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS; 2023 /* we just created an active stripe so... */ 2024 atomic_inc(&conf->active_stripes); 2025 2026 release_stripe(sh); 2027 conf->max_nr_stripes++; 2028 return 1; 2029 } 2030 2031 static int grow_stripes(struct r5conf *conf, int num) 2032 { 2033 struct kmem_cache *sc; 2034 int devs = max(conf->raid_disks, conf->previous_raid_disks); 2035 2036 if (conf->mddev->gendisk) 2037 sprintf(conf->cache_name[0], 2038 "raid%d-%s", conf->level, mdname(conf->mddev)); 2039 else 2040 sprintf(conf->cache_name[0], 2041 "raid%d-%p", conf->level, conf->mddev); 2042 sprintf(conf->cache_name[1], "%s-alt", conf->cache_name[0]); 2043 2044 conf->active_name = 0; 2045 sc = kmem_cache_create(conf->cache_name[conf->active_name], 2046 sizeof(struct stripe_head)+(devs-1)*sizeof(struct r5dev), 2047 0, 0, NULL); 2048 if (!sc) 2049 return 1; 2050 conf->slab_cache = sc; 2051 conf->pool_size = devs; 2052 while (num--) 2053 if (!grow_one_stripe(conf, GFP_KERNEL)) 2054 return 1; 2055 2056 return 0; 2057 } 2058 2059 /** 2060 * scribble_len - return the required size of the scribble region 2061 * @num - total number of disks in the array 2062 * 2063 * The size must be enough to contain: 2064 * 1/ a struct page pointer for each device in the array +2 2065 * 2/ room to convert each entry in (1) to its corresponding dma 2066 * (dma_map_page()) or page (page_address()) address. 2067 * 2068 * Note: the +2 is for the destination buffers of the ddf/raid6 case where we 2069 * calculate over all devices (not just the data blocks), using zeros in place 2070 * of the P and Q blocks. 2071 */ 2072 static struct flex_array *scribble_alloc(int num, int cnt, gfp_t flags) 2073 { 2074 struct flex_array *ret; 2075 size_t len; 2076 2077 len = sizeof(struct page *) * (num+2) + sizeof(addr_conv_t) * (num+2); 2078 ret = flex_array_alloc(len, cnt, flags); 2079 if (!ret) 2080 return NULL; 2081 /* always prealloc all elements, so no locking is required */ 2082 if (flex_array_prealloc(ret, 0, cnt, flags)) { 2083 flex_array_free(ret); 2084 return NULL; 2085 } 2086 return ret; 2087 } 2088 2089 static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors) 2090 { 2091 unsigned long cpu; 2092 int err = 0; 2093 2094 mddev_suspend(conf->mddev); 2095 get_online_cpus(); 2096 for_each_present_cpu(cpu) { 2097 struct raid5_percpu *percpu; 2098 struct flex_array *scribble; 2099 2100 percpu = per_cpu_ptr(conf->percpu, cpu); 2101 scribble = scribble_alloc(new_disks, 2102 new_sectors / STRIPE_SECTORS, 2103 GFP_NOIO); 2104 2105 if (scribble) { 2106 flex_array_free(percpu->scribble); 2107 percpu->scribble = scribble; 2108 } else { 2109 err = -ENOMEM; 2110 break; 2111 } 2112 } 2113 put_online_cpus(); 2114 mddev_resume(conf->mddev); 2115 return err; 2116 } 2117 2118 static int resize_stripes(struct r5conf *conf, int newsize) 2119 { 2120 /* Make all the stripes able to hold 'newsize' devices. 2121 * New slots in each stripe get 'page' set to a new page. 2122 * 2123 * This happens in stages: 2124 * 1/ create a new kmem_cache and allocate the required number of 2125 * stripe_heads. 
2126 * 2/ gather all the old stripe_heads and transfer the pages across 2127 * to the new stripe_heads. This will have the side effect of 2128 * freezing the array as once all stripe_heads have been collected, 2129 * no IO will be possible. Old stripe heads are freed once their 2130 * pages have been transferred over, and the old kmem_cache is 2131 * freed when all stripes are done. 2132 * 3/ reallocate conf->disks to be suitably bigger. If this fails, 2133 * we simply return a failure status - no need to clean anything up. 2134 * 4/ allocate new pages for the new slots in the new stripe_heads. 2135 * If this fails, we don't bother trying to shrink the 2136 * stripe_heads down again, we just leave them as they are. 2137 * As each stripe_head is processed the new one is released into 2138 * active service. 2139 * 2140 * Once step2 is started, we cannot afford to wait for a write, 2141 * so we use GFP_NOIO allocations. 2142 */ 2143 struct stripe_head *osh, *nsh; 2144 LIST_HEAD(newstripes); 2145 struct disk_info *ndisks; 2146 int err; 2147 struct kmem_cache *sc; 2148 int i; 2149 int hash, cnt; 2150 2151 if (newsize <= conf->pool_size) 2152 return 0; /* never bother to shrink */ 2153 2154 err = md_allow_write(conf->mddev); 2155 if (err) 2156 return err; 2157 2158 /* Step 1 */ 2159 sc = kmem_cache_create(conf->cache_name[1-conf->active_name], 2160 sizeof(struct stripe_head)+(newsize-1)*sizeof(struct r5dev), 2161 0, 0, NULL); 2162 if (!sc) 2163 return -ENOMEM; 2164 2165 /* Need to ensure auto-resizing doesn't interfere */ 2166 mutex_lock(&conf->cache_size_mutex); 2167 2168 for (i = conf->max_nr_stripes; i; i--) { 2169 nsh = alloc_stripe(sc, GFP_KERNEL); 2170 if (!nsh) 2171 break; 2172 2173 nsh->raid_conf = conf; 2174 list_add(&nsh->lru, &newstripes); 2175 } 2176 if (i) { 2177 /* didn't get enough, give up */ 2178 while (!list_empty(&newstripes)) { 2179 nsh = list_entry(newstripes.next, struct stripe_head, lru); 2180 list_del(&nsh->lru); 2181 kmem_cache_free(sc, nsh); 2182 } 2183 kmem_cache_destroy(sc); 2184 mutex_unlock(&conf->cache_size_mutex); 2185 return -ENOMEM; 2186 } 2187 /* Step 2 - Must use GFP_NOIO now. 2188 * OK, we have enough stripes, start collecting inactive 2189 * stripes and copying them over 2190 */ 2191 hash = 0; 2192 cnt = 0; 2193 list_for_each_entry(nsh, &newstripes, lru) { 2194 lock_device_hash_lock(conf, hash); 2195 wait_event_exclusive_cmd(conf->wait_for_stripe[hash], 2196 !list_empty(conf->inactive_list + hash), 2197 unlock_device_hash_lock(conf, hash), 2198 lock_device_hash_lock(conf, hash)); 2199 osh = get_free_stripe(conf, hash); 2200 unlock_device_hash_lock(conf, hash); 2201 2202 for(i=0; i<conf->pool_size; i++) { 2203 nsh->dev[i].page = osh->dev[i].page; 2204 nsh->dev[i].orig_page = osh->dev[i].page; 2205 } 2206 nsh->hash_lock_index = hash; 2207 kmem_cache_free(conf->slab_cache, osh); 2208 cnt++; 2209 if (cnt >= conf->max_nr_stripes / NR_STRIPE_HASH_LOCKS + 2210 !!((conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS) > hash)) { 2211 hash++; 2212 cnt = 0; 2213 } 2214 } 2215 kmem_cache_destroy(conf->slab_cache); 2216 2217 /* Step 3.
2218 * At this point, we are holding all the stripes so the array 2219 * is completely stalled, so now is a good time to resize 2220 * conf->disks and the scribble region 2221 */ 2222 ndisks = kzalloc(newsize * sizeof(struct disk_info), GFP_NOIO); 2223 if (ndisks) { 2224 for (i=0; i<conf->raid_disks; i++) 2225 ndisks[i] = conf->disks[i]; 2226 kfree(conf->disks); 2227 conf->disks = ndisks; 2228 } else 2229 err = -ENOMEM; 2230 2231 mutex_unlock(&conf->cache_size_mutex); 2232 /* Step 4, return new stripes to service */ 2233 while(!list_empty(&newstripes)) { 2234 nsh = list_entry(newstripes.next, struct stripe_head, lru); 2235 list_del_init(&nsh->lru); 2236 2237 for (i=conf->raid_disks; i < newsize; i++) 2238 if (nsh->dev[i].page == NULL) { 2239 struct page *p = alloc_page(GFP_NOIO); 2240 nsh->dev[i].page = p; 2241 nsh->dev[i].orig_page = p; 2242 if (!p) 2243 err = -ENOMEM; 2244 } 2245 release_stripe(nsh); 2246 } 2247 /* critical section pass, GFP_NOIO no longer needed */ 2248 2249 conf->slab_cache = sc; 2250 conf->active_name = 1-conf->active_name; 2251 if (!err) 2252 conf->pool_size = newsize; 2253 return err; 2254 } 2255 2256 static int drop_one_stripe(struct r5conf *conf) 2257 { 2258 struct stripe_head *sh; 2259 int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK; 2260 2261 spin_lock_irq(conf->hash_locks + hash); 2262 sh = get_free_stripe(conf, hash); 2263 spin_unlock_irq(conf->hash_locks + hash); 2264 if (!sh) 2265 return 0; 2266 BUG_ON(atomic_read(&sh->count)); 2267 shrink_buffers(sh); 2268 kmem_cache_free(conf->slab_cache, sh); 2269 atomic_dec(&conf->active_stripes); 2270 conf->max_nr_stripes--; 2271 return 1; 2272 } 2273 2274 static void shrink_stripes(struct r5conf *conf) 2275 { 2276 while (conf->max_nr_stripes && 2277 drop_one_stripe(conf)) 2278 ; 2279 2280 if (conf->slab_cache) 2281 kmem_cache_destroy(conf->slab_cache); 2282 conf->slab_cache = NULL; 2283 } 2284 2285 static void raid5_end_read_request(struct bio * bi) 2286 { 2287 struct stripe_head *sh = bi->bi_private; 2288 struct r5conf *conf = sh->raid_conf; 2289 int disks = sh->disks, i; 2290 char b[BDEVNAME_SIZE]; 2291 struct md_rdev *rdev = NULL; 2292 sector_t s; 2293 2294 for (i=0 ; i<disks; i++) 2295 if (bi == &sh->dev[i].req) 2296 break; 2297 2298 pr_debug("end_read_request %llu/%d, count: %d, error %d.\n", 2299 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 2300 bi->bi_error); 2301 if (i == disks) { 2302 BUG(); 2303 return; 2304 } 2305 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) 2306 /* If replacement finished while this request was outstanding, 2307 * 'replacement' might be NULL already. 2308 * In that case it moved down to 'rdev'. 2309 * rdev is not removed until all requests are finished. 2310 */ 2311 rdev = conf->disks[i].replacement; 2312 if (!rdev) 2313 rdev = conf->disks[i].rdev; 2314 2315 if (use_new_offset(conf, sh)) 2316 s = sh->sector + rdev->new_data_offset; 2317 else 2318 s = sh->sector + rdev->data_offset; 2319 if (!bi->bi_error) { 2320 set_bit(R5_UPTODATE, &sh->dev[i].flags); 2321 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 2322 /* Note that this cannot happen on a 2323 * replacement device. 
We just fail those on 2324 * any error 2325 */ 2326 printk_ratelimited( 2327 KERN_INFO 2328 "md/raid:%s: read error corrected" 2329 " (%lu sectors at %llu on %s)\n", 2330 mdname(conf->mddev), STRIPE_SECTORS, 2331 (unsigned long long)s, 2332 bdevname(rdev->bdev, b)); 2333 atomic_add(STRIPE_SECTORS, &rdev->corrected_errors); 2334 clear_bit(R5_ReadError, &sh->dev[i].flags); 2335 clear_bit(R5_ReWrite, &sh->dev[i].flags); 2336 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) 2337 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); 2338 2339 if (atomic_read(&rdev->read_errors)) 2340 atomic_set(&rdev->read_errors, 0); 2341 } else { 2342 const char *bdn = bdevname(rdev->bdev, b); 2343 int retry = 0; 2344 int set_bad = 0; 2345 2346 clear_bit(R5_UPTODATE, &sh->dev[i].flags); 2347 atomic_inc(&rdev->read_errors); 2348 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) 2349 printk_ratelimited( 2350 KERN_WARNING 2351 "md/raid:%s: read error on replacement device " 2352 "(sector %llu on %s).\n", 2353 mdname(conf->mddev), 2354 (unsigned long long)s, 2355 bdn); 2356 else if (conf->mddev->degraded >= conf->max_degraded) { 2357 set_bad = 1; 2358 printk_ratelimited( 2359 KERN_WARNING 2360 "md/raid:%s: read error not correctable " 2361 "(sector %llu on %s).\n", 2362 mdname(conf->mddev), 2363 (unsigned long long)s, 2364 bdn); 2365 } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) { 2366 /* Oh, no!!! */ 2367 set_bad = 1; 2368 printk_ratelimited( 2369 KERN_WARNING 2370 "md/raid:%s: read error NOT corrected!! " 2371 "(sector %llu on %s).\n", 2372 mdname(conf->mddev), 2373 (unsigned long long)s, 2374 bdn); 2375 } else if (atomic_read(&rdev->read_errors) 2376 > conf->max_nr_stripes) 2377 printk(KERN_WARNING 2378 "md/raid:%s: Too many read errors, failing device %s.\n", 2379 mdname(conf->mddev), bdn); 2380 else 2381 retry = 1; 2382 if (set_bad && test_bit(In_sync, &rdev->flags) 2383 && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) 2384 retry = 1; 2385 if (retry) 2386 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { 2387 set_bit(R5_ReadError, &sh->dev[i].flags); 2388 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); 2389 } else 2390 set_bit(R5_ReadNoMerge, &sh->dev[i].flags); 2391 else { 2392 clear_bit(R5_ReadError, &sh->dev[i].flags); 2393 clear_bit(R5_ReWrite, &sh->dev[i].flags); 2394 if (!(set_bad 2395 && test_bit(In_sync, &rdev->flags) 2396 && rdev_set_badblocks( 2397 rdev, sh->sector, STRIPE_SECTORS, 0))) 2398 md_error(conf->mddev, rdev); 2399 } 2400 } 2401 rdev_dec_pending(rdev, conf->mddev); 2402 clear_bit(R5_LOCKED, &sh->dev[i].flags); 2403 set_bit(STRIPE_HANDLE, &sh->state); 2404 release_stripe(sh); 2405 } 2406 2407 static void raid5_end_write_request(struct bio *bi) 2408 { 2409 struct stripe_head *sh = bi->bi_private; 2410 struct r5conf *conf = sh->raid_conf; 2411 int disks = sh->disks, i; 2412 struct md_rdev *uninitialized_var(rdev); 2413 sector_t first_bad; 2414 int bad_sectors; 2415 int replacement = 0; 2416 2417 for (i = 0 ; i < disks; i++) { 2418 if (bi == &sh->dev[i].req) { 2419 rdev = conf->disks[i].rdev; 2420 break; 2421 } 2422 if (bi == &sh->dev[i].rreq) { 2423 rdev = conf->disks[i].replacement; 2424 if (rdev) 2425 replacement = 1; 2426 else 2427 /* rdev was removed and 'replacement' 2428 * replaced it. rdev is not removed 2429 * until all requests are finished. 
2430 */ 2431 rdev = conf->disks[i].rdev; 2432 break; 2433 } 2434 } 2435 pr_debug("end_write_request %llu/%d, count %d, error: %d.\n", 2436 (unsigned long long)sh->sector, i, atomic_read(&sh->count), 2437 bi->bi_error); 2438 if (i == disks) { 2439 BUG(); 2440 return; 2441 } 2442 2443 if (replacement) { 2444 if (bi->bi_error) 2445 md_error(conf->mddev, rdev); 2446 else if (is_badblock(rdev, sh->sector, 2447 STRIPE_SECTORS, 2448 &first_bad, &bad_sectors)) 2449 set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); 2450 } else { 2451 if (bi->bi_error) { 2452 set_bit(STRIPE_DEGRADED, &sh->state); 2453 set_bit(WriteErrorSeen, &rdev->flags); 2454 set_bit(R5_WriteError, &sh->dev[i].flags); 2455 if (!test_and_set_bit(WantReplacement, &rdev->flags)) 2456 set_bit(MD_RECOVERY_NEEDED, 2457 &rdev->mddev->recovery); 2458 } else if (is_badblock(rdev, sh->sector, 2459 STRIPE_SECTORS, 2460 &first_bad, &bad_sectors)) { 2461 set_bit(R5_MadeGood, &sh->dev[i].flags); 2462 if (test_bit(R5_ReadError, &sh->dev[i].flags)) 2463 /* That was a successful write so make 2464 * sure it looks like we already did 2465 * a re-write. 2466 */ 2467 set_bit(R5_ReWrite, &sh->dev[i].flags); 2468 } 2469 } 2470 rdev_dec_pending(rdev, conf->mddev); 2471 2472 if (sh->batch_head && bi->bi_error && !replacement) 2473 set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); 2474 2475 if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) 2476 clear_bit(R5_LOCKED, &sh->dev[i].flags); 2477 set_bit(STRIPE_HANDLE, &sh->state); 2478 release_stripe(sh); 2479 2480 if (sh->batch_head && sh != sh->batch_head) 2481 release_stripe(sh->batch_head); 2482 } 2483 2484 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous); 2485 2486 static void raid5_build_block(struct stripe_head *sh, int i, int previous) 2487 { 2488 struct r5dev *dev = &sh->dev[i]; 2489 2490 bio_init(&dev->req); 2491 dev->req.bi_io_vec = &dev->vec; 2492 dev->req.bi_max_vecs = 1; 2493 dev->req.bi_private = sh; 2494 2495 bio_init(&dev->rreq); 2496 dev->rreq.bi_io_vec = &dev->rvec; 2497 dev->rreq.bi_max_vecs = 1; 2498 dev->rreq.bi_private = sh; 2499 2500 dev->flags = 0; 2501 dev->sector = compute_blocknr(sh, i, previous); 2502 } 2503 2504 static void error(struct mddev *mddev, struct md_rdev *rdev) 2505 { 2506 char b[BDEVNAME_SIZE]; 2507 struct r5conf *conf = mddev->private; 2508 unsigned long flags; 2509 pr_debug("raid456: error called\n"); 2510 2511 spin_lock_irqsave(&conf->device_lock, flags); 2512 clear_bit(In_sync, &rdev->flags); 2513 mddev->degraded = calc_degraded(conf); 2514 spin_unlock_irqrestore(&conf->device_lock, flags); 2515 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 2516 2517 set_bit(Blocked, &rdev->flags); 2518 set_bit(Faulty, &rdev->flags); 2519 set_bit(MD_CHANGE_DEVS, &mddev->flags); 2520 printk(KERN_ALERT 2521 "md/raid:%s: Disk failure on %s, disabling device.\n" 2522 "md/raid:%s: Operation continuing on %d devices.\n", 2523 mdname(mddev), 2524 bdevname(rdev->bdev, b), 2525 mdname(mddev), 2526 conf->raid_disks - mddev->degraded); 2527 } 2528 2529 /* 2530 * Input: a 'big' sector number, 2531 * Output: index of the data and parity disk, and the sector # in them. 2532 */ 2533 static sector_t raid5_compute_sector(struct r5conf *conf, sector_t r_sector, 2534 int previous, int *dd_idx, 2535 struct stripe_head *sh) 2536 { 2537 sector_t stripe, stripe2; 2538 sector_t chunk_number; 2539 unsigned int chunk_offset; 2540 int pd_idx, qd_idx; 2541 int ddf_layout = 0; 2542 sector_t new_sector; 2543 int algorithm = previous ? 
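/*
 * Editor's aside (illustrative only): a standalone model of one case handled
 * below - ALGORITHM_LEFT_SYMMETRIC on RAID5, where parity rotates backwards by
 * one device per stripe and data slots are numbered starting just after
 * parity.  It assumes data_disks == raid_disks - 1 (single parity); the struct
 * and function names are invented for the example.
 *
 *   #include <stdint.h>
 *
 *   struct r5_map { uint64_t new_sector; int dd_idx; int pd_idx; };
 *
 *   static struct r5_map map_left_symmetric(uint64_t r_sector, int raid_disks,
 *                                           int sectors_per_chunk)
 *   {
 *           struct r5_map m;
 *           int data_disks = raid_disks - 1;
 *           unsigned int chunk_offset = r_sector % sectors_per_chunk;
 *           uint64_t chunk_number = r_sector / sectors_per_chunk;
 *           int dd = chunk_number % data_disks;
 *           uint64_t stripe = chunk_number / data_disks;
 *
 *           m.pd_idx = data_disks - (int)(stripe % raid_disks);
 *           m.dd_idx = (m.pd_idx + 1 + dd) % raid_disks;
 *           m.new_sector = stripe * sectors_per_chunk + chunk_offset;
 *           return m;
 *   }
 *
 * Worked example: raid_disks = 4, sectors_per_chunk = 8, r_sector = 100 gives
 * chunk_number = 12, chunk_offset = 4, stripe = 4, so pd_idx = 3, dd_idx = 0
 * and new_sector = 36.
 */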
conf->prev_algo 2544 : conf->algorithm; 2545 int sectors_per_chunk = previous ? conf->prev_chunk_sectors 2546 : conf->chunk_sectors; 2547 int raid_disks = previous ? conf->previous_raid_disks 2548 : conf->raid_disks; 2549 int data_disks = raid_disks - conf->max_degraded; 2550 2551 /* First compute the information on this sector */ 2552 2553 /* 2554 * Compute the chunk number and the sector offset inside the chunk 2555 */ 2556 chunk_offset = sector_div(r_sector, sectors_per_chunk); 2557 chunk_number = r_sector; 2558 2559 /* 2560 * Compute the stripe number 2561 */ 2562 stripe = chunk_number; 2563 *dd_idx = sector_div(stripe, data_disks); 2564 stripe2 = stripe; 2565 /* 2566 * Select the parity disk based on the user selected algorithm. 2567 */ 2568 pd_idx = qd_idx = -1; 2569 switch(conf->level) { 2570 case 4: 2571 pd_idx = data_disks; 2572 break; 2573 case 5: 2574 switch (algorithm) { 2575 case ALGORITHM_LEFT_ASYMMETRIC: 2576 pd_idx = data_disks - sector_div(stripe2, raid_disks); 2577 if (*dd_idx >= pd_idx) 2578 (*dd_idx)++; 2579 break; 2580 case ALGORITHM_RIGHT_ASYMMETRIC: 2581 pd_idx = sector_div(stripe2, raid_disks); 2582 if (*dd_idx >= pd_idx) 2583 (*dd_idx)++; 2584 break; 2585 case ALGORITHM_LEFT_SYMMETRIC: 2586 pd_idx = data_disks - sector_div(stripe2, raid_disks); 2587 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 2588 break; 2589 case ALGORITHM_RIGHT_SYMMETRIC: 2590 pd_idx = sector_div(stripe2, raid_disks); 2591 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 2592 break; 2593 case ALGORITHM_PARITY_0: 2594 pd_idx = 0; 2595 (*dd_idx)++; 2596 break; 2597 case ALGORITHM_PARITY_N: 2598 pd_idx = data_disks; 2599 break; 2600 default: 2601 BUG(); 2602 } 2603 break; 2604 case 6: 2605 2606 switch (algorithm) { 2607 case ALGORITHM_LEFT_ASYMMETRIC: 2608 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 2609 qd_idx = pd_idx + 1; 2610 if (pd_idx == raid_disks-1) { 2611 (*dd_idx)++; /* Q D D D P */ 2612 qd_idx = 0; 2613 } else if (*dd_idx >= pd_idx) 2614 (*dd_idx) += 2; /* D D P Q D */ 2615 break; 2616 case ALGORITHM_RIGHT_ASYMMETRIC: 2617 pd_idx = sector_div(stripe2, raid_disks); 2618 qd_idx = pd_idx + 1; 2619 if (pd_idx == raid_disks-1) { 2620 (*dd_idx)++; /* Q D D D P */ 2621 qd_idx = 0; 2622 } else if (*dd_idx >= pd_idx) 2623 (*dd_idx) += 2; /* D D P Q D */ 2624 break; 2625 case ALGORITHM_LEFT_SYMMETRIC: 2626 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 2627 qd_idx = (pd_idx + 1) % raid_disks; 2628 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 2629 break; 2630 case ALGORITHM_RIGHT_SYMMETRIC: 2631 pd_idx = sector_div(stripe2, raid_disks); 2632 qd_idx = (pd_idx + 1) % raid_disks; 2633 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; 2634 break; 2635 2636 case ALGORITHM_PARITY_0: 2637 pd_idx = 0; 2638 qd_idx = 1; 2639 (*dd_idx) += 2; 2640 break; 2641 case ALGORITHM_PARITY_N: 2642 pd_idx = data_disks; 2643 qd_idx = data_disks + 1; 2644 break; 2645 2646 case ALGORITHM_ROTATING_ZERO_RESTART: 2647 /* Exactly the same as RIGHT_ASYMMETRIC, but or 2648 * of blocks for computing Q is different. 
2649 */ 2650 pd_idx = sector_div(stripe2, raid_disks); 2651 qd_idx = pd_idx + 1; 2652 if (pd_idx == raid_disks-1) { 2653 (*dd_idx)++; /* Q D D D P */ 2654 qd_idx = 0; 2655 } else if (*dd_idx >= pd_idx) 2656 (*dd_idx) += 2; /* D D P Q D */ 2657 ddf_layout = 1; 2658 break; 2659 2660 case ALGORITHM_ROTATING_N_RESTART: 2661 /* Same a left_asymmetric, by first stripe is 2662 * D D D P Q rather than 2663 * Q D D D P 2664 */ 2665 stripe2 += 1; 2666 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 2667 qd_idx = pd_idx + 1; 2668 if (pd_idx == raid_disks-1) { 2669 (*dd_idx)++; /* Q D D D P */ 2670 qd_idx = 0; 2671 } else if (*dd_idx >= pd_idx) 2672 (*dd_idx) += 2; /* D D P Q D */ 2673 ddf_layout = 1; 2674 break; 2675 2676 case ALGORITHM_ROTATING_N_CONTINUE: 2677 /* Same as left_symmetric but Q is before P */ 2678 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); 2679 qd_idx = (pd_idx + raid_disks - 1) % raid_disks; 2680 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; 2681 ddf_layout = 1; 2682 break; 2683 2684 case ALGORITHM_LEFT_ASYMMETRIC_6: 2685 /* RAID5 left_asymmetric, with Q on last device */ 2686 pd_idx = data_disks - sector_div(stripe2, raid_disks-1); 2687 if (*dd_idx >= pd_idx) 2688 (*dd_idx)++; 2689 qd_idx = raid_disks - 1; 2690 break; 2691 2692 case ALGORITHM_RIGHT_ASYMMETRIC_6: 2693 pd_idx = sector_div(stripe2, raid_disks-1); 2694 if (*dd_idx >= pd_idx) 2695 (*dd_idx)++; 2696 qd_idx = raid_disks - 1; 2697 break; 2698 2699 case ALGORITHM_LEFT_SYMMETRIC_6: 2700 pd_idx = data_disks - sector_div(stripe2, raid_disks-1); 2701 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 2702 qd_idx = raid_disks - 1; 2703 break; 2704 2705 case ALGORITHM_RIGHT_SYMMETRIC_6: 2706 pd_idx = sector_div(stripe2, raid_disks-1); 2707 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); 2708 qd_idx = raid_disks - 1; 2709 break; 2710 2711 case ALGORITHM_PARITY_0_6: 2712 pd_idx = 0; 2713 (*dd_idx)++; 2714 qd_idx = raid_disks - 1; 2715 break; 2716 2717 default: 2718 BUG(); 2719 } 2720 break; 2721 } 2722 2723 if (sh) { 2724 sh->pd_idx = pd_idx; 2725 sh->qd_idx = qd_idx; 2726 sh->ddf_layout = ddf_layout; 2727 } 2728 /* 2729 * Finally, compute the new sector number 2730 */ 2731 new_sector = (sector_t)stripe * sectors_per_chunk + chunk_offset; 2732 return new_sector; 2733 } 2734 2735 static sector_t compute_blocknr(struct stripe_head *sh, int i, int previous) 2736 { 2737 struct r5conf *conf = sh->raid_conf; 2738 int raid_disks = sh->disks; 2739 int data_disks = raid_disks - conf->max_degraded; 2740 sector_t new_sector = sh->sector, check; 2741 int sectors_per_chunk = previous ? conf->prev_chunk_sectors 2742 : conf->chunk_sectors; 2743 int algorithm = previous ? 
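/*
 * Editor's aside (illustrative only): compute_blocknr() below is the inverse
 * of raid5_compute_sector() - given a device slot within a stripe it recovers
 * the array-relative sector, and it finishes by mapping that sector forward
 * again as a self-check.  A standalone inverse for the left-symmetric RAID5
 * model sketched earlier (invented name, single parity assumed):
 *
 *   #include <stdint.h>
 *
 *   static uint64_t unmap_left_symmetric(uint64_t stripe, unsigned int chunk_offset,
 *                                        int disk, int pd_idx, int raid_disks,
 *                                        int sectors_per_chunk)
 *   {
 *           int data_disks = raid_disks - 1;
 *           int i = disk;
 *
 *           if (i < pd_idx)
 *                   i += raid_disks;
 *           i -= pd_idx + 1;        // logical data slot within the stripe
 *
 *           uint64_t chunk_number = stripe * (uint64_t)data_disks + (unsigned int)i;
 *           return chunk_number * sectors_per_chunk + chunk_offset;
 *   }
 *
 * Round trip with the earlier numbers (raid_disks = 4, 8-sector chunks):
 * sector 100 mapped to stripe 4, offset 4, dd_idx 0, pd_idx 3, and
 * unmap_left_symmetric(4, 4, 0, 3, 4, 8) = (4*3 + 0)*8 + 4 = 100 again.
 */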
conf->prev_algo 2744 : conf->algorithm; 2745 sector_t stripe; 2746 int chunk_offset; 2747 sector_t chunk_number; 2748 int dummy1, dd_idx = i; 2749 sector_t r_sector; 2750 struct stripe_head sh2; 2751 2752 chunk_offset = sector_div(new_sector, sectors_per_chunk); 2753 stripe = new_sector; 2754 2755 if (i == sh->pd_idx) 2756 return 0; 2757 switch(conf->level) { 2758 case 4: break; 2759 case 5: 2760 switch (algorithm) { 2761 case ALGORITHM_LEFT_ASYMMETRIC: 2762 case ALGORITHM_RIGHT_ASYMMETRIC: 2763 if (i > sh->pd_idx) 2764 i--; 2765 break; 2766 case ALGORITHM_LEFT_SYMMETRIC: 2767 case ALGORITHM_RIGHT_SYMMETRIC: 2768 if (i < sh->pd_idx) 2769 i += raid_disks; 2770 i -= (sh->pd_idx + 1); 2771 break; 2772 case ALGORITHM_PARITY_0: 2773 i -= 1; 2774 break; 2775 case ALGORITHM_PARITY_N: 2776 break; 2777 default: 2778 BUG(); 2779 } 2780 break; 2781 case 6: 2782 if (i == sh->qd_idx) 2783 return 0; /* It is the Q disk */ 2784 switch (algorithm) { 2785 case ALGORITHM_LEFT_ASYMMETRIC: 2786 case ALGORITHM_RIGHT_ASYMMETRIC: 2787 case ALGORITHM_ROTATING_ZERO_RESTART: 2788 case ALGORITHM_ROTATING_N_RESTART: 2789 if (sh->pd_idx == raid_disks-1) 2790 i--; /* Q D D D P */ 2791 else if (i > sh->pd_idx) 2792 i -= 2; /* D D P Q D */ 2793 break; 2794 case ALGORITHM_LEFT_SYMMETRIC: 2795 case ALGORITHM_RIGHT_SYMMETRIC: 2796 if (sh->pd_idx == raid_disks-1) 2797 i--; /* Q D D D P */ 2798 else { 2799 /* D D P Q D */ 2800 if (i < sh->pd_idx) 2801 i += raid_disks; 2802 i -= (sh->pd_idx + 2); 2803 } 2804 break; 2805 case ALGORITHM_PARITY_0: 2806 i -= 2; 2807 break; 2808 case ALGORITHM_PARITY_N: 2809 break; 2810 case ALGORITHM_ROTATING_N_CONTINUE: 2811 /* Like left_symmetric, but P is before Q */ 2812 if (sh->pd_idx == 0) 2813 i--; /* P D D D Q */ 2814 else { 2815 /* D D Q P D */ 2816 if (i < sh->pd_idx) 2817 i += raid_disks; 2818 i -= (sh->pd_idx + 1); 2819 } 2820 break; 2821 case ALGORITHM_LEFT_ASYMMETRIC_6: 2822 case ALGORITHM_RIGHT_ASYMMETRIC_6: 2823 if (i > sh->pd_idx) 2824 i--; 2825 break; 2826 case ALGORITHM_LEFT_SYMMETRIC_6: 2827 case ALGORITHM_RIGHT_SYMMETRIC_6: 2828 if (i < sh->pd_idx) 2829 i += data_disks + 1; 2830 i -= (sh->pd_idx + 1); 2831 break; 2832 case ALGORITHM_PARITY_0_6: 2833 i -= 1; 2834 break; 2835 default: 2836 BUG(); 2837 } 2838 break; 2839 } 2840 2841 chunk_number = stripe * data_disks + i; 2842 r_sector = chunk_number * sectors_per_chunk + chunk_offset; 2843 2844 check = raid5_compute_sector(conf, r_sector, 2845 previous, &dummy1, &sh2); 2846 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx 2847 || sh2.qd_idx != sh->qd_idx) { 2848 printk(KERN_ERR "md/raid:%s: compute_blocknr: map not correct\n", 2849 mdname(conf->mddev)); 2850 return 0; 2851 } 2852 return r_sector; 2853 } 2854 2855 static void 2856 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, 2857 int rcw, int expand) 2858 { 2859 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks; 2860 struct r5conf *conf = sh->raid_conf; 2861 int level = conf->level; 2862 2863 if (rcw) { 2864 2865 for (i = disks; i--; ) { 2866 struct r5dev *dev = &sh->dev[i]; 2867 2868 if (dev->towrite) { 2869 set_bit(R5_LOCKED, &dev->flags); 2870 set_bit(R5_Wantdrain, &dev->flags); 2871 if (!expand) 2872 clear_bit(R5_UPTODATE, &dev->flags); 2873 s->locked++; 2874 } 2875 } 2876 /* if we are not expanding this is a proper write request, and 2877 * there will be bios with new data to be drained into the 2878 * stripe cache 2879 */ 2880 if (!expand) { 2881 if (!s->locked) 2882 /* False alarm, nothing to do */ 
2883 return; 2884 sh->reconstruct_state = reconstruct_state_drain_run; 2885 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 2886 } else 2887 sh->reconstruct_state = reconstruct_state_run; 2888 2889 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); 2890 2891 if (s->locked + conf->max_degraded == disks) 2892 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) 2893 atomic_inc(&conf->pending_full_writes); 2894 } else { 2895 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || 2896 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); 2897 BUG_ON(level == 6 && 2898 (!(test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags) || 2899 test_bit(R5_Wantcompute, &sh->dev[qd_idx].flags)))); 2900 2901 for (i = disks; i--; ) { 2902 struct r5dev *dev = &sh->dev[i]; 2903 if (i == pd_idx || i == qd_idx) 2904 continue; 2905 2906 if (dev->towrite && 2907 (test_bit(R5_UPTODATE, &dev->flags) || 2908 test_bit(R5_Wantcompute, &dev->flags))) { 2909 set_bit(R5_Wantdrain, &dev->flags); 2910 set_bit(R5_LOCKED, &dev->flags); 2911 clear_bit(R5_UPTODATE, &dev->flags); 2912 s->locked++; 2913 } 2914 } 2915 if (!s->locked) 2916 /* False alarm - nothing to do */ 2917 return; 2918 sh->reconstruct_state = reconstruct_state_prexor_drain_run; 2919 set_bit(STRIPE_OP_PREXOR, &s->ops_request); 2920 set_bit(STRIPE_OP_BIODRAIN, &s->ops_request); 2921 set_bit(STRIPE_OP_RECONSTRUCT, &s->ops_request); 2922 } 2923 2924 /* keep the parity disk(s) locked while asynchronous operations 2925 * are in flight 2926 */ 2927 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); 2928 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 2929 s->locked++; 2930 2931 if (level == 6) { 2932 int qd_idx = sh->qd_idx; 2933 struct r5dev *dev = &sh->dev[qd_idx]; 2934 2935 set_bit(R5_LOCKED, &dev->flags); 2936 clear_bit(R5_UPTODATE, &dev->flags); 2937 s->locked++; 2938 } 2939 2940 pr_debug("%s: stripe %llu locked: %d ops_request: %lx\n", 2941 __func__, (unsigned long long)sh->sector, 2942 s->locked, s->ops_request); 2943 } 2944 2945 /* 2946 * Each stripe/dev can have one or more bion attached. 2947 * toread/towrite point to the first in a chain. 2948 * The bi_next chain must be in order. 2949 */ 2950 static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, 2951 int forwrite, int previous) 2952 { 2953 struct bio **bip; 2954 struct r5conf *conf = sh->raid_conf; 2955 int firstwrite=0; 2956 2957 pr_debug("adding bi b#%llu to stripe s#%llu\n", 2958 (unsigned long long)bi->bi_iter.bi_sector, 2959 (unsigned long long)sh->sector); 2960 2961 /* 2962 * If several bio share a stripe. The bio bi_phys_segments acts as a 2963 * reference count to avoid race. The reference count should already be 2964 * increased before this function is called (for example, in 2965 * make_request()), so other bio sharing this stripe will not free the 2966 * stripe. If a stripe is owned by one stripe, the stripe lock will 2967 * protect it. 
2968 */ 2969 spin_lock_irq(&sh->stripe_lock); 2970 /* Don't allow new IO added to stripes in batch list */ 2971 if (sh->batch_head) 2972 goto overlap; 2973 if (forwrite) { 2974 bip = &sh->dev[dd_idx].towrite; 2975 if (*bip == NULL) 2976 firstwrite = 1; 2977 } else 2978 bip = &sh->dev[dd_idx].toread; 2979 while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) { 2980 if (bio_end_sector(*bip) > bi->bi_iter.bi_sector) 2981 goto overlap; 2982 bip = & (*bip)->bi_next; 2983 } 2984 if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi)) 2985 goto overlap; 2986 2987 if (!forwrite || previous) 2988 clear_bit(STRIPE_BATCH_READY, &sh->state); 2989 2990 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); 2991 if (*bip) 2992 bi->bi_next = *bip; 2993 *bip = bi; 2994 raid5_inc_bi_active_stripes(bi); 2995 2996 if (forwrite) { 2997 /* check if page is covered */ 2998 sector_t sector = sh->dev[dd_idx].sector; 2999 for (bi=sh->dev[dd_idx].towrite; 3000 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && 3001 bi && bi->bi_iter.bi_sector <= sector; 3002 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { 3003 if (bio_end_sector(bi) >= sector) 3004 sector = bio_end_sector(bi); 3005 } 3006 if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS) 3007 if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags)) 3008 sh->overwrite_disks++; 3009 } 3010 3011 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", 3012 (unsigned long long)(*bip)->bi_iter.bi_sector, 3013 (unsigned long long)sh->sector, dd_idx); 3014 3015 if (conf->mddev->bitmap && firstwrite) { 3016 /* Cannot hold spinlock over bitmap_startwrite, 3017 * but must ensure this isn't added to a batch until 3018 * we have added to the bitmap and set bm_seq. 3019 * So set STRIPE_BITMAP_PENDING to prevent 3020 * batching. 3021 * If multiple add_stripe_bio() calls race here they 3022 * much all set STRIPE_BITMAP_PENDING. So only the first one 3023 * to complete "bitmap_startwrite" gets to set 3024 * STRIPE_BIT_DELAY. This is important as once a stripe 3025 * is added to a batch, STRIPE_BIT_DELAY cannot be changed 3026 * any more. 3027 */ 3028 set_bit(STRIPE_BITMAP_PENDING, &sh->state); 3029 spin_unlock_irq(&sh->stripe_lock); 3030 bitmap_startwrite(conf->mddev->bitmap, sh->sector, 3031 STRIPE_SECTORS, 0); 3032 spin_lock_irq(&sh->stripe_lock); 3033 clear_bit(STRIPE_BITMAP_PENDING, &sh->state); 3034 if (!sh->batch_head) { 3035 sh->bm_seq = conf->seq_flush+1; 3036 set_bit(STRIPE_BIT_DELAY, &sh->state); 3037 } 3038 } 3039 spin_unlock_irq(&sh->stripe_lock); 3040 3041 if (stripe_can_batch(sh)) 3042 stripe_add_to_batch_list(conf, sh); 3043 return 1; 3044 3045 overlap: 3046 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); 3047 spin_unlock_irq(&sh->stripe_lock); 3048 return 0; 3049 } 3050 3051 static void end_reshape(struct r5conf *conf); 3052 3053 static void stripe_set_idx(sector_t stripe, struct r5conf *conf, int previous, 3054 struct stripe_head *sh) 3055 { 3056 int sectors_per_chunk = 3057 previous ? conf->prev_chunk_sectors : conf->chunk_sectors; 3058 int dd_idx; 3059 int chunk_offset = sector_div(stripe, sectors_per_chunk); 3060 int disks = previous ? 
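/*
 * Editor's aside (illustrative only): the "is the page covered" walk in
 * add_stripe_bio() above decides R5_OVERWRITE by sweeping a cursor across the
 * sector-sorted towrite chain and checking that the writes reach the end of
 * the stripe page without leaving a gap.  The same idea on a plain array of
 * half-open intervals (names invented for the example):
 *
 *   #include <stdbool.h>
 *   #include <stddef.h>
 *   #include <stdint.h>
 *
 *   struct w_req { uint64_t start, end; };   // [start, end), sorted by start
 *
 *   static bool fully_covered(const struct w_req *reqs, size_t n,
 *                             uint64_t dev_sector, unsigned int stripe_sectors)
 *   {
 *           uint64_t cursor = dev_sector;
 *
 *           for (size_t i = 0; i < n && cursor < dev_sector + stripe_sectors; i++) {
 *                   if (reqs[i].start > cursor)
 *                           break;          // gap - not a full overwrite
 *                   if (reqs[i].end > cursor)
 *                           cursor = reqs[i].end;
 *           }
 *           return cursor >= dev_sector + stripe_sectors;
 *   }
 */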
conf->previous_raid_disks : conf->raid_disks; 3061 3062 raid5_compute_sector(conf, 3063 stripe * (disks - conf->max_degraded) 3064 *sectors_per_chunk + chunk_offset, 3065 previous, 3066 &dd_idx, sh); 3067 } 3068 3069 static void 3070 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, 3071 struct stripe_head_state *s, int disks, 3072 struct bio **return_bi) 3073 { 3074 int i; 3075 BUG_ON(sh->batch_head); 3076 for (i = disks; i--; ) { 3077 struct bio *bi; 3078 int bitmap_end = 0; 3079 3080 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { 3081 struct md_rdev *rdev; 3082 rcu_read_lock(); 3083 rdev = rcu_dereference(conf->disks[i].rdev); 3084 if (rdev && test_bit(In_sync, &rdev->flags)) 3085 atomic_inc(&rdev->nr_pending); 3086 else 3087 rdev = NULL; 3088 rcu_read_unlock(); 3089 if (rdev) { 3090 if (!rdev_set_badblocks( 3091 rdev, 3092 sh->sector, 3093 STRIPE_SECTORS, 0)) 3094 md_error(conf->mddev, rdev); 3095 rdev_dec_pending(rdev, conf->mddev); 3096 } 3097 } 3098 spin_lock_irq(&sh->stripe_lock); 3099 /* fail all writes first */ 3100 bi = sh->dev[i].towrite; 3101 sh->dev[i].towrite = NULL; 3102 sh->overwrite_disks = 0; 3103 spin_unlock_irq(&sh->stripe_lock); 3104 if (bi) 3105 bitmap_end = 1; 3106 3107 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 3108 wake_up(&conf->wait_for_overlap); 3109 3110 while (bi && bi->bi_iter.bi_sector < 3111 sh->dev[i].sector + STRIPE_SECTORS) { 3112 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 3113 3114 bi->bi_error = -EIO; 3115 if (!raid5_dec_bi_active_stripes(bi)) { 3116 md_write_end(conf->mddev); 3117 bi->bi_next = *return_bi; 3118 *return_bi = bi; 3119 } 3120 bi = nextbi; 3121 } 3122 if (bitmap_end) 3123 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 3124 STRIPE_SECTORS, 0, 0); 3125 bitmap_end = 0; 3126 /* and fail all 'written' */ 3127 bi = sh->dev[i].written; 3128 sh->dev[i].written = NULL; 3129 if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) { 3130 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); 3131 sh->dev[i].page = sh->dev[i].orig_page; 3132 } 3133 3134 if (bi) bitmap_end = 1; 3135 while (bi && bi->bi_iter.bi_sector < 3136 sh->dev[i].sector + STRIPE_SECTORS) { 3137 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 3138 3139 bi->bi_error = -EIO; 3140 if (!raid5_dec_bi_active_stripes(bi)) { 3141 md_write_end(conf->mddev); 3142 bi->bi_next = *return_bi; 3143 *return_bi = bi; 3144 } 3145 bi = bi2; 3146 } 3147 3148 /* fail any reads if this device is non-operational and 3149 * the data has not reached the cache yet. 
3150 */ 3151 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && 3152 (!test_bit(R5_Insync, &sh->dev[i].flags) || 3153 test_bit(R5_ReadError, &sh->dev[i].flags))) { 3154 spin_lock_irq(&sh->stripe_lock); 3155 bi = sh->dev[i].toread; 3156 sh->dev[i].toread = NULL; 3157 spin_unlock_irq(&sh->stripe_lock); 3158 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 3159 wake_up(&conf->wait_for_overlap); 3160 while (bi && bi->bi_iter.bi_sector < 3161 sh->dev[i].sector + STRIPE_SECTORS) { 3162 struct bio *nextbi = 3163 r5_next_bio(bi, sh->dev[i].sector); 3164 3165 bi->bi_error = -EIO; 3166 if (!raid5_dec_bi_active_stripes(bi)) { 3167 bi->bi_next = *return_bi; 3168 *return_bi = bi; 3169 } 3170 bi = nextbi; 3171 } 3172 } 3173 if (bitmap_end) 3174 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 3175 STRIPE_SECTORS, 0, 0); 3176 /* If we were in the middle of a write the parity block might 3177 * still be locked - so just clear all R5_LOCKED flags 3178 */ 3179 clear_bit(R5_LOCKED, &sh->dev[i].flags); 3180 } 3181 3182 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 3183 if (atomic_dec_and_test(&conf->pending_full_writes)) 3184 md_wakeup_thread(conf->mddev->thread); 3185 } 3186 3187 static void 3188 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, 3189 struct stripe_head_state *s) 3190 { 3191 int abort = 0; 3192 int i; 3193 3194 BUG_ON(sh->batch_head); 3195 clear_bit(STRIPE_SYNCING, &sh->state); 3196 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) 3197 wake_up(&conf->wait_for_overlap); 3198 s->syncing = 0; 3199 s->replacing = 0; 3200 /* There is nothing more to do for sync/check/repair. 3201 * Don't even need to abort as that is handled elsewhere 3202 * if needed, and not always wanted e.g. if there is a known 3203 * bad block here. 3204 * For recover/replace we need to record a bad block on all 3205 * non-sync devices, or abort the recovery 3206 */ 3207 if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) { 3208 /* During recovery devices cannot be removed, so 3209 * locking and refcounting of rdevs is not needed 3210 */ 3211 for (i = 0; i < conf->raid_disks; i++) { 3212 struct md_rdev *rdev = conf->disks[i].rdev; 3213 if (rdev 3214 && !test_bit(Faulty, &rdev->flags) 3215 && !test_bit(In_sync, &rdev->flags) 3216 && !rdev_set_badblocks(rdev, sh->sector, 3217 STRIPE_SECTORS, 0)) 3218 abort = 1; 3219 rdev = conf->disks[i].replacement; 3220 if (rdev 3221 && !test_bit(Faulty, &rdev->flags) 3222 && !test_bit(In_sync, &rdev->flags) 3223 && !rdev_set_badblocks(rdev, sh->sector, 3224 STRIPE_SECTORS, 0)) 3225 abort = 1; 3226 } 3227 if (abort) 3228 conf->recovery_disabled = 3229 conf->mddev->recovery_disabled; 3230 } 3231 md_done_sync(conf->mddev, STRIPE_SECTORS, !abort); 3232 } 3233 3234 static int want_replace(struct stripe_head *sh, int disk_idx) 3235 { 3236 struct md_rdev *rdev; 3237 int rv = 0; 3238 /* Doing recovery so rcu locking not required */ 3239 rdev = sh->raid_conf->disks[disk_idx].replacement; 3240 if (rdev 3241 && !test_bit(Faulty, &rdev->flags) 3242 && !test_bit(In_sync, &rdev->flags) 3243 && (rdev->recovery_offset <= sh->sector 3244 || rdev->mddev->recovery_cp <= sh->sector)) 3245 rv = 1; 3246 3247 return rv; 3248 } 3249 3250 /* fetch_block - checks the given member device to see if its data needs 3251 * to be read or computed to satisfy a request. 
3252 * 3253 * Returns 1 when no more member devices need to be checked, otherwise returns 3254 * 0 to tell the loop in handle_stripe_fill to continue 3255 */ 3256 3257 static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, 3258 int disk_idx, int disks) 3259 { 3260 struct r5dev *dev = &sh->dev[disk_idx]; 3261 struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]], 3262 &sh->dev[s->failed_num[1]] }; 3263 int i; 3264 3265 3266 if (test_bit(R5_LOCKED, &dev->flags) || 3267 test_bit(R5_UPTODATE, &dev->flags)) 3268 /* No point reading this as we already have it or have 3269 * decided to get it. 3270 */ 3271 return 0; 3272 3273 if (dev->toread || 3274 (dev->towrite && !test_bit(R5_OVERWRITE, &dev->flags))) 3275 /* We need this block to directly satisfy a request */ 3276 return 1; 3277 3278 if (s->syncing || s->expanding || 3279 (s->replacing && want_replace(sh, disk_idx))) 3280 /* When syncing or expanding we read everything. 3281 * When replacing, we need the replaced block. 3282 */ 3283 return 1; 3284 3285 if ((s->failed >= 1 && fdev[0]->toread) || 3286 (s->failed >= 2 && fdev[1]->toread)) 3287 /* If we want to read from a failed device, then 3288 * we need to actually read every other device. 3289 */ 3290 return 1; 3291 3292 /* Sometimes neither read-modify-write nor reconstruct-write 3293 * cycles can work. In those cases we read every block we 3294 * can. Then the parity-update is certain to have enough to 3295 * work with. 3296 * This can only be a problem when we need to write something, 3297 * and some device has failed. If either of those tests 3298 * fails we need look no further. 3299 */ 3300 if (!s->failed || !s->to_write) 3301 return 0; 3302 3303 if (test_bit(R5_Insync, &dev->flags) && 3304 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3305 /* Pre-reads are not permitted until after a short delay 3306 * to gather multiple requests. However if this 3307 * device is not Insync, the block could only be computed 3308 * and there is no need to delay that. 3309 */ 3310 return 0; 3311 3312 for (i = 0; i < s->failed; i++) { 3313 if (fdev[i]->towrite && 3314 !test_bit(R5_UPTODATE, &fdev[i]->flags) && 3315 !test_bit(R5_OVERWRITE, &fdev[i]->flags)) 3316 /* If we have a partial write to a failed 3317 * device, then we will need to reconstruct 3318 * the content of that device, so all other 3319 * devices must be read. 3320 */ 3321 return 1; 3322 } 3323 3324 /* If we are forced to do a reconstruct-write, either because 3325 * the current RAID6 implementation only supports that, or 3326 * because parity cannot be trusted and we are currently 3327 * recovering it, there is extra need to be careful. 3328 * If one of the devices that we would need to read, because 3329 * it is not being overwritten (and maybe not written at all) 3330 * is missing/faulty, then we need to read everything we can. 3331 */ 3332 if (sh->raid_conf->level != 6 && 3333 sh->sector < sh->raid_conf->mddev->recovery_cp) 3334 /* reconstruct-write isn't being forced */ 3335 return 0; 3336 for (i = 0; i < s->failed; i++) { 3337 if (s->failed_num[i] != sh->pd_idx && 3338 s->failed_num[i] != sh->qd_idx && 3339 !test_bit(R5_UPTODATE, &fdev[i]->flags) && 3340 !test_bit(R5_OVERWRITE, &fdev[i]->flags)) 3341 return 1; 3342 } 3343 3344 return 0; 3345 } 3346 3347 static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, 3348 int disk_idx, int disks) 3349 { 3350 struct r5dev *dev = &sh->dev[disk_idx]; 3351 3352 /* is the data in this block needed, and can we get it?
*/ 3353 if (need_this_block(sh, s, disk_idx, disks)) { 3354 /* we would like to get this block, possibly by computing it, 3355 * otherwise read it if the backing disk is insync 3356 */ 3357 BUG_ON(test_bit(R5_Wantcompute, &dev->flags)); 3358 BUG_ON(test_bit(R5_Wantread, &dev->flags)); 3359 BUG_ON(sh->batch_head); 3360 if ((s->uptodate == disks - 1) && 3361 (s->failed && (disk_idx == s->failed_num[0] || 3362 disk_idx == s->failed_num[1]))) { 3363 /* have disk failed, and we're requested to fetch it; 3364 * do compute it 3365 */ 3366 pr_debug("Computing stripe %llu block %d\n", 3367 (unsigned long long)sh->sector, disk_idx); 3368 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 3369 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 3370 set_bit(R5_Wantcompute, &dev->flags); 3371 sh->ops.target = disk_idx; 3372 sh->ops.target2 = -1; /* no 2nd target */ 3373 s->req_compute = 1; 3374 /* Careful: from this point on 'uptodate' is in the eye 3375 * of raid_run_ops which services 'compute' operations 3376 * before writes. R5_Wantcompute flags a block that will 3377 * be R5_UPTODATE by the time it is needed for a 3378 * subsequent operation. 3379 */ 3380 s->uptodate++; 3381 return 1; 3382 } else if (s->uptodate == disks-2 && s->failed >= 2) { 3383 /* Computing 2-failure is *very* expensive; only 3384 * do it if failed >= 2 3385 */ 3386 int other; 3387 for (other = disks; other--; ) { 3388 if (other == disk_idx) 3389 continue; 3390 if (!test_bit(R5_UPTODATE, 3391 &sh->dev[other].flags)) 3392 break; 3393 } 3394 BUG_ON(other < 0); 3395 pr_debug("Computing stripe %llu blocks %d,%d\n", 3396 (unsigned long long)sh->sector, 3397 disk_idx, other); 3398 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 3399 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 3400 set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags); 3401 set_bit(R5_Wantcompute, &sh->dev[other].flags); 3402 sh->ops.target = disk_idx; 3403 sh->ops.target2 = other; 3404 s->uptodate += 2; 3405 s->req_compute = 1; 3406 return 1; 3407 } else if (test_bit(R5_Insync, &dev->flags)) { 3408 set_bit(R5_LOCKED, &dev->flags); 3409 set_bit(R5_Wantread, &dev->flags); 3410 s->locked++; 3411 pr_debug("Reading block %d (sync=%d)\n", 3412 disk_idx, s->syncing); 3413 } 3414 } 3415 3416 return 0; 3417 } 3418 3419 /** 3420 * handle_stripe_fill - read or compute data to satisfy pending requests. 3421 */ 3422 static void handle_stripe_fill(struct stripe_head *sh, 3423 struct stripe_head_state *s, 3424 int disks) 3425 { 3426 int i; 3427 3428 /* look for blocks to read/compute, skip this if a compute 3429 * is already in flight, or if the stripe contents are in the 3430 * midst of changing due to a write 3431 */ 3432 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && 3433 !sh->reconstruct_state) 3434 for (i = disks; i--; ) 3435 if (fetch_block(sh, s, i, disks)) 3436 break; 3437 set_bit(STRIPE_HANDLE, &sh->state); 3438 } 3439 3440 static void break_stripe_batch_list(struct stripe_head *head_sh, 3441 unsigned long handle_flags); 3442 /* handle_stripe_clean_event 3443 * any written block on an uptodate or failed drive can be returned. 3444 * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but 3445 * never LOCKED, so we don't need to test 'failed' directly. 
3446 */ 3447 static void handle_stripe_clean_event(struct r5conf *conf, 3448 struct stripe_head *sh, int disks, struct bio **return_bi) 3449 { 3450 int i; 3451 struct r5dev *dev; 3452 int discard_pending = 0; 3453 struct stripe_head *head_sh = sh; 3454 bool do_endio = false; 3455 3456 for (i = disks; i--; ) 3457 if (sh->dev[i].written) { 3458 dev = &sh->dev[i]; 3459 if (!test_bit(R5_LOCKED, &dev->flags) && 3460 (test_bit(R5_UPTODATE, &dev->flags) || 3461 test_bit(R5_Discard, &dev->flags) || 3462 test_bit(R5_SkipCopy, &dev->flags))) { 3463 /* We can return any write requests */ 3464 struct bio *wbi, *wbi2; 3465 pr_debug("Return write for disc %d\n", i); 3466 if (test_and_clear_bit(R5_Discard, &dev->flags)) 3467 clear_bit(R5_UPTODATE, &dev->flags); 3468 if (test_and_clear_bit(R5_SkipCopy, &dev->flags)) { 3469 WARN_ON(test_bit(R5_UPTODATE, &dev->flags)); 3470 } 3471 do_endio = true; 3472 3473 returnbi: 3474 dev->page = dev->orig_page; 3475 wbi = dev->written; 3476 dev->written = NULL; 3477 while (wbi && wbi->bi_iter.bi_sector < 3478 dev->sector + STRIPE_SECTORS) { 3479 wbi2 = r5_next_bio(wbi, dev->sector); 3480 if (!raid5_dec_bi_active_stripes(wbi)) { 3481 md_write_end(conf->mddev); 3482 wbi->bi_next = *return_bi; 3483 *return_bi = wbi; 3484 } 3485 wbi = wbi2; 3486 } 3487 bitmap_endwrite(conf->mddev->bitmap, sh->sector, 3488 STRIPE_SECTORS, 3489 !test_bit(STRIPE_DEGRADED, &sh->state), 3490 0); 3491 if (head_sh->batch_head) { 3492 sh = list_first_entry(&sh->batch_list, 3493 struct stripe_head, 3494 batch_list); 3495 if (sh != head_sh) { 3496 dev = &sh->dev[i]; 3497 goto returnbi; 3498 } 3499 } 3500 sh = head_sh; 3501 dev = &sh->dev[i]; 3502 } else if (test_bit(R5_Discard, &dev->flags)) 3503 discard_pending = 1; 3504 WARN_ON(test_bit(R5_SkipCopy, &dev->flags)); 3505 WARN_ON(dev->page != dev->orig_page); 3506 } 3507 if (!discard_pending && 3508 test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { 3509 clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); 3510 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); 3511 if (sh->qd_idx >= 0) { 3512 clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); 3513 clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags); 3514 } 3515 /* now that discard is done we can proceed with any sync */ 3516 clear_bit(STRIPE_DISCARD, &sh->state); 3517 /* 3518 * SCSI discard will change some bio fields and the stripe has 3519 * no updated data, so remove it from hash list and the stripe 3520 * will be reinitialized 3521 */ 3522 spin_lock_irq(&conf->device_lock); 3523 unhash: 3524 remove_hash(sh); 3525 if (head_sh->batch_head) { 3526 sh = list_first_entry(&sh->batch_list, 3527 struct stripe_head, batch_list); 3528 if (sh != head_sh) 3529 goto unhash; 3530 } 3531 spin_unlock_irq(&conf->device_lock); 3532 sh = head_sh; 3533 3534 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) 3535 set_bit(STRIPE_HANDLE, &sh->state); 3536 3537 } 3538 3539 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) 3540 if (atomic_dec_and_test(&conf->pending_full_writes)) 3541 md_wakeup_thread(conf->mddev->thread); 3542 3543 if (head_sh->batch_head && do_endio) 3544 break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS); 3545 } 3546 3547 static void handle_stripe_dirtying(struct r5conf *conf, 3548 struct stripe_head *sh, 3549 struct stripe_head_state *s, 3550 int disks) 3551 { 3552 int rmw = 0, rcw = 0, i; 3553 sector_t recovery_cp = conf->mddev->recovery_cp; 3554 3555 /* Check whether resync is now happening or should start. 
3556 * If yes, then the array is dirty (after unclean shutdown or 3557 * initial creation), so parity in some stripes might be inconsistent. 3558 * In this case, we need to always do reconstruct-write, to ensure 3559 * that in case of drive failure or read-error correction, we 3560 * generate correct data from the parity. 3561 */ 3562 if (conf->rmw_level == PARITY_DISABLE_RMW || 3563 (recovery_cp < MaxSector && sh->sector >= recovery_cp && 3564 s->failed == 0)) { 3565 /* Calculate the real rcw later - for now make it 3566 * look like rcw is cheaper 3567 */ 3568 rcw = 1; rmw = 2; 3569 pr_debug("force RCW rmw_level=%u, recovery_cp=%llu sh->sector=%llu\n", 3570 conf->rmw_level, (unsigned long long)recovery_cp, 3571 (unsigned long long)sh->sector); 3572 } else for (i = disks; i--; ) { 3573 /* would I have to read this buffer for read_modify_write */ 3574 struct r5dev *dev = &sh->dev[i]; 3575 if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) && 3576 !test_bit(R5_LOCKED, &dev->flags) && 3577 !(test_bit(R5_UPTODATE, &dev->flags) || 3578 test_bit(R5_Wantcompute, &dev->flags))) { 3579 if (test_bit(R5_Insync, &dev->flags)) 3580 rmw++; 3581 else 3582 rmw += 2*disks; /* cannot read it */ 3583 } 3584 /* Would I have to read this buffer for reconstruct_write */ 3585 if (!test_bit(R5_OVERWRITE, &dev->flags) && 3586 i != sh->pd_idx && i != sh->qd_idx && 3587 !test_bit(R5_LOCKED, &dev->flags) && 3588 !(test_bit(R5_UPTODATE, &dev->flags) || 3589 test_bit(R5_Wantcompute, &dev->flags))) { 3590 if (test_bit(R5_Insync, &dev->flags)) 3591 rcw++; 3592 else 3593 rcw += 2*disks; 3594 } 3595 } 3596 pr_debug("for sector %llu, rmw=%d rcw=%d\n", 3597 (unsigned long long)sh->sector, rmw, rcw); 3598 set_bit(STRIPE_HANDLE, &sh->state); 3599 if ((rmw < rcw || (rmw == rcw && conf->rmw_level == PARITY_ENABLE_RMW)) && rmw > 0) { 3600 /* prefer read-modify-write, but need to get some data */ 3601 if (conf->mddev->queue) 3602 blk_add_trace_msg(conf->mddev->queue, 3603 "raid5 rmw %llu %d", 3604 (unsigned long long)sh->sector, rmw); 3605 for (i = disks; i--; ) { 3606 struct r5dev *dev = &sh->dev[i]; 3607 if ((dev->towrite || i == sh->pd_idx || i == sh->qd_idx) && 3608 !test_bit(R5_LOCKED, &dev->flags) && 3609 !(test_bit(R5_UPTODATE, &dev->flags) || 3610 test_bit(R5_Wantcompute, &dev->flags)) && 3611 test_bit(R5_Insync, &dev->flags)) { 3612 if (test_bit(STRIPE_PREREAD_ACTIVE, 3613 &sh->state)) { 3614 pr_debug("Read_old block %d for r-m-w\n", 3615 i); 3616 set_bit(R5_LOCKED, &dev->flags); 3617 set_bit(R5_Wantread, &dev->flags); 3618 s->locked++; 3619 } else { 3620 set_bit(STRIPE_DELAYED, &sh->state); 3621 set_bit(STRIPE_HANDLE, &sh->state); 3622 } 3623 } 3624 } 3625 } 3626 if ((rcw < rmw || (rcw == rmw && conf->rmw_level != PARITY_ENABLE_RMW)) && rcw > 0) { 3627 /* want reconstruct write, but need to get some data */ 3628 int qread =0; 3629 rcw = 0; 3630 for (i = disks; i--; ) { 3631 struct r5dev *dev = &sh->dev[i]; 3632 if (!test_bit(R5_OVERWRITE, &dev->flags) && 3633 i != sh->pd_idx && i != sh->qd_idx && 3634 !test_bit(R5_LOCKED, &dev->flags) && 3635 !(test_bit(R5_UPTODATE, &dev->flags) || 3636 test_bit(R5_Wantcompute, &dev->flags))) { 3637 rcw++; 3638 if (test_bit(R5_Insync, &dev->flags) && 3639 test_bit(STRIPE_PREREAD_ACTIVE, 3640 &sh->state)) { 3641 pr_debug("Read_old block " 3642 "%d for Reconstruct\n", i); 3643 set_bit(R5_LOCKED, &dev->flags); 3644 set_bit(R5_Wantread, &dev->flags); 3645 s->locked++; 3646 qread++; 3647 } else { 3648 set_bit(STRIPE_DELAYED, &sh->state); 3649 set_bit(STRIPE_HANDLE, &sh->state); 3650 } 
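/*
 * Editor's aside (illustrative only): a stripped-down model of the rmw/rcw
 * counting done earlier in handle_stripe_dirtying().  It covers RAID5 only and
 * ignores the R5_LOCKED/R5_Wantcompute tests and the heavy 2*disks penalty
 * applied to devices that are not in sync; the struct and function names are
 * invented for the example.
 *
 *   #include <stdbool.h>
 *
 *   struct dev_model {
 *           bool towrite;    // new data pending for this block
 *           bool overwrite;  // pending writes cover the whole block
 *           bool uptodate;   // current contents already in the stripe cache
 *   };
 *
 *   // rmw must read the old copy of each rewritten block plus old parity;
 *   // rcw must read every block that is neither fully overwritten nor cached.
 *   static void count_rmw_rcw(const struct dev_model *dev, int disks,
 *                             int pd_idx, int *rmw, int *rcw)
 *   {
 *           *rmw = *rcw = 0;
 *           for (int i = 0; i < disks; i++) {
 *                   bool parity = (i == pd_idx);
 *
 *                   if ((dev[i].towrite || parity) && !dev[i].uptodate)
 *                           (*rmw)++;
 *                   if (!parity && !dev[i].overwrite && !dev[i].uptodate)
 *                           (*rcw)++;
 *           }
 *   }
 *
 * The caller then reads the blocks needed by the cheaper of the two counts,
 * with ties broken by conf->rmw_level, as in the code around this point.
 */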
3651 } 3652 } 3653 if (rcw && conf->mddev->queue) 3654 blk_add_trace_msg(conf->mddev->queue, "raid5 rcw %llu %d %d %d", 3655 (unsigned long long)sh->sector, 3656 rcw, qread, test_bit(STRIPE_DELAYED, &sh->state)); 3657 } 3658 3659 if (rcw > disks && rmw > disks && 3660 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 3661 set_bit(STRIPE_DELAYED, &sh->state); 3662 3663 /* now if nothing is locked, and if we have enough data, 3664 * we can start a write request 3665 */ 3666 /* since handle_stripe can be called at any time we need to handle the 3667 * case where a compute block operation has been submitted and then a 3668 * subsequent call wants to start a write request. raid_run_ops only 3669 * handles the case where compute block and reconstruct are requested 3670 * simultaneously. If this is not the case then new writes need to be 3671 * held off until the compute completes. 3672 */ 3673 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && 3674 (s->locked == 0 && (rcw == 0 || rmw == 0) && 3675 !test_bit(STRIPE_BIT_DELAY, &sh->state))) 3676 schedule_reconstruction(sh, s, rcw == 0, 0); 3677 } 3678 3679 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, 3680 struct stripe_head_state *s, int disks) 3681 { 3682 struct r5dev *dev = NULL; 3683 3684 BUG_ON(sh->batch_head); 3685 set_bit(STRIPE_HANDLE, &sh->state); 3686 3687 switch (sh->check_state) { 3688 case check_state_idle: 3689 /* start a new check operation if there are no failures */ 3690 if (s->failed == 0) { 3691 BUG_ON(s->uptodate != disks); 3692 sh->check_state = check_state_run; 3693 set_bit(STRIPE_OP_CHECK, &s->ops_request); 3694 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); 3695 s->uptodate--; 3696 break; 3697 } 3698 dev = &sh->dev[s->failed_num[0]]; 3699 /* fall through */ 3700 case check_state_compute_result: 3701 sh->check_state = check_state_idle; 3702 if (!dev) 3703 dev = &sh->dev[sh->pd_idx]; 3704 3705 /* check that a write has not made the stripe insync */ 3706 if (test_bit(STRIPE_INSYNC, &sh->state)) 3707 break; 3708 3709 /* either failed parity check, or recovery is happening */ 3710 BUG_ON(!test_bit(R5_UPTODATE, &dev->flags)); 3711 BUG_ON(s->uptodate != disks); 3712 3713 set_bit(R5_LOCKED, &dev->flags); 3714 s->locked++; 3715 set_bit(R5_Wantwrite, &dev->flags); 3716 3717 clear_bit(STRIPE_DEGRADED, &sh->state); 3718 set_bit(STRIPE_INSYNC, &sh->state); 3719 break; 3720 case check_state_run: 3721 break; /* we will be called again upon completion */ 3722 case check_state_check_result: 3723 sh->check_state = check_state_idle; 3724 3725 /* if a failure occurred during the check operation, leave 3726 * STRIPE_INSYNC not set and let the stripe be handled again 3727 */ 3728 if (s->failed) 3729 break; 3730 3731 /* handle a successful check operation, if parity is correct 3732 * we are done. Otherwise update the mismatch count and repair 3733 * parity if !MD_RECOVERY_CHECK 3734 */ 3735 if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0) 3736 /* parity is correct (on disc, 3737 * not in buffer any more) 3738 */ 3739 set_bit(STRIPE_INSYNC, &sh->state); 3740 else { 3741 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); 3742 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 3743 /* don't try to repair!! 
*/ 3744 set_bit(STRIPE_INSYNC, &sh->state); 3745 else { 3746 sh->check_state = check_state_compute_run; 3747 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 3748 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 3749 set_bit(R5_Wantcompute, 3750 &sh->dev[sh->pd_idx].flags); 3751 sh->ops.target = sh->pd_idx; 3752 sh->ops.target2 = -1; 3753 s->uptodate++; 3754 } 3755 } 3756 break; 3757 case check_state_compute_run: 3758 break; 3759 default: 3760 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n", 3761 __func__, sh->check_state, 3762 (unsigned long long) sh->sector); 3763 BUG(); 3764 } 3765 } 3766 3767 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, 3768 struct stripe_head_state *s, 3769 int disks) 3770 { 3771 int pd_idx = sh->pd_idx; 3772 int qd_idx = sh->qd_idx; 3773 struct r5dev *dev; 3774 3775 BUG_ON(sh->batch_head); 3776 set_bit(STRIPE_HANDLE, &sh->state); 3777 3778 BUG_ON(s->failed > 2); 3779 3780 /* Want to check and possibly repair P and Q. 3781 * However there could be one 'failed' device, in which 3782 * case we can only check one of them, possibly using the 3783 * other to generate missing data 3784 */ 3785 3786 switch (sh->check_state) { 3787 case check_state_idle: 3788 /* start a new check operation if there are < 2 failures */ 3789 if (s->failed == s->q_failed) { 3790 /* The only possible failed device holds Q, so it 3791 * makes sense to check P (If anything else were failed, 3792 * we would have used P to recreate it). 3793 */ 3794 sh->check_state = check_state_run; 3795 } 3796 if (!s->q_failed && s->failed < 2) { 3797 /* Q is not failed, and we didn't use it to generate 3798 * anything, so it makes sense to check it 3799 */ 3800 if (sh->check_state == check_state_run) 3801 sh->check_state = check_state_run_pq; 3802 else 3803 sh->check_state = check_state_run_q; 3804 } 3805 3806 /* discard potentially stale zero_sum_result */ 3807 sh->ops.zero_sum_result = 0; 3808 3809 if (sh->check_state == check_state_run) { 3810 /* async_xor_zero_sum destroys the contents of P */ 3811 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); 3812 s->uptodate--; 3813 } 3814 if (sh->check_state >= check_state_run && 3815 sh->check_state <= check_state_run_pq) { 3816 /* async_syndrome_zero_sum preserves P and Q, so 3817 * no need to mark them !uptodate here 3818 */ 3819 set_bit(STRIPE_OP_CHECK, &s->ops_request); 3820 break; 3821 } 3822 3823 /* we have 2-disk failure */ 3824 BUG_ON(s->failed != 2); 3825 /* fall through */ 3826 case check_state_compute_result: 3827 sh->check_state = check_state_idle; 3828 3829 /* check that a write has not made the stripe insync */ 3830 if (test_bit(STRIPE_INSYNC, &sh->state)) 3831 break; 3832 3833 /* now write out any block on a failed drive, 3834 * or P or Q if they were recomputed 3835 */ 3836 BUG_ON(s->uptodate < disks - 1); /* We don't need Q to recover */ 3837 if (s->failed == 2) { 3838 dev = &sh->dev[s->failed_num[1]]; 3839 s->locked++; 3840 set_bit(R5_LOCKED, &dev->flags); 3841 set_bit(R5_Wantwrite, &dev->flags); 3842 } 3843 if (s->failed >= 1) { 3844 dev = &sh->dev[s->failed_num[0]]; 3845 s->locked++; 3846 set_bit(R5_LOCKED, &dev->flags); 3847 set_bit(R5_Wantwrite, &dev->flags); 3848 } 3849 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { 3850 dev = &sh->dev[pd_idx]; 3851 s->locked++; 3852 set_bit(R5_LOCKED, &dev->flags); 3853 set_bit(R5_Wantwrite, &dev->flags); 3854 } 3855 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { 3856 dev = &sh->dev[qd_idx]; 3857 s->locked++; 3858 set_bit(R5_LOCKED, &dev->flags); 3859 
set_bit(R5_Wantwrite, &dev->flags); 3860 } 3861 clear_bit(STRIPE_DEGRADED, &sh->state); 3862 3863 set_bit(STRIPE_INSYNC, &sh->state); 3864 break; 3865 case check_state_run: 3866 case check_state_run_q: 3867 case check_state_run_pq: 3868 break; /* we will be called again upon completion */ 3869 case check_state_check_result: 3870 sh->check_state = check_state_idle; 3871 3872 /* handle a successful check operation, if parity is correct 3873 * we are done. Otherwise update the mismatch count and repair 3874 * parity if !MD_RECOVERY_CHECK 3875 */ 3876 if (sh->ops.zero_sum_result == 0) { 3877 /* both parities are correct */ 3878 if (!s->failed) 3879 set_bit(STRIPE_INSYNC, &sh->state); 3880 else { 3881 /* in contrast to the raid5 case we can validate 3882 * parity, but still have a failure to write 3883 * back 3884 */ 3885 sh->check_state = check_state_compute_result; 3886 /* Returning at this point means that we may go 3887 * off and bring p and/or q uptodate again so 3888 * we make sure to check zero_sum_result again 3889 * to verify if p or q need writeback 3890 */ 3891 } 3892 } else { 3893 atomic64_add(STRIPE_SECTORS, &conf->mddev->resync_mismatches); 3894 if (test_bit(MD_RECOVERY_CHECK, &conf->mddev->recovery)) 3895 /* don't try to repair!! */ 3896 set_bit(STRIPE_INSYNC, &sh->state); 3897 else { 3898 int *target = &sh->ops.target; 3899 3900 sh->ops.target = -1; 3901 sh->ops.target2 = -1; 3902 sh->check_state = check_state_compute_run; 3903 set_bit(STRIPE_COMPUTE_RUN, &sh->state); 3904 set_bit(STRIPE_OP_COMPUTE_BLK, &s->ops_request); 3905 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { 3906 set_bit(R5_Wantcompute, 3907 &sh->dev[pd_idx].flags); 3908 *target = pd_idx; 3909 target = &sh->ops.target2; 3910 s->uptodate++; 3911 } 3912 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { 3913 set_bit(R5_Wantcompute, 3914 &sh->dev[qd_idx].flags); 3915 *target = qd_idx; 3916 s->uptodate++; 3917 } 3918 } 3919 } 3920 break; 3921 case check_state_compute_run: 3922 break; 3923 default: 3924 printk(KERN_ERR "%s: unknown check_state: %d sector: %llu\n", 3925 __func__, sh->check_state, 3926 (unsigned long long) sh->sector); 3927 BUG(); 3928 } 3929 } 3930 3931 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) 3932 { 3933 int i; 3934 3935 /* We have read all the blocks in this stripe and now we need to 3936 * copy some of them into a target stripe for expand. 3937 */ 3938 struct dma_async_tx_descriptor *tx = NULL; 3939 BUG_ON(sh->batch_head); 3940 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); 3941 for (i = 0; i < sh->disks; i++) 3942 if (i != sh->pd_idx && i != sh->qd_idx) { 3943 int dd_idx, j; 3944 struct stripe_head *sh2; 3945 struct async_submit_ctl submit; 3946 3947 sector_t bn = compute_blocknr(sh, i, 1); 3948 sector_t s = raid5_compute_sector(conf, bn, 0, 3949 &dd_idx, NULL); 3950 sh2 = get_active_stripe(conf, s, 0, 1, 1); 3951 if (sh2 == NULL) 3952 /* so far only the early blocks of this stripe 3953 * have been requested. 
When later blocks 3954 * get requested, we will try again 3955 */ 3956 continue; 3957 if (!test_bit(STRIPE_EXPANDING, &sh2->state) || 3958 test_bit(R5_Expanded, &sh2->dev[dd_idx].flags)) { 3959 /* must have already done this block */ 3960 release_stripe(sh2); 3961 continue; 3962 } 3963 3964 /* place all the copies on one channel */ 3965 init_async_submit(&submit, 0, tx, NULL, NULL, NULL); 3966 tx = async_memcpy(sh2->dev[dd_idx].page, 3967 sh->dev[i].page, 0, 0, STRIPE_SIZE, 3968 &submit); 3969 3970 set_bit(R5_Expanded, &sh2->dev[dd_idx].flags); 3971 set_bit(R5_UPTODATE, &sh2->dev[dd_idx].flags); 3972 for (j = 0; j < conf->raid_disks; j++) 3973 if (j != sh2->pd_idx && 3974 j != sh2->qd_idx && 3975 !test_bit(R5_Expanded, &sh2->dev[j].flags)) 3976 break; 3977 if (j == conf->raid_disks) { 3978 set_bit(STRIPE_EXPAND_READY, &sh2->state); 3979 set_bit(STRIPE_HANDLE, &sh2->state); 3980 } 3981 release_stripe(sh2); 3982 3983 } 3984 /* done submitting copies, wait for them to complete */ 3985 async_tx_quiesce(&tx); 3986 } 3987 3988 /* 3989 * handle_stripe - do things to a stripe. 3990 * 3991 * We lock the stripe by setting STRIPE_ACTIVE and then examine the 3992 * state of various bits to see what needs to be done. 3993 * Possible results: 3994 * return some read requests which now have data 3995 * return some write requests which are safely on storage 3996 * schedule a read on some buffers 3997 * schedule a write of some buffers 3998 * return confirmation of parity correctness 3999 * 4000 */ 4001 4002 static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) 4003 { 4004 struct r5conf *conf = sh->raid_conf; 4005 int disks = sh->disks; 4006 struct r5dev *dev; 4007 int i; 4008 int do_recovery = 0; 4009 4010 memset(s, 0, sizeof(*s)); 4011 4012 s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state) && !sh->batch_head; 4013 s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state) && !sh->batch_head; 4014 s->failed_num[0] = -1; 4015 s->failed_num[1] = -1; 4016 4017 /* Now to look around and see what can be done */ 4018 rcu_read_lock(); 4019 for (i=disks; i--; ) { 4020 struct md_rdev *rdev; 4021 sector_t first_bad; 4022 int bad_sectors; 4023 int is_bad = 0; 4024 4025 dev = &sh->dev[i]; 4026 4027 pr_debug("check %d: state 0x%lx read %p write %p written %p\n", 4028 i, dev->flags, 4029 dev->toread, dev->towrite, dev->written); 4030 /* maybe we can reply to a read 4031 * 4032 * new wantfill requests are only permitted while 4033 * ops_complete_biofill is guaranteed to be inactive 4034 */ 4035 if (test_bit(R5_UPTODATE, &dev->flags) && dev->toread && 4036 !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) 4037 set_bit(R5_Wantfill, &dev->flags); 4038 4039 /* now count some things */ 4040 if (test_bit(R5_LOCKED, &dev->flags)) 4041 s->locked++; 4042 if (test_bit(R5_UPTODATE, &dev->flags)) 4043 s->uptodate++; 4044 if (test_bit(R5_Wantcompute, &dev->flags)) { 4045 s->compute++; 4046 BUG_ON(s->compute > 2); 4047 } 4048 4049 if (test_bit(R5_Wantfill, &dev->flags)) 4050 s->to_fill++; 4051 else if (dev->toread) 4052 s->to_read++; 4053 if (dev->towrite) { 4054 s->to_write++; 4055 if (!test_bit(R5_OVERWRITE, &dev->flags)) 4056 s->non_overwrite++; 4057 } 4058 if (dev->written) 4059 s->written++; 4060 /* Prefer to use the replacement for reads, but only 4061 * if it is recovered enough and has no bad blocks. 
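		 * If the replacement cannot be used we fall back to the main
		 * rdev below; a replacement that merely has not recovered this
		 * far yet is remembered via R5_NeedReplace so the stripe will
		 * be copied onto it.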
4062 */ 4063 rdev = rcu_dereference(conf->disks[i].replacement); 4064 if (rdev && !test_bit(Faulty, &rdev->flags) && 4065 rdev->recovery_offset >= sh->sector + STRIPE_SECTORS && 4066 !is_badblock(rdev, sh->sector, STRIPE_SECTORS, 4067 &first_bad, &bad_sectors)) 4068 set_bit(R5_ReadRepl, &dev->flags); 4069 else { 4070 if (rdev && !test_bit(Faulty, &rdev->flags)) 4071 set_bit(R5_NeedReplace, &dev->flags); 4072 else 4073 clear_bit(R5_NeedReplace, &dev->flags); 4074 rdev = rcu_dereference(conf->disks[i].rdev); 4075 clear_bit(R5_ReadRepl, &dev->flags); 4076 } 4077 if (rdev && test_bit(Faulty, &rdev->flags)) 4078 rdev = NULL; 4079 if (rdev) { 4080 is_bad = is_badblock(rdev, sh->sector, STRIPE_SECTORS, 4081 &first_bad, &bad_sectors); 4082 if (s->blocked_rdev == NULL 4083 && (test_bit(Blocked, &rdev->flags) 4084 || is_bad < 0)) { 4085 if (is_bad < 0) 4086 set_bit(BlockedBadBlocks, 4087 &rdev->flags); 4088 s->blocked_rdev = rdev; 4089 atomic_inc(&rdev->nr_pending); 4090 } 4091 } 4092 clear_bit(R5_Insync, &dev->flags); 4093 if (!rdev) 4094 /* Not in-sync */; 4095 else if (is_bad) { 4096 /* also not in-sync */ 4097 if (!test_bit(WriteErrorSeen, &rdev->flags) && 4098 test_bit(R5_UPTODATE, &dev->flags)) { 4099 /* treat as in-sync, but with a read error 4100 * which we can now try to correct 4101 */ 4102 set_bit(R5_Insync, &dev->flags); 4103 set_bit(R5_ReadError, &dev->flags); 4104 } 4105 } else if (test_bit(In_sync, &rdev->flags)) 4106 set_bit(R5_Insync, &dev->flags); 4107 else if (sh->sector + STRIPE_SECTORS <= rdev->recovery_offset) 4108 /* in sync if before recovery_offset */ 4109 set_bit(R5_Insync, &dev->flags); 4110 else if (test_bit(R5_UPTODATE, &dev->flags) && 4111 test_bit(R5_Expanded, &dev->flags)) 4112 /* If we've reshaped into here, we assume it is Insync. 4113 * We will shortly update recovery_offset to make 4114 * it official. 
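		 * (This is the last arm of the chain above: R5_Insync is also
		 * set when the rdev is In_sync, when recovery has already
		 * passed this stripe, or when a known bad block is masked by
		 * an up-to-date copy that can be rewritten via R5_ReadError.)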
4115 */ 4116 set_bit(R5_Insync, &dev->flags); 4117 4118 if (test_bit(R5_WriteError, &dev->flags)) { 4119 /* This flag does not apply to '.replacement' 4120 * only to .rdev, so make sure to check that*/ 4121 struct md_rdev *rdev2 = rcu_dereference( 4122 conf->disks[i].rdev); 4123 if (rdev2 == rdev) 4124 clear_bit(R5_Insync, &dev->flags); 4125 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) { 4126 s->handle_bad_blocks = 1; 4127 atomic_inc(&rdev2->nr_pending); 4128 } else 4129 clear_bit(R5_WriteError, &dev->flags); 4130 } 4131 if (test_bit(R5_MadeGood, &dev->flags)) { 4132 /* This flag does not apply to '.replacement' 4133 * only to .rdev, so make sure to check that*/ 4134 struct md_rdev *rdev2 = rcu_dereference( 4135 conf->disks[i].rdev); 4136 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) { 4137 s->handle_bad_blocks = 1; 4138 atomic_inc(&rdev2->nr_pending); 4139 } else 4140 clear_bit(R5_MadeGood, &dev->flags); 4141 } 4142 if (test_bit(R5_MadeGoodRepl, &dev->flags)) { 4143 struct md_rdev *rdev2 = rcu_dereference( 4144 conf->disks[i].replacement); 4145 if (rdev2 && !test_bit(Faulty, &rdev2->flags)) { 4146 s->handle_bad_blocks = 1; 4147 atomic_inc(&rdev2->nr_pending); 4148 } else 4149 clear_bit(R5_MadeGoodRepl, &dev->flags); 4150 } 4151 if (!test_bit(R5_Insync, &dev->flags)) { 4152 /* The ReadError flag will just be confusing now */ 4153 clear_bit(R5_ReadError, &dev->flags); 4154 clear_bit(R5_ReWrite, &dev->flags); 4155 } 4156 if (test_bit(R5_ReadError, &dev->flags)) 4157 clear_bit(R5_Insync, &dev->flags); 4158 if (!test_bit(R5_Insync, &dev->flags)) { 4159 if (s->failed < 2) 4160 s->failed_num[s->failed] = i; 4161 s->failed++; 4162 if (rdev && !test_bit(Faulty, &rdev->flags)) 4163 do_recovery = 1; 4164 } 4165 } 4166 if (test_bit(STRIPE_SYNCING, &sh->state)) { 4167 /* If there is a failed device being replaced, 4168 * we must be recovering. 4169 * else if we are after recovery_cp, we must be syncing 4170 * else if MD_RECOVERY_REQUESTED is set, we also are syncing. 4171 * else we can only be replacing 4172 * sync and recovery both need to read all devices, and so 4173 * use the same flag. 4174 */ 4175 if (do_recovery || 4176 sh->sector >= conf->mddev->recovery_cp || 4177 test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery))) 4178 s->syncing = 1; 4179 else 4180 s->replacing = 1; 4181 } 4182 rcu_read_unlock(); 4183 } 4184 4185 static int clear_batch_ready(struct stripe_head *sh) 4186 { 4187 /* Return '1' if this is a member of batch, or 4188 * '0' if it is a lone stripe or a head which can now be 4189 * handled. 4190 */ 4191 struct stripe_head *tmp; 4192 if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state)) 4193 return (sh->batch_head && sh->batch_head != sh); 4194 spin_lock(&sh->stripe_lock); 4195 if (!sh->batch_head) { 4196 spin_unlock(&sh->stripe_lock); 4197 return 0; 4198 } 4199 4200 /* 4201 * this stripe could be added to a batch list before we check 4202 * BATCH_READY, skips it 4203 */ 4204 if (sh->batch_head != sh) { 4205 spin_unlock(&sh->stripe_lock); 4206 return 1; 4207 } 4208 spin_lock(&sh->batch_lock); 4209 list_for_each_entry(tmp, &sh->batch_list, batch_list) 4210 clear_bit(STRIPE_BATCH_READY, &tmp->state); 4211 spin_unlock(&sh->batch_lock); 4212 spin_unlock(&sh->stripe_lock); 4213 4214 /* 4215 * BATCH_READY is cleared, no new stripes can be added. 
4216 * batch_list can be accessed without lock 4217 */ 4218 return 0; 4219 } 4220 4221 static void break_stripe_batch_list(struct stripe_head *head_sh, 4222 unsigned long handle_flags) 4223 { 4224 struct stripe_head *sh, *next; 4225 int i; 4226 int do_wakeup = 0; 4227 4228 list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) { 4229 4230 list_del_init(&sh->batch_list); 4231 4232 WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) | 4233 (1 << STRIPE_SYNCING) | 4234 (1 << STRIPE_REPLACED) | 4235 (1 << STRIPE_PREREAD_ACTIVE) | 4236 (1 << STRIPE_DELAYED) | 4237 (1 << STRIPE_BIT_DELAY) | 4238 (1 << STRIPE_FULL_WRITE) | 4239 (1 << STRIPE_BIOFILL_RUN) | 4240 (1 << STRIPE_COMPUTE_RUN) | 4241 (1 << STRIPE_OPS_REQ_PENDING) | 4242 (1 << STRIPE_DISCARD) | 4243 (1 << STRIPE_BATCH_READY) | 4244 (1 << STRIPE_BATCH_ERR) | 4245 (1 << STRIPE_BITMAP_PENDING))); 4246 WARN_ON_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) | 4247 (1 << STRIPE_REPLACED))); 4248 4249 set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS | 4250 (1 << STRIPE_DEGRADED)), 4251 head_sh->state & (1 << STRIPE_INSYNC)); 4252 4253 sh->check_state = head_sh->check_state; 4254 sh->reconstruct_state = head_sh->reconstruct_state; 4255 for (i = 0; i < sh->disks; i++) { 4256 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 4257 do_wakeup = 1; 4258 sh->dev[i].flags = head_sh->dev[i].flags & 4259 (~((1 << R5_WriteError) | (1 << R5_Overlap))); 4260 } 4261 spin_lock_irq(&sh->stripe_lock); 4262 sh->batch_head = NULL; 4263 spin_unlock_irq(&sh->stripe_lock); 4264 if (handle_flags == 0 || 4265 sh->state & handle_flags) 4266 set_bit(STRIPE_HANDLE, &sh->state); 4267 release_stripe(sh); 4268 } 4269 spin_lock_irq(&head_sh->stripe_lock); 4270 head_sh->batch_head = NULL; 4271 spin_unlock_irq(&head_sh->stripe_lock); 4272 for (i = 0; i < head_sh->disks; i++) 4273 if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags)) 4274 do_wakeup = 1; 4275 if (head_sh->state & handle_flags) 4276 set_bit(STRIPE_HANDLE, &head_sh->state); 4277 4278 if (do_wakeup) 4279 wake_up(&head_sh->raid_conf->wait_for_overlap); 4280 } 4281 4282 static void handle_stripe(struct stripe_head *sh) 4283 { 4284 struct stripe_head_state s; 4285 struct r5conf *conf = sh->raid_conf; 4286 int i; 4287 int prexor; 4288 int disks = sh->disks; 4289 struct r5dev *pdev, *qdev; 4290 4291 clear_bit(STRIPE_HANDLE, &sh->state); 4292 if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) { 4293 /* already being handled, ensure it gets handled 4294 * again when current action finishes */ 4295 set_bit(STRIPE_HANDLE, &sh->state); 4296 return; 4297 } 4298 4299 if (clear_batch_ready(sh) ) { 4300 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); 4301 return; 4302 } 4303 4304 if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state)) 4305 break_stripe_batch_list(sh, 0); 4306 4307 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { 4308 spin_lock(&sh->stripe_lock); 4309 /* Cannot process 'sync' concurrently with 'discard' */ 4310 if (!test_bit(STRIPE_DISCARD, &sh->state) && 4311 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { 4312 set_bit(STRIPE_SYNCING, &sh->state); 4313 clear_bit(STRIPE_INSYNC, &sh->state); 4314 clear_bit(STRIPE_REPLACED, &sh->state); 4315 } 4316 spin_unlock(&sh->stripe_lock); 4317 } 4318 clear_bit(STRIPE_DELAYED, &sh->state); 4319 4320 pr_debug("handling stripe %llu, state=%#lx cnt=%d, " 4321 "pd_idx=%d, qd_idx=%d\n, check:%d, reconstruct:%d\n", 4322 (unsigned long long)sh->sector, sh->state, 4323 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx, 4324 
sh->check_state, sh->reconstruct_state); 4325 4326 analyse_stripe(sh, &s); 4327 4328 if (s.handle_bad_blocks) { 4329 set_bit(STRIPE_HANDLE, &sh->state); 4330 goto finish; 4331 } 4332 4333 if (unlikely(s.blocked_rdev)) { 4334 if (s.syncing || s.expanding || s.expanded || 4335 s.replacing || s.to_write || s.written) { 4336 set_bit(STRIPE_HANDLE, &sh->state); 4337 goto finish; 4338 } 4339 /* There is nothing for the blocked_rdev to block */ 4340 rdev_dec_pending(s.blocked_rdev, conf->mddev); 4341 s.blocked_rdev = NULL; 4342 } 4343 4344 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { 4345 set_bit(STRIPE_OP_BIOFILL, &s.ops_request); 4346 set_bit(STRIPE_BIOFILL_RUN, &sh->state); 4347 } 4348 4349 pr_debug("locked=%d uptodate=%d to_read=%d" 4350 " to_write=%d failed=%d failed_num=%d,%d\n", 4351 s.locked, s.uptodate, s.to_read, s.to_write, s.failed, 4352 s.failed_num[0], s.failed_num[1]); 4353 /* check if the array has lost more than max_degraded devices and, 4354 * if so, some requests might need to be failed. 4355 */ 4356 if (s.failed > conf->max_degraded) { 4357 sh->check_state = 0; 4358 sh->reconstruct_state = 0; 4359 break_stripe_batch_list(sh, 0); 4360 if (s.to_read+s.to_write+s.written) 4361 handle_failed_stripe(conf, sh, &s, disks, &s.return_bi); 4362 if (s.syncing + s.replacing) 4363 handle_failed_sync(conf, sh, &s); 4364 } 4365 4366 /* Now we check to see if any write operations have recently 4367 * completed 4368 */ 4369 prexor = 0; 4370 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) 4371 prexor = 1; 4372 if (sh->reconstruct_state == reconstruct_state_drain_result || 4373 sh->reconstruct_state == reconstruct_state_prexor_drain_result) { 4374 sh->reconstruct_state = reconstruct_state_idle; 4375 4376 /* All the 'written' buffers and the parity block are ready to 4377 * be written back to disk 4378 */ 4379 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) && 4380 !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)); 4381 BUG_ON(sh->qd_idx >= 0 && 4382 !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) && 4383 !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags)); 4384 for (i = disks; i--; ) { 4385 struct r5dev *dev = &sh->dev[i]; 4386 if (test_bit(R5_LOCKED, &dev->flags) && 4387 (i == sh->pd_idx || i == sh->qd_idx || 4388 dev->written)) { 4389 pr_debug("Writing block %d\n", i); 4390 set_bit(R5_Wantwrite, &dev->flags); 4391 if (prexor) 4392 continue; 4393 if (s.failed > 1) 4394 continue; 4395 if (!test_bit(R5_Insync, &dev->flags) || 4396 ((i == sh->pd_idx || i == sh->qd_idx) && 4397 s.failed == 0)) 4398 set_bit(STRIPE_INSYNC, &sh->state); 4399 } 4400 } 4401 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 4402 s.dec_preread_active = 1; 4403 } 4404 4405 /* 4406 * might be able to return some write requests if the parity blocks 4407 * are safe, or on a failed drive 4408 */ 4409 pdev = &sh->dev[sh->pd_idx]; 4410 s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) 4411 || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); 4412 qdev = &sh->dev[sh->qd_idx]; 4413 s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) 4414 || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) 4415 || conf->level < 6; 4416 4417 if (s.written && 4418 (s.p_failed || ((test_bit(R5_Insync, &pdev->flags) 4419 && !test_bit(R5_LOCKED, &pdev->flags) 4420 && (test_bit(R5_UPTODATE, &pdev->flags) || 4421 test_bit(R5_Discard, &pdev->flags))))) && 4422 (s.q_failed || ((test_bit(R5_Insync, &qdev->flags) 4423 && !test_bit(R5_LOCKED, &qdev->flags) 4424 && 
(test_bit(R5_UPTODATE, &qdev->flags) || 4425 test_bit(R5_Discard, &qdev->flags)))))) 4426 handle_stripe_clean_event(conf, sh, disks, &s.return_bi); 4427 4428 /* Now we might consider reading some blocks, either to check/generate 4429 * parity, or to satisfy requests 4430 * or to load a block that is being partially written. 4431 */ 4432 if (s.to_read || s.non_overwrite 4433 || (conf->level == 6 && s.to_write && s.failed) 4434 || (s.syncing && (s.uptodate + s.compute < disks)) 4435 || s.replacing 4436 || s.expanding) 4437 handle_stripe_fill(sh, &s, disks); 4438 4439 /* Now to consider new write requests and what else, if anything 4440 * should be read. We do not handle new writes when: 4441 * 1/ A 'write' operation (copy+xor) is already in flight. 4442 * 2/ A 'check' operation is in flight, as it may clobber the parity 4443 * block. 4444 */ 4445 if (s.to_write && !sh->reconstruct_state && !sh->check_state) 4446 handle_stripe_dirtying(conf, sh, &s, disks); 4447 4448 /* maybe we need to check and possibly fix the parity for this stripe 4449 * Any reads will already have been scheduled, so we just see if enough 4450 * data is available. The parity check is held off while parity 4451 * dependent operations are in flight. 4452 */ 4453 if (sh->check_state || 4454 (s.syncing && s.locked == 0 && 4455 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && 4456 !test_bit(STRIPE_INSYNC, &sh->state))) { 4457 if (conf->level == 6) 4458 handle_parity_checks6(conf, sh, &s, disks); 4459 else 4460 handle_parity_checks5(conf, sh, &s, disks); 4461 } 4462 4463 if ((s.replacing || s.syncing) && s.locked == 0 4464 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state) 4465 && !test_bit(STRIPE_REPLACED, &sh->state)) { 4466 /* Write out to replacement devices where possible */ 4467 for (i = 0; i < conf->raid_disks; i++) 4468 if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) { 4469 WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags)); 4470 set_bit(R5_WantReplace, &sh->dev[i].flags); 4471 set_bit(R5_LOCKED, &sh->dev[i].flags); 4472 s.locked++; 4473 } 4474 if (s.replacing) 4475 set_bit(STRIPE_INSYNC, &sh->state); 4476 set_bit(STRIPE_REPLACED, &sh->state); 4477 } 4478 if ((s.syncing || s.replacing) && s.locked == 0 && 4479 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && 4480 test_bit(STRIPE_INSYNC, &sh->state)) { 4481 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 4482 clear_bit(STRIPE_SYNCING, &sh->state); 4483 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) 4484 wake_up(&conf->wait_for_overlap); 4485 } 4486 4487 /* If the failed drives are just a ReadError, then we might need 4488 * to progress the repair/check process 4489 */ 4490 if (s.failed <= conf->max_degraded && !conf->mddev->ro) 4491 for (i = 0; i < s.failed; i++) { 4492 struct r5dev *dev = &sh->dev[s.failed_num[i]]; 4493 if (test_bit(R5_ReadError, &dev->flags) 4494 && !test_bit(R5_LOCKED, &dev->flags) 4495 && test_bit(R5_UPTODATE, &dev->flags) 4496 ) { 4497 if (!test_bit(R5_ReWrite, &dev->flags)) { 4498 set_bit(R5_Wantwrite, &dev->flags); 4499 set_bit(R5_ReWrite, &dev->flags); 4500 set_bit(R5_LOCKED, &dev->flags); 4501 s.locked++; 4502 } else { 4503 /* let's read it back */ 4504 set_bit(R5_Wantread, &dev->flags); 4505 set_bit(R5_LOCKED, &dev->flags); 4506 s.locked++; 4507 } 4508 } 4509 } 4510 4511 /* Finish reconstruct operations initiated by the expansion process */ 4512 if (sh->reconstruct_state == reconstruct_state_result) { 4513 struct stripe_head *sh_src 4514 = get_active_stripe(conf, sh->sector, 1, 1, 1); 4515 if (sh_src && test_bit(STRIPE_EXPAND_SOURCE, 
&sh_src->state)) { 4516 /* sh cannot be written until sh_src has been read. 4517 * so arrange for sh to be delayed a little 4518 */ 4519 set_bit(STRIPE_DELAYED, &sh->state); 4520 set_bit(STRIPE_HANDLE, &sh->state); 4521 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, 4522 &sh_src->state)) 4523 atomic_inc(&conf->preread_active_stripes); 4524 release_stripe(sh_src); 4525 goto finish; 4526 } 4527 if (sh_src) 4528 release_stripe(sh_src); 4529 4530 sh->reconstruct_state = reconstruct_state_idle; 4531 clear_bit(STRIPE_EXPANDING, &sh->state); 4532 for (i = conf->raid_disks; i--; ) { 4533 set_bit(R5_Wantwrite, &sh->dev[i].flags); 4534 set_bit(R5_LOCKED, &sh->dev[i].flags); 4535 s.locked++; 4536 } 4537 } 4538 4539 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && 4540 !sh->reconstruct_state) { 4541 /* Need to write out all blocks after computing parity */ 4542 sh->disks = conf->raid_disks; 4543 stripe_set_idx(sh->sector, conf, 0, sh); 4544 schedule_reconstruction(sh, &s, 1, 1); 4545 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { 4546 clear_bit(STRIPE_EXPAND_READY, &sh->state); 4547 atomic_dec(&conf->reshape_stripes); 4548 wake_up(&conf->wait_for_overlap); 4549 md_done_sync(conf->mddev, STRIPE_SECTORS, 1); 4550 } 4551 4552 if (s.expanding && s.locked == 0 && 4553 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) 4554 handle_stripe_expansion(conf, sh); 4555 4556 finish: 4557 /* wait for this device to become unblocked */ 4558 if (unlikely(s.blocked_rdev)) { 4559 if (conf->mddev->external) 4560 md_wait_for_blocked_rdev(s.blocked_rdev, 4561 conf->mddev); 4562 else 4563 /* Internal metadata will immediately 4564 * be written by raid5d, so we don't 4565 * need to wait here. 4566 */ 4567 rdev_dec_pending(s.blocked_rdev, 4568 conf->mddev); 4569 } 4570 4571 if (s.handle_bad_blocks) 4572 for (i = disks; i--; ) { 4573 struct md_rdev *rdev; 4574 struct r5dev *dev = &sh->dev[i]; 4575 if (test_and_clear_bit(R5_WriteError, &dev->flags)) { 4576 /* We own a safe reference to the rdev */ 4577 rdev = conf->disks[i].rdev; 4578 if (!rdev_set_badblocks(rdev, sh->sector, 4579 STRIPE_SECTORS, 0)) 4580 md_error(conf->mddev, rdev); 4581 rdev_dec_pending(rdev, conf->mddev); 4582 } 4583 if (test_and_clear_bit(R5_MadeGood, &dev->flags)) { 4584 rdev = conf->disks[i].rdev; 4585 rdev_clear_badblocks(rdev, sh->sector, 4586 STRIPE_SECTORS, 0); 4587 rdev_dec_pending(rdev, conf->mddev); 4588 } 4589 if (test_and_clear_bit(R5_MadeGoodRepl, &dev->flags)) { 4590 rdev = conf->disks[i].replacement; 4591 if (!rdev) 4592 /* rdev have been moved down */ 4593 rdev = conf->disks[i].rdev; 4594 rdev_clear_badblocks(rdev, sh->sector, 4595 STRIPE_SECTORS, 0); 4596 rdev_dec_pending(rdev, conf->mddev); 4597 } 4598 } 4599 4600 if (s.ops_request) 4601 raid_run_ops(sh, s.ops_request); 4602 4603 ops_run_io(sh, &s); 4604 4605 if (s.dec_preread_active) { 4606 /* We delay this until after ops_run_io so that if make_request 4607 * is waiting on a flush, it won't continue until the writes 4608 * have actually been submitted. 
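	 * Once the count drops below IO_THRESHOLD the array thread is woken,
	 * so that delayed stripes can be activated again (see
	 * raid5_activate_delayed()).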
4609 */ 4610 atomic_dec(&conf->preread_active_stripes); 4611 if (atomic_read(&conf->preread_active_stripes) < 4612 IO_THRESHOLD) 4613 md_wakeup_thread(conf->mddev->thread); 4614 } 4615 4616 return_io(s.return_bi); 4617 4618 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); 4619 } 4620 4621 static void raid5_activate_delayed(struct r5conf *conf) 4622 { 4623 if (atomic_read(&conf->preread_active_stripes) < IO_THRESHOLD) { 4624 while (!list_empty(&conf->delayed_list)) { 4625 struct list_head *l = conf->delayed_list.next; 4626 struct stripe_head *sh; 4627 sh = list_entry(l, struct stripe_head, lru); 4628 list_del_init(l); 4629 clear_bit(STRIPE_DELAYED, &sh->state); 4630 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 4631 atomic_inc(&conf->preread_active_stripes); 4632 list_add_tail(&sh->lru, &conf->hold_list); 4633 raid5_wakeup_stripe_thread(sh); 4634 } 4635 } 4636 } 4637 4638 static void activate_bit_delay(struct r5conf *conf, 4639 struct list_head *temp_inactive_list) 4640 { 4641 /* device_lock is held */ 4642 struct list_head head; 4643 list_add(&head, &conf->bitmap_list); 4644 list_del_init(&conf->bitmap_list); 4645 while (!list_empty(&head)) { 4646 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); 4647 int hash; 4648 list_del_init(&sh->lru); 4649 atomic_inc(&sh->count); 4650 hash = sh->hash_lock_index; 4651 __release_stripe(conf, sh, &temp_inactive_list[hash]); 4652 } 4653 } 4654 4655 static int raid5_congested(struct mddev *mddev, int bits) 4656 { 4657 struct r5conf *conf = mddev->private; 4658 4659 /* No difference between reads and writes. Just check 4660 * how busy the stripe_cache is 4661 */ 4662 4663 if (test_bit(R5_INACTIVE_BLOCKED, &conf->cache_state)) 4664 return 1; 4665 if (conf->quiesce) 4666 return 1; 4667 if (atomic_read(&conf->empty_inactive_list_nr)) 4668 return 1; 4669 4670 return 0; 4671 } 4672 4673 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) 4674 { 4675 sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev); 4676 unsigned int chunk_sectors = mddev->chunk_sectors; 4677 unsigned int bio_sectors = bio_sectors(bio); 4678 4679 if (mddev->new_chunk_sectors < mddev->chunk_sectors) 4680 chunk_sectors = mddev->new_chunk_sectors; 4681 return chunk_sectors >= 4682 ((sector & (chunk_sectors - 1)) + bio_sectors); 4683 } 4684 4685 /* 4686 * add bio to the retry LIFO ( in O(1) ... we are in interrupt ) 4687 * later sampled by raid5d. 
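 * The list is a simple LIFO threaded through bi_next; raid5d drains it
 * one bio at a time with remove_bio_from_retry() and feeds each bio to
 * retry_aligned_read().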
4688 */ 4689 static void add_bio_to_retry(struct bio *bi,struct r5conf *conf) 4690 { 4691 unsigned long flags; 4692 4693 spin_lock_irqsave(&conf->device_lock, flags); 4694 4695 bi->bi_next = conf->retry_read_aligned_list; 4696 conf->retry_read_aligned_list = bi; 4697 4698 spin_unlock_irqrestore(&conf->device_lock, flags); 4699 md_wakeup_thread(conf->mddev->thread); 4700 } 4701 4702 static struct bio *remove_bio_from_retry(struct r5conf *conf) 4703 { 4704 struct bio *bi; 4705 4706 bi = conf->retry_read_aligned; 4707 if (bi) { 4708 conf->retry_read_aligned = NULL; 4709 return bi; 4710 } 4711 bi = conf->retry_read_aligned_list; 4712 if(bi) { 4713 conf->retry_read_aligned_list = bi->bi_next; 4714 bi->bi_next = NULL; 4715 /* 4716 * this sets the active strip count to 1 and the processed 4717 * strip count to zero (upper 8 bits) 4718 */ 4719 raid5_set_bi_stripes(bi, 1); /* biased count of active stripes */ 4720 } 4721 4722 return bi; 4723 } 4724 4725 /* 4726 * The "raid5_align_endio" should check if the read succeeded and if it 4727 * did, call bio_endio on the original bio (having bio_put the new bio 4728 * first). 4729 * If the read failed.. 4730 */ 4731 static void raid5_align_endio(struct bio *bi) 4732 { 4733 struct bio* raid_bi = bi->bi_private; 4734 struct mddev *mddev; 4735 struct r5conf *conf; 4736 struct md_rdev *rdev; 4737 int error = bi->bi_error; 4738 4739 bio_put(bi); 4740 4741 rdev = (void*)raid_bi->bi_next; 4742 raid_bi->bi_next = NULL; 4743 mddev = rdev->mddev; 4744 conf = mddev->private; 4745 4746 rdev_dec_pending(rdev, conf->mddev); 4747 4748 if (!error) { 4749 trace_block_bio_complete(bdev_get_queue(raid_bi->bi_bdev), 4750 raid_bi, 0); 4751 bio_endio(raid_bi); 4752 if (atomic_dec_and_test(&conf->active_aligned_reads)) 4753 wake_up(&conf->wait_for_quiescent); 4754 return; 4755 } 4756 4757 pr_debug("raid5_align_endio : io error...handing IO for a retry\n"); 4758 4759 add_bio_to_retry(raid_bi, conf); 4760 } 4761 4762 static int raid5_read_one_chunk(struct mddev *mddev, struct bio *raid_bio) 4763 { 4764 struct r5conf *conf = mddev->private; 4765 int dd_idx; 4766 struct bio* align_bi; 4767 struct md_rdev *rdev; 4768 sector_t end_sector; 4769 4770 if (!in_chunk_boundary(mddev, raid_bio)) { 4771 pr_debug("%s: non aligned\n", __func__); 4772 return 0; 4773 } 4774 /* 4775 * use bio_clone_mddev to make a copy of the bio 4776 */ 4777 align_bi = bio_clone_mddev(raid_bio, GFP_NOIO, mddev); 4778 if (!align_bi) 4779 return 0; 4780 /* 4781 * set bi_end_io to a new function, and set bi_private to the 4782 * original bio. 
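 * raid5_align_endio() will drop the clone, complete the original bio if
 * the read succeeded, and otherwise queue it for retry; the rdev that
 * serviced the read is stashed in the original bio's bi_next until then.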
4783 */ 4784 align_bi->bi_end_io = raid5_align_endio; 4785 align_bi->bi_private = raid_bio; 4786 /* 4787 * compute position 4788 */ 4789 align_bi->bi_iter.bi_sector = 4790 raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, 4791 0, &dd_idx, NULL); 4792 4793 end_sector = bio_end_sector(align_bi); 4794 rcu_read_lock(); 4795 rdev = rcu_dereference(conf->disks[dd_idx].replacement); 4796 if (!rdev || test_bit(Faulty, &rdev->flags) || 4797 rdev->recovery_offset < end_sector) { 4798 rdev = rcu_dereference(conf->disks[dd_idx].rdev); 4799 if (rdev && 4800 (test_bit(Faulty, &rdev->flags) || 4801 !(test_bit(In_sync, &rdev->flags) || 4802 rdev->recovery_offset >= end_sector))) 4803 rdev = NULL; 4804 } 4805 if (rdev) { 4806 sector_t first_bad; 4807 int bad_sectors; 4808 4809 atomic_inc(&rdev->nr_pending); 4810 rcu_read_unlock(); 4811 raid_bio->bi_next = (void*)rdev; 4812 align_bi->bi_bdev = rdev->bdev; 4813 bio_clear_flag(align_bi, BIO_SEG_VALID); 4814 4815 if (is_badblock(rdev, align_bi->bi_iter.bi_sector, 4816 bio_sectors(align_bi), 4817 &first_bad, &bad_sectors)) { 4818 bio_put(align_bi); 4819 rdev_dec_pending(rdev, mddev); 4820 return 0; 4821 } 4822 4823 /* No reshape active, so we can trust rdev->data_offset */ 4824 align_bi->bi_iter.bi_sector += rdev->data_offset; 4825 4826 spin_lock_irq(&conf->device_lock); 4827 wait_event_lock_irq(conf->wait_for_quiescent, 4828 conf->quiesce == 0, 4829 conf->device_lock); 4830 atomic_inc(&conf->active_aligned_reads); 4831 spin_unlock_irq(&conf->device_lock); 4832 4833 if (mddev->gendisk) 4834 trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), 4835 align_bi, disk_devt(mddev->gendisk), 4836 raid_bio->bi_iter.bi_sector); 4837 generic_make_request(align_bi); 4838 return 1; 4839 } else { 4840 rcu_read_unlock(); 4841 bio_put(align_bi); 4842 return 0; 4843 } 4844 } 4845 4846 static struct bio *chunk_aligned_read(struct mddev *mddev, struct bio *raid_bio) 4847 { 4848 struct bio *split; 4849 4850 do { 4851 sector_t sector = raid_bio->bi_iter.bi_sector; 4852 unsigned chunk_sects = mddev->chunk_sectors; 4853 unsigned sectors = chunk_sects - (sector & (chunk_sects-1)); 4854 4855 if (sectors < bio_sectors(raid_bio)) { 4856 split = bio_split(raid_bio, sectors, GFP_NOIO, fs_bio_set); 4857 bio_chain(split, raid_bio); 4858 } else 4859 split = raid_bio; 4860 4861 if (!raid5_read_one_chunk(mddev, split)) { 4862 if (split != raid_bio) 4863 generic_make_request(raid_bio); 4864 return split; 4865 } 4866 } while (split != raid_bio); 4867 4868 return NULL; 4869 } 4870 4871 /* __get_priority_stripe - get the next stripe to process 4872 * 4873 * Full stripe writes are allowed to pass preread active stripes up until 4874 * the bypass_threshold is exceeded. In general the bypass_count 4875 * increments when the handle_list is handled before the hold_list; however, it 4876 * will not be incremented when STRIPE_IO_STARTED is sampled set signifying a 4877 * stripe with in flight i/o. The bypass_count will be reset when the 4878 * head of the hold_list has changed, i.e. the head was promoted to the 4879 * handle_list. 
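 * A stripe is taken from the hold_list only when the handle_list is empty
 * and either bypass_count has exceeded bypass_threshold or no full-stripe
 * writes are pending; bypass_count is then charged back by
 * bypass_threshold.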
4880 */ 4881 static struct stripe_head *__get_priority_stripe(struct r5conf *conf, int group) 4882 { 4883 struct stripe_head *sh = NULL, *tmp; 4884 struct list_head *handle_list = NULL; 4885 struct r5worker_group *wg = NULL; 4886 4887 if (conf->worker_cnt_per_group == 0) { 4888 handle_list = &conf->handle_list; 4889 } else if (group != ANY_GROUP) { 4890 handle_list = &conf->worker_groups[group].handle_list; 4891 wg = &conf->worker_groups[group]; 4892 } else { 4893 int i; 4894 for (i = 0; i < conf->group_cnt; i++) { 4895 handle_list = &conf->worker_groups[i].handle_list; 4896 wg = &conf->worker_groups[i]; 4897 if (!list_empty(handle_list)) 4898 break; 4899 } 4900 } 4901 4902 pr_debug("%s: handle: %s hold: %s full_writes: %d bypass_count: %d\n", 4903 __func__, 4904 list_empty(handle_list) ? "empty" : "busy", 4905 list_empty(&conf->hold_list) ? "empty" : "busy", 4906 atomic_read(&conf->pending_full_writes), conf->bypass_count); 4907 4908 if (!list_empty(handle_list)) { 4909 sh = list_entry(handle_list->next, typeof(*sh), lru); 4910 4911 if (list_empty(&conf->hold_list)) 4912 conf->bypass_count = 0; 4913 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { 4914 if (conf->hold_list.next == conf->last_hold) 4915 conf->bypass_count++; 4916 else { 4917 conf->last_hold = conf->hold_list.next; 4918 conf->bypass_count -= conf->bypass_threshold; 4919 if (conf->bypass_count < 0) 4920 conf->bypass_count = 0; 4921 } 4922 } 4923 } else if (!list_empty(&conf->hold_list) && 4924 ((conf->bypass_threshold && 4925 conf->bypass_count > conf->bypass_threshold) || 4926 atomic_read(&conf->pending_full_writes) == 0)) { 4927 4928 list_for_each_entry(tmp, &conf->hold_list, lru) { 4929 if (conf->worker_cnt_per_group == 0 || 4930 group == ANY_GROUP || 4931 !cpu_online(tmp->cpu) || 4932 cpu_to_group(tmp->cpu) == group) { 4933 sh = tmp; 4934 break; 4935 } 4936 } 4937 4938 if (sh) { 4939 conf->bypass_count -= conf->bypass_threshold; 4940 if (conf->bypass_count < 0) 4941 conf->bypass_count = 0; 4942 } 4943 wg = NULL; 4944 } 4945 4946 if (!sh) 4947 return NULL; 4948 4949 if (wg) { 4950 wg->stripes_cnt--; 4951 sh->group = NULL; 4952 } 4953 list_del_init(&sh->lru); 4954 BUG_ON(atomic_inc_return(&sh->count) != 1); 4955 return sh; 4956 } 4957 4958 struct raid5_plug_cb { 4959 struct blk_plug_cb cb; 4960 struct list_head list; 4961 struct list_head temp_inactive_list[NR_STRIPE_HASH_LOCKS]; 4962 }; 4963 4964 static void raid5_unplug(struct blk_plug_cb *blk_cb, bool from_schedule) 4965 { 4966 struct raid5_plug_cb *cb = container_of( 4967 blk_cb, struct raid5_plug_cb, cb); 4968 struct stripe_head *sh; 4969 struct mddev *mddev = cb->cb.data; 4970 struct r5conf *conf = mddev->private; 4971 int cnt = 0; 4972 int hash; 4973 4974 if (cb->list.next && !list_empty(&cb->list)) { 4975 spin_lock_irq(&conf->device_lock); 4976 while (!list_empty(&cb->list)) { 4977 sh = list_first_entry(&cb->list, struct stripe_head, lru); 4978 list_del_init(&sh->lru); 4979 /* 4980 * avoid race release_stripe_plug() sees 4981 * STRIPE_ON_UNPLUG_LIST clear but the stripe 4982 * is still in our list 4983 */ 4984 smp_mb__before_atomic(); 4985 clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state); 4986 /* 4987 * STRIPE_ON_RELEASE_LIST could be set here. 
In that 4988 * case, the count is always > 1 here 4989 */ 4990 hash = sh->hash_lock_index; 4991 __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); 4992 cnt++; 4993 } 4994 spin_unlock_irq(&conf->device_lock); 4995 } 4996 release_inactive_stripe_list(conf, cb->temp_inactive_list, 4997 NR_STRIPE_HASH_LOCKS); 4998 if (mddev->queue) 4999 trace_block_unplug(mddev->queue, cnt, !from_schedule); 5000 kfree(cb); 5001 } 5002 5003 static void release_stripe_plug(struct mddev *mddev, 5004 struct stripe_head *sh) 5005 { 5006 struct blk_plug_cb *blk_cb = blk_check_plugged( 5007 raid5_unplug, mddev, 5008 sizeof(struct raid5_plug_cb)); 5009 struct raid5_plug_cb *cb; 5010 5011 if (!blk_cb) { 5012 release_stripe(sh); 5013 return; 5014 } 5015 5016 cb = container_of(blk_cb, struct raid5_plug_cb, cb); 5017 5018 if (cb->list.next == NULL) { 5019 int i; 5020 INIT_LIST_HEAD(&cb->list); 5021 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 5022 INIT_LIST_HEAD(cb->temp_inactive_list + i); 5023 } 5024 5025 if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) 5026 list_add_tail(&sh->lru, &cb->list); 5027 else 5028 release_stripe(sh); 5029 } 5030 5031 static void make_discard_request(struct mddev *mddev, struct bio *bi) 5032 { 5033 struct r5conf *conf = mddev->private; 5034 sector_t logical_sector, last_sector; 5035 struct stripe_head *sh; 5036 int remaining; 5037 int stripe_sectors; 5038 5039 if (mddev->reshape_position != MaxSector) 5040 /* Skip discard while reshape is happening */ 5041 return; 5042 5043 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1); 5044 last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9); 5045 5046 bi->bi_next = NULL; 5047 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ 5048 5049 stripe_sectors = conf->chunk_sectors * 5050 (conf->raid_disks - conf->max_degraded); 5051 logical_sector = DIV_ROUND_UP_SECTOR_T(logical_sector, 5052 stripe_sectors); 5053 sector_div(last_sector, stripe_sectors); 5054 5055 logical_sector *= conf->chunk_sectors; 5056 last_sector *= conf->chunk_sectors; 5057 5058 for (; logical_sector < last_sector; 5059 logical_sector += STRIPE_SECTORS) { 5060 DEFINE_WAIT(w); 5061 int d; 5062 again: 5063 sh = get_active_stripe(conf, logical_sector, 0, 0, 0); 5064 prepare_to_wait(&conf->wait_for_overlap, &w, 5065 TASK_UNINTERRUPTIBLE); 5066 set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); 5067 if (test_bit(STRIPE_SYNCING, &sh->state)) { 5068 release_stripe(sh); 5069 schedule(); 5070 goto again; 5071 } 5072 clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); 5073 spin_lock_irq(&sh->stripe_lock); 5074 for (d = 0; d < conf->raid_disks; d++) { 5075 if (d == sh->pd_idx || d == sh->qd_idx) 5076 continue; 5077 if (sh->dev[d].towrite || sh->dev[d].toread) { 5078 set_bit(R5_Overlap, &sh->dev[d].flags); 5079 spin_unlock_irq(&sh->stripe_lock); 5080 release_stripe(sh); 5081 schedule(); 5082 goto again; 5083 } 5084 } 5085 set_bit(STRIPE_DISCARD, &sh->state); 5086 finish_wait(&conf->wait_for_overlap, &w); 5087 sh->overwrite_disks = 0; 5088 for (d = 0; d < conf->raid_disks; d++) { 5089 if (d == sh->pd_idx || d == sh->qd_idx) 5090 continue; 5091 sh->dev[d].towrite = bi; 5092 set_bit(R5_OVERWRITE, &sh->dev[d].flags); 5093 raid5_inc_bi_active_stripes(bi); 5094 sh->overwrite_disks++; 5095 } 5096 spin_unlock_irq(&sh->stripe_lock); 5097 if (conf->mddev->bitmap) { 5098 for (d = 0; 5099 d < conf->raid_disks - conf->max_degraded; 5100 d++) 5101 bitmap_startwrite(mddev->bitmap, 5102 sh->sector, 5103 STRIPE_SECTORS, 5104 0); 5105 sh->bm_seq = 
conf->seq_flush + 1; 5106 set_bit(STRIPE_BIT_DELAY, &sh->state); 5107 } 5108 5109 set_bit(STRIPE_HANDLE, &sh->state); 5110 clear_bit(STRIPE_DELAYED, &sh->state); 5111 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 5112 atomic_inc(&conf->preread_active_stripes); 5113 release_stripe_plug(mddev, sh); 5114 } 5115 5116 remaining = raid5_dec_bi_active_stripes(bi); 5117 if (remaining == 0) { 5118 md_write_end(mddev); 5119 bio_endio(bi); 5120 } 5121 } 5122 5123 static void make_request(struct mddev *mddev, struct bio * bi) 5124 { 5125 struct r5conf *conf = mddev->private; 5126 int dd_idx; 5127 sector_t new_sector; 5128 sector_t logical_sector, last_sector; 5129 struct stripe_head *sh; 5130 const int rw = bio_data_dir(bi); 5131 int remaining; 5132 DEFINE_WAIT(w); 5133 bool do_prepare; 5134 5135 if (unlikely(bi->bi_rw & REQ_FLUSH)) { 5136 md_flush_request(mddev, bi); 5137 return; 5138 } 5139 5140 md_write_start(mddev, bi); 5141 5142 /* 5143 * If array is degraded, better not do chunk aligned read because 5144 * later we might have to read it again in order to reconstruct 5145 * data on failed drives. 5146 */ 5147 if (rw == READ && mddev->degraded == 0 && 5148 mddev->reshape_position == MaxSector) { 5149 bi = chunk_aligned_read(mddev, bi); 5150 if (!bi) 5151 return; 5152 } 5153 5154 if (unlikely(bi->bi_rw & REQ_DISCARD)) { 5155 make_discard_request(mddev, bi); 5156 return; 5157 } 5158 5159 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1); 5160 last_sector = bio_end_sector(bi); 5161 bi->bi_next = NULL; 5162 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ 5163 5164 prepare_to_wait(&conf->wait_for_overlap, &w, TASK_UNINTERRUPTIBLE); 5165 for (;logical_sector < last_sector; logical_sector += STRIPE_SECTORS) { 5166 int previous; 5167 int seq; 5168 5169 do_prepare = false; 5170 retry: 5171 seq = read_seqcount_begin(&conf->gen_lock); 5172 previous = 0; 5173 if (do_prepare) 5174 prepare_to_wait(&conf->wait_for_overlap, &w, 5175 TASK_UNINTERRUPTIBLE); 5176 if (unlikely(conf->reshape_progress != MaxSector)) { 5177 /* spinlock is needed as reshape_progress may be 5178 * 64bit on a 32bit platform, and so it might be 5179 * possible to see a half-updated value 5180 * Of course reshape_progress could change after 5181 * the lock is dropped, so once we get a reference 5182 * to the stripe that we think it is, we will have 5183 * to check again. 5184 */ 5185 spin_lock_irq(&conf->device_lock); 5186 if (mddev->reshape_backwards 5187 ? logical_sector < conf->reshape_progress 5188 : logical_sector >= conf->reshape_progress) { 5189 previous = 1; 5190 } else { 5191 if (mddev->reshape_backwards 5192 ? logical_sector < conf->reshape_safe 5193 : logical_sector >= conf->reshape_safe) { 5194 spin_unlock_irq(&conf->device_lock); 5195 schedule(); 5196 do_prepare = true; 5197 goto retry; 5198 } 5199 } 5200 spin_unlock_irq(&conf->device_lock); 5201 } 5202 5203 new_sector = raid5_compute_sector(conf, logical_sector, 5204 previous, 5205 &dd_idx, NULL); 5206 pr_debug("raid456: make_request, sector %llu logical %llu\n", 5207 (unsigned long long)new_sector, 5208 (unsigned long long)logical_sector); 5209 5210 sh = get_active_stripe(conf, new_sector, previous, 5211 (bi->bi_rw&RWA_MASK), 0); 5212 if (sh) { 5213 if (unlikely(previous)) { 5214 /* expansion might have moved on while waiting for a 5215 * stripe, so we must do the range check again. 
5216 * Expansion could still move past after this 5217 * test, but as we are holding a reference to 5218 * 'sh', we know that if that happens, 5219 * STRIPE_EXPANDING will get set and the expansion 5220 * won't proceed until we finish with the stripe. 5221 */ 5222 int must_retry = 0; 5223 spin_lock_irq(&conf->device_lock); 5224 if (mddev->reshape_backwards 5225 ? logical_sector >= conf->reshape_progress 5226 : logical_sector < conf->reshape_progress) 5227 /* mismatch, need to try again */ 5228 must_retry = 1; 5229 spin_unlock_irq(&conf->device_lock); 5230 if (must_retry) { 5231 release_stripe(sh); 5232 schedule(); 5233 do_prepare = true; 5234 goto retry; 5235 } 5236 } 5237 if (read_seqcount_retry(&conf->gen_lock, seq)) { 5238 /* Might have got the wrong stripe_head 5239 * by accident 5240 */ 5241 release_stripe(sh); 5242 goto retry; 5243 } 5244 5245 if (rw == WRITE && 5246 logical_sector >= mddev->suspend_lo && 5247 logical_sector < mddev->suspend_hi) { 5248 release_stripe(sh); 5249 /* As the suspend_* range is controlled by 5250 * userspace, we want an interruptible 5251 * wait. 5252 */ 5253 flush_signals(current); 5254 prepare_to_wait(&conf->wait_for_overlap, 5255 &w, TASK_INTERRUPTIBLE); 5256 if (logical_sector >= mddev->suspend_lo && 5257 logical_sector < mddev->suspend_hi) { 5258 schedule(); 5259 do_prepare = true; 5260 } 5261 goto retry; 5262 } 5263 5264 if (test_bit(STRIPE_EXPANDING, &sh->state) || 5265 !add_stripe_bio(sh, bi, dd_idx, rw, previous)) { 5266 /* Stripe is busy expanding or 5267 * add failed due to overlap. Flush everything 5268 * and wait a while 5269 */ 5270 md_wakeup_thread(mddev->thread); 5271 release_stripe(sh); 5272 schedule(); 5273 do_prepare = true; 5274 goto retry; 5275 } 5276 set_bit(STRIPE_HANDLE, &sh->state); 5277 clear_bit(STRIPE_DELAYED, &sh->state); 5278 if ((!sh->batch_head || sh == sh->batch_head) && 5279 (bi->bi_rw & REQ_SYNC) && 5280 !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) 5281 atomic_inc(&conf->preread_active_stripes); 5282 release_stripe_plug(mddev, sh); 5283 } else { 5284 /* cannot get stripe for read-ahead, just give-up */ 5285 bi->bi_error = -EIO; 5286 break; 5287 } 5288 } 5289 finish_wait(&conf->wait_for_overlap, &w); 5290 5291 remaining = raid5_dec_bi_active_stripes(bi); 5292 if (remaining == 0) { 5293 5294 if ( rw == WRITE ) 5295 md_write_end(mddev); 5296 5297 trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), 5298 bi, 0); 5299 bio_endio(bi); 5300 } 5301 } 5302 5303 static sector_t raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks); 5304 5305 static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr, int *skipped) 5306 { 5307 /* reshaping is quite different to recovery/resync so it is 5308 * handled quite separately ... here. 5309 * 5310 * On each call to sync_request, we gather one chunk worth of 5311 * destination stripes and flag them as expanding. 5312 * Then we find all the source stripes and request reads. 5313 * As the reads complete, handle_stripe will copy the data 5314 * into the destination stripe and release that stripe. 
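 * Destination stripes are flagged STRIPE_EXPANDING and the source
 * stripes STRIPE_EXPAND_SOURCE; the actual copy is performed by
 * handle_stripe_expansion() once the source reads complete.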
5315 */ 5316 struct r5conf *conf = mddev->private; 5317 struct stripe_head *sh; 5318 sector_t first_sector, last_sector; 5319 int raid_disks = conf->previous_raid_disks; 5320 int data_disks = raid_disks - conf->max_degraded; 5321 int new_data_disks = conf->raid_disks - conf->max_degraded; 5322 int i; 5323 int dd_idx; 5324 sector_t writepos, readpos, safepos; 5325 sector_t stripe_addr; 5326 int reshape_sectors; 5327 struct list_head stripes; 5328 5329 if (sector_nr == 0) { 5330 /* If restarting in the middle, skip the initial sectors */ 5331 if (mddev->reshape_backwards && 5332 conf->reshape_progress < raid5_size(mddev, 0, 0)) { 5333 sector_nr = raid5_size(mddev, 0, 0) 5334 - conf->reshape_progress; 5335 } else if (!mddev->reshape_backwards && 5336 conf->reshape_progress > 0) 5337 sector_nr = conf->reshape_progress; 5338 sector_div(sector_nr, new_data_disks); 5339 if (sector_nr) { 5340 mddev->curr_resync_completed = sector_nr; 5341 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 5342 *skipped = 1; 5343 return sector_nr; 5344 } 5345 } 5346 5347 /* We need to process a full chunk at a time. 5348 * If old and new chunk sizes differ, we need to process the 5349 * largest of these 5350 */ 5351 if (mddev->new_chunk_sectors > mddev->chunk_sectors) 5352 reshape_sectors = mddev->new_chunk_sectors; 5353 else 5354 reshape_sectors = mddev->chunk_sectors; 5355 5356 /* We update the metadata at least every 10 seconds, or when 5357 * the data about to be copied would over-write the source of 5358 * the data at the front of the range. i.e. one new_stripe 5359 * along from reshape_progress new_maps to after where 5360 * reshape_safe old_maps to 5361 */ 5362 writepos = conf->reshape_progress; 5363 sector_div(writepos, new_data_disks); 5364 readpos = conf->reshape_progress; 5365 sector_div(readpos, data_disks); 5366 safepos = conf->reshape_safe; 5367 sector_div(safepos, data_disks); 5368 if (mddev->reshape_backwards) { 5369 writepos -= min_t(sector_t, reshape_sectors, writepos); 5370 readpos += reshape_sectors; 5371 safepos += reshape_sectors; 5372 } else { 5373 writepos += reshape_sectors; 5374 readpos -= min_t(sector_t, reshape_sectors, readpos); 5375 safepos -= min_t(sector_t, reshape_sectors, safepos); 5376 } 5377 5378 /* Having calculated the 'writepos' possibly use it 5379 * to set 'stripe_addr' which is where we will write to. 5380 */ 5381 if (mddev->reshape_backwards) { 5382 BUG_ON(conf->reshape_progress == 0); 5383 stripe_addr = writepos; 5384 BUG_ON((mddev->dev_sectors & 5385 ~((sector_t)reshape_sectors - 1)) 5386 - reshape_sectors - stripe_addr 5387 != sector_nr); 5388 } else { 5389 BUG_ON(writepos != sector_nr + reshape_sectors); 5390 stripe_addr = sector_nr; 5391 } 5392 5393 /* 'writepos' is the most advanced device address we might write. 5394 * 'readpos' is the least advanced device address we might read. 5395 * 'safepos' is the least address recorded in the metadata as having 5396 * been reshaped. 5397 * If there is a min_offset_diff, these are adjusted either by 5398 * increasing the safepos/readpos if diff is negative, or 5399 * increasing writepos if diff is positive. 5400 * If 'readpos' is then behind 'writepos', there is no way that we can 5401 * ensure safety in the face of a crash - that must be done by userspace 5402 * making a backup of the data. So in that case there is no particular 5403 * rush to update metadata. 
5404 * Otherwise if 'safepos' is behind 'writepos', then we really need to 5405 * update the metadata to advance 'safepos' to match 'readpos' so that 5406 * we can be safe in the event of a crash. 5407 * So we insist on updating metadata if safepos is behind writepos and 5408 * readpos is beyond writepos. 5409 * In any case, update the metadata every 10 seconds. 5410 * Maybe that number should be configurable, but I'm not sure it is 5411 * worth it.... maybe it could be a multiple of safemode_delay??? 5412 */ 5413 if (conf->min_offset_diff < 0) { 5414 safepos += -conf->min_offset_diff; 5415 readpos += -conf->min_offset_diff; 5416 } else 5417 writepos += conf->min_offset_diff; 5418 5419 if ((mddev->reshape_backwards 5420 ? (safepos > writepos && readpos < writepos) 5421 : (safepos < writepos && readpos > writepos)) || 5422 time_after(jiffies, conf->reshape_checkpoint + 10*HZ)) { 5423 /* Cannot proceed until we've updated the superblock... */ 5424 wait_event(conf->wait_for_overlap, 5425 atomic_read(&conf->reshape_stripes)==0 5426 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); 5427 if (atomic_read(&conf->reshape_stripes) != 0) 5428 return 0; 5429 mddev->reshape_position = conf->reshape_progress; 5430 mddev->curr_resync_completed = sector_nr; 5431 conf->reshape_checkpoint = jiffies; 5432 set_bit(MD_CHANGE_DEVS, &mddev->flags); 5433 md_wakeup_thread(mddev->thread); 5434 wait_event(mddev->sb_wait, mddev->flags == 0 || 5435 test_bit(MD_RECOVERY_INTR, &mddev->recovery)); 5436 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 5437 return 0; 5438 spin_lock_irq(&conf->device_lock); 5439 conf->reshape_safe = mddev->reshape_position; 5440 spin_unlock_irq(&conf->device_lock); 5441 wake_up(&conf->wait_for_overlap); 5442 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 5443 } 5444 5445 INIT_LIST_HEAD(&stripes); 5446 for (i = 0; i < reshape_sectors; i += STRIPE_SECTORS) { 5447 int j; 5448 int skipped_disk = 0; 5449 sh = get_active_stripe(conf, stripe_addr+i, 0, 0, 1); 5450 set_bit(STRIPE_EXPANDING, &sh->state); 5451 atomic_inc(&conf->reshape_stripes); 5452 /* If any of this stripe is beyond the end of the old 5453 * array, then we need to zero those blocks 5454 */ 5455 for (j=sh->disks; j--;) { 5456 sector_t s; 5457 if (j == sh->pd_idx) 5458 continue; 5459 if (conf->level == 6 && 5460 j == sh->qd_idx) 5461 continue; 5462 s = compute_blocknr(sh, j, 0); 5463 if (s < raid5_size(mddev, 0, 0)) { 5464 skipped_disk = 1; 5465 continue; 5466 } 5467 memset(page_address(sh->dev[j].page), 0, STRIPE_SIZE); 5468 set_bit(R5_Expanded, &sh->dev[j].flags); 5469 set_bit(R5_UPTODATE, &sh->dev[j].flags); 5470 } 5471 if (!skipped_disk) { 5472 set_bit(STRIPE_EXPAND_READY, &sh->state); 5473 set_bit(STRIPE_HANDLE, &sh->state); 5474 } 5475 list_add(&sh->lru, &stripes); 5476 } 5477 spin_lock_irq(&conf->device_lock); 5478 if (mddev->reshape_backwards) 5479 conf->reshape_progress -= reshape_sectors * new_data_disks; 5480 else 5481 conf->reshape_progress += reshape_sectors * new_data_disks; 5482 spin_unlock_irq(&conf->device_lock); 5483 /* Ok, those stripe are ready. We can start scheduling 5484 * reads on the source stripes. 5485 * The source stripes are determined by mapping the first and last 5486 * block on the destination stripes. 
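 * The mapping below is done with raid5_compute_sector() using the
 * previous geometry (the 'previous' argument set to 1), since that is
 * where the data currently lives.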
5487 */ 5488 first_sector = 5489 raid5_compute_sector(conf, stripe_addr*(new_data_disks), 5490 1, &dd_idx, NULL); 5491 last_sector = 5492 raid5_compute_sector(conf, ((stripe_addr+reshape_sectors) 5493 * new_data_disks - 1), 5494 1, &dd_idx, NULL); 5495 if (last_sector >= mddev->dev_sectors) 5496 last_sector = mddev->dev_sectors - 1; 5497 while (first_sector <= last_sector) { 5498 sh = get_active_stripe(conf, first_sector, 1, 0, 1); 5499 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); 5500 set_bit(STRIPE_HANDLE, &sh->state); 5501 release_stripe(sh); 5502 first_sector += STRIPE_SECTORS; 5503 } 5504 /* Now that the sources are clearly marked, we can release 5505 * the destination stripes 5506 */ 5507 while (!list_empty(&stripes)) { 5508 sh = list_entry(stripes.next, struct stripe_head, lru); 5509 list_del_init(&sh->lru); 5510 release_stripe(sh); 5511 } 5512 /* If this takes us to the resync_max point where we have to pause, 5513 * then we need to write out the superblock. 5514 */ 5515 sector_nr += reshape_sectors; 5516 if ((sector_nr - mddev->curr_resync_completed) * 2 5517 >= mddev->resync_max - mddev->curr_resync_completed) { 5518 /* Cannot proceed until we've updated the superblock... */ 5519 wait_event(conf->wait_for_overlap, 5520 atomic_read(&conf->reshape_stripes) == 0 5521 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); 5522 if (atomic_read(&conf->reshape_stripes) != 0) 5523 goto ret; 5524 mddev->reshape_position = conf->reshape_progress; 5525 mddev->curr_resync_completed = sector_nr; 5526 conf->reshape_checkpoint = jiffies; 5527 set_bit(MD_CHANGE_DEVS, &mddev->flags); 5528 md_wakeup_thread(mddev->thread); 5529 wait_event(mddev->sb_wait, 5530 !test_bit(MD_CHANGE_DEVS, &mddev->flags) 5531 || test_bit(MD_RECOVERY_INTR, &mddev->recovery)); 5532 if (test_bit(MD_RECOVERY_INTR, &mddev->recovery)) 5533 goto ret; 5534 spin_lock_irq(&conf->device_lock); 5535 conf->reshape_safe = mddev->reshape_position; 5536 spin_unlock_irq(&conf->device_lock); 5537 wake_up(&conf->wait_for_overlap); 5538 sysfs_notify(&mddev->kobj, NULL, "sync_completed"); 5539 } 5540 ret: 5541 return reshape_sectors; 5542 } 5543 5544 static inline sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipped) 5545 { 5546 struct r5conf *conf = mddev->private; 5547 struct stripe_head *sh; 5548 sector_t max_sector = mddev->dev_sectors; 5549 sector_t sync_blocks; 5550 int still_degraded = 0; 5551 int i; 5552 5553 if (sector_nr >= max_sector) { 5554 /* just being told to finish up .. nothing much to do */ 5555 5556 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 5557 end_reshape(conf); 5558 return 0; 5559 } 5560 5561 if (mddev->curr_resync < max_sector) /* aborted */ 5562 bitmap_end_sync(mddev->bitmap, mddev->curr_resync, 5563 &sync_blocks, 1); 5564 else /* completed sync */ 5565 conf->fullsync = 0; 5566 bitmap_close_sync(mddev->bitmap); 5567 5568 return 0; 5569 } 5570 5571 /* Allow raid5_quiesce to complete */ 5572 wait_event(conf->wait_for_overlap, conf->quiesce != 2); 5573 5574 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) 5575 return reshape_request(mddev, sector_nr, skipped); 5576 5577 /* No need to check resync_max as we never do more than one 5578 * stripe, and as resync_max will always be on a chunk boundary, 5579 * if the check in md_do_sync didn't fire, there is no chance 5580 * of overstepping resync_max here 5581 */ 5582 5583 /* if there is too many failed drives and we are trying 5584 * to resync, then assert that we are finished, because there is 5585 * nothing we can do. 
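 * We do that by returning the whole remaining range with *skipped set,
 * so md_do_sync() accounts those sectors as done and the resync
 * terminates rather than looping.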
5586 */ 5587 if (mddev->degraded >= conf->max_degraded && 5588 test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) { 5589 sector_t rv = mddev->dev_sectors - sector_nr; 5590 *skipped = 1; 5591 return rv; 5592 } 5593 if (!test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery) && 5594 !conf->fullsync && 5595 !bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) && 5596 sync_blocks >= STRIPE_SECTORS) { 5597 /* we can skip this block, and probably more */ 5598 sync_blocks /= STRIPE_SECTORS; 5599 *skipped = 1; 5600 return sync_blocks * STRIPE_SECTORS; /* keep things rounded to whole stripes */ 5601 } 5602 5603 bitmap_cond_end_sync(mddev->bitmap, sector_nr); 5604 5605 sh = get_active_stripe(conf, sector_nr, 0, 1, 0); 5606 if (sh == NULL) { 5607 sh = get_active_stripe(conf, sector_nr, 0, 0, 0); 5608 /* make sure we don't swamp the stripe cache if someone else 5609 * is trying to get access 5610 */ 5611 schedule_timeout_uninterruptible(1); 5612 } 5613 /* Need to check if array will still be degraded after recovery/resync 5614 * Note in case of > 1 drive failures it's possible we're rebuilding 5615 * one drive while leaving another faulty drive in array. 5616 */ 5617 rcu_read_lock(); 5618 for (i = 0; i < conf->raid_disks; i++) { 5619 struct md_rdev *rdev = ACCESS_ONCE(conf->disks[i].rdev); 5620 5621 if (rdev == NULL || test_bit(Faulty, &rdev->flags)) 5622 still_degraded = 1; 5623 } 5624 rcu_read_unlock(); 5625 5626 bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, still_degraded); 5627 5628 set_bit(STRIPE_SYNC_REQUESTED, &sh->state); 5629 set_bit(STRIPE_HANDLE, &sh->state); 5630 5631 release_stripe(sh); 5632 5633 return STRIPE_SECTORS; 5634 } 5635 5636 static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) 5637 { 5638 /* We may not be able to submit a whole bio at once as there 5639 * may not be enough stripe_heads available. 5640 * We cannot pre-allocate enough stripe_heads as we may need 5641 * more than exist in the cache (if we allow ever large chunks). 5642 * So we do one stripe head at a time and record in 5643 * ->bi_hw_segments how many have been done. 5644 * 5645 * We *know* that this entire raid_bio is in one chunk, so 5646 * it will be only one 'dd_idx' and only need one call to raid5_compute_sector. 
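 * If we fail to get a stripe_head, or fail to attach the bio to it,
 * we record how far we got with raid5_set_bi_processed_stripes(),
 * park the bio on conf->retry_read_aligned and return, so that
 * raid5d can retry the remainder later.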
5647 */ 5648 struct stripe_head *sh; 5649 int dd_idx; 5650 sector_t sector, logical_sector, last_sector; 5651 int scnt = 0; 5652 int remaining; 5653 int handled = 0; 5654 5655 logical_sector = raid_bio->bi_iter.bi_sector & 5656 ~((sector_t)STRIPE_SECTORS-1); 5657 sector = raid5_compute_sector(conf, logical_sector, 5658 0, &dd_idx, NULL); 5659 last_sector = bio_end_sector(raid_bio); 5660 5661 for (; logical_sector < last_sector; 5662 logical_sector += STRIPE_SECTORS, 5663 sector += STRIPE_SECTORS, 5664 scnt++) { 5665 5666 if (scnt < raid5_bi_processed_stripes(raid_bio)) 5667 /* already done this stripe */ 5668 continue; 5669 5670 sh = get_active_stripe(conf, sector, 0, 1, 1); 5671 5672 if (!sh) { 5673 /* failed to get a stripe - must wait */ 5674 raid5_set_bi_processed_stripes(raid_bio, scnt); 5675 conf->retry_read_aligned = raid_bio; 5676 return handled; 5677 } 5678 5679 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) { 5680 release_stripe(sh); 5681 raid5_set_bi_processed_stripes(raid_bio, scnt); 5682 conf->retry_read_aligned = raid_bio; 5683 return handled; 5684 } 5685 5686 set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags); 5687 handle_stripe(sh); 5688 release_stripe(sh); 5689 handled++; 5690 } 5691 remaining = raid5_dec_bi_active_stripes(raid_bio); 5692 if (remaining == 0) { 5693 trace_block_bio_complete(bdev_get_queue(raid_bio->bi_bdev), 5694 raid_bio, 0); 5695 bio_endio(raid_bio); 5696 } 5697 if (atomic_dec_and_test(&conf->active_aligned_reads)) 5698 wake_up(&conf->wait_for_quiescent); 5699 return handled; 5700 } 5701 5702 static int handle_active_stripes(struct r5conf *conf, int group, 5703 struct r5worker *worker, 5704 struct list_head *temp_inactive_list) 5705 { 5706 struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; 5707 int i, batch_size = 0, hash; 5708 bool release_inactive = false; 5709 5710 while (batch_size < MAX_STRIPE_BATCH && 5711 (sh = __get_priority_stripe(conf, group)) != NULL) 5712 batch[batch_size++] = sh; 5713 5714 if (batch_size == 0) { 5715 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 5716 if (!list_empty(temp_inactive_list + i)) 5717 break; 5718 if (i == NR_STRIPE_HASH_LOCKS) 5719 return batch_size; 5720 release_inactive = true; 5721 } 5722 spin_unlock_irq(&conf->device_lock); 5723 5724 release_inactive_stripe_list(conf, temp_inactive_list, 5725 NR_STRIPE_HASH_LOCKS); 5726 5727 if (release_inactive) { 5728 spin_lock_irq(&conf->device_lock); 5729 return 0; 5730 } 5731 5732 for (i = 0; i < batch_size; i++) 5733 handle_stripe(batch[i]); 5734 5735 cond_resched(); 5736 5737 spin_lock_irq(&conf->device_lock); 5738 for (i = 0; i < batch_size; i++) { 5739 hash = batch[i]->hash_lock_index; 5740 __release_stripe(conf, batch[i], &temp_inactive_list[hash]); 5741 } 5742 return batch_size; 5743 } 5744 5745 static void raid5_do_work(struct work_struct *work) 5746 { 5747 struct r5worker *worker = container_of(work, struct r5worker, work); 5748 struct r5worker_group *group = worker->group; 5749 struct r5conf *conf = group->conf; 5750 int group_id = group - conf->worker_groups; 5751 int handled; 5752 struct blk_plug plug; 5753 5754 pr_debug("+++ raid5worker active\n"); 5755 5756 blk_start_plug(&plug); 5757 handled = 0; 5758 spin_lock_irq(&conf->device_lock); 5759 while (1) { 5760 int batch_size, released; 5761 5762 released = release_stripe_list(conf, worker->temp_inactive_list); 5763 5764 batch_size = handle_active_stripes(conf, group_id, worker, 5765 worker->temp_inactive_list); 5766 worker->working = false; 5767 if (!batch_size && !released) 5768 break; 5769 handled += batch_size; 
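		/* We made progress in this pass (stripes were released or a
		 * batch was handled), so go round again in case more work has
		 * become available.
		 */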
5770 } 5771 pr_debug("%d stripes handled\n", handled); 5772 5773 spin_unlock_irq(&conf->device_lock); 5774 blk_finish_plug(&plug); 5775 5776 pr_debug("--- raid5worker inactive\n"); 5777 } 5778 5779 /* 5780 * This is our raid5 kernel thread. 5781 * 5782 * We scan the hash table for stripes which can be handled now. 5783 * During the scan, completed stripes are saved for us by the interrupt 5784 * handler, so that they will not have to wait for our next wakeup. 5785 */ 5786 static void raid5d(struct md_thread *thread) 5787 { 5788 struct mddev *mddev = thread->mddev; 5789 struct r5conf *conf = mddev->private; 5790 int handled; 5791 struct blk_plug plug; 5792 5793 pr_debug("+++ raid5d active\n"); 5794 5795 md_check_recovery(mddev); 5796 5797 blk_start_plug(&plug); 5798 handled = 0; 5799 spin_lock_irq(&conf->device_lock); 5800 while (1) { 5801 struct bio *bio; 5802 int batch_size, released; 5803 5804 released = release_stripe_list(conf, conf->temp_inactive_list); 5805 if (released) 5806 clear_bit(R5_DID_ALLOC, &conf->cache_state); 5807 5808 if ( 5809 !list_empty(&conf->bitmap_list)) { 5810 /* Now is a good time to flush some bitmap updates */ 5811 conf->seq_flush++; 5812 spin_unlock_irq(&conf->device_lock); 5813 bitmap_unplug(mddev->bitmap); 5814 spin_lock_irq(&conf->device_lock); 5815 conf->seq_write = conf->seq_flush; 5816 activate_bit_delay(conf, conf->temp_inactive_list); 5817 } 5818 raid5_activate_delayed(conf); 5819 5820 while ((bio = remove_bio_from_retry(conf))) { 5821 int ok; 5822 spin_unlock_irq(&conf->device_lock); 5823 ok = retry_aligned_read(conf, bio); 5824 spin_lock_irq(&conf->device_lock); 5825 if (!ok) 5826 break; 5827 handled++; 5828 } 5829 5830 batch_size = handle_active_stripes(conf, ANY_GROUP, NULL, 5831 conf->temp_inactive_list); 5832 if (!batch_size && !released) 5833 break; 5834 handled += batch_size; 5835 5836 if (mddev->flags & ~(1<<MD_CHANGE_PENDING)) { 5837 spin_unlock_irq(&conf->device_lock); 5838 md_check_recovery(mddev); 5839 spin_lock_irq(&conf->device_lock); 5840 } 5841 } 5842 pr_debug("%d stripes handled\n", handled); 5843 5844 spin_unlock_irq(&conf->device_lock); 5845 if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) && 5846 mutex_trylock(&conf->cache_size_mutex)) { 5847 grow_one_stripe(conf, __GFP_NOWARN); 5848 /* Set flag even if allocation failed. 
This helps 5849 * slow down allocation requests when mem is short 5850 */ 5851 set_bit(R5_DID_ALLOC, &conf->cache_state); 5852 mutex_unlock(&conf->cache_size_mutex); 5853 } 5854 5855 async_tx_issue_pending_all(); 5856 blk_finish_plug(&plug); 5857 5858 pr_debug("--- raid5d inactive\n"); 5859 } 5860 5861 static ssize_t 5862 raid5_show_stripe_cache_size(struct mddev *mddev, char *page) 5863 { 5864 struct r5conf *conf; 5865 int ret = 0; 5866 spin_lock(&mddev->lock); 5867 conf = mddev->private; 5868 if (conf) 5869 ret = sprintf(page, "%d\n", conf->min_nr_stripes); 5870 spin_unlock(&mddev->lock); 5871 return ret; 5872 } 5873 5874 int 5875 raid5_set_cache_size(struct mddev *mddev, int size) 5876 { 5877 struct r5conf *conf = mddev->private; 5878 int err; 5879 5880 if (size <= 16 || size > 32768) 5881 return -EINVAL; 5882 5883 conf->min_nr_stripes = size; 5884 mutex_lock(&conf->cache_size_mutex); 5885 while (size < conf->max_nr_stripes && 5886 drop_one_stripe(conf)) 5887 ; 5888 mutex_unlock(&conf->cache_size_mutex); 5889 5890 5891 err = md_allow_write(mddev); 5892 if (err) 5893 return err; 5894 5895 mutex_lock(&conf->cache_size_mutex); 5896 while (size > conf->max_nr_stripes) 5897 if (!grow_one_stripe(conf, GFP_KERNEL)) 5898 break; 5899 mutex_unlock(&conf->cache_size_mutex); 5900 5901 return 0; 5902 } 5903 EXPORT_SYMBOL(raid5_set_cache_size); 5904 5905 static ssize_t 5906 raid5_store_stripe_cache_size(struct mddev *mddev, const char *page, size_t len) 5907 { 5908 struct r5conf *conf; 5909 unsigned long new; 5910 int err; 5911 5912 if (len >= PAGE_SIZE) 5913 return -EINVAL; 5914 if (kstrtoul(page, 10, &new)) 5915 return -EINVAL; 5916 err = mddev_lock(mddev); 5917 if (err) 5918 return err; 5919 conf = mddev->private; 5920 if (!conf) 5921 err = -ENODEV; 5922 else 5923 err = raid5_set_cache_size(mddev, new); 5924 mddev_unlock(mddev); 5925 5926 return err ?: len; 5927 } 5928 5929 static struct md_sysfs_entry 5930 raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR, 5931 raid5_show_stripe_cache_size, 5932 raid5_store_stripe_cache_size); 5933 5934 static ssize_t 5935 raid5_show_rmw_level(struct mddev *mddev, char *page) 5936 { 5937 struct r5conf *conf = mddev->private; 5938 if (conf) 5939 return sprintf(page, "%d\n", conf->rmw_level); 5940 else 5941 return 0; 5942 } 5943 5944 static ssize_t 5945 raid5_store_rmw_level(struct mddev *mddev, const char *page, size_t len) 5946 { 5947 struct r5conf *conf = mddev->private; 5948 unsigned long new; 5949 5950 if (!conf) 5951 return -ENODEV; 5952 5953 if (len >= PAGE_SIZE) 5954 return -EINVAL; 5955 5956 if (kstrtoul(page, 10, &new)) 5957 return -EINVAL; 5958 5959 if (new != PARITY_DISABLE_RMW && !raid6_call.xor_syndrome) 5960 return -EINVAL; 5961 5962 if (new != PARITY_DISABLE_RMW && 5963 new != PARITY_ENABLE_RMW && 5964 new != PARITY_PREFER_RMW) 5965 return -EINVAL; 5966 5967 conf->rmw_level = new; 5968 return len; 5969 } 5970 5971 static struct md_sysfs_entry 5972 raid5_rmw_level = __ATTR(rmw_level, S_IRUGO | S_IWUSR, 5973 raid5_show_rmw_level, 5974 raid5_store_rmw_level); 5975 5976 5977 static ssize_t 5978 raid5_show_preread_threshold(struct mddev *mddev, char *page) 5979 { 5980 struct r5conf *conf; 5981 int ret = 0; 5982 spin_lock(&mddev->lock); 5983 conf = mddev->private; 5984 if (conf) 5985 ret = sprintf(page, "%d\n", conf->bypass_threshold); 5986 spin_unlock(&mddev->lock); 5987 return ret; 5988 } 5989 5990 static ssize_t 5991 raid5_store_preread_threshold(struct mddev *mddev, const char *page, size_t len) 5992 { 5993 struct r5conf *conf; 5994 
unsigned long new; 5995 int err; 5996 5997 if (len >= PAGE_SIZE) 5998 return -EINVAL; 5999 if (kstrtoul(page, 10, &new)) 6000 return -EINVAL; 6001 6002 err = mddev_lock(mddev); 6003 if (err) 6004 return err; 6005 conf = mddev->private; 6006 if (!conf) 6007 err = -ENODEV; 6008 else if (new > conf->min_nr_stripes) 6009 err = -EINVAL; 6010 else 6011 conf->bypass_threshold = new; 6012 mddev_unlock(mddev); 6013 return err ?: len; 6014 } 6015 6016 static struct md_sysfs_entry 6017 raid5_preread_bypass_threshold = __ATTR(preread_bypass_threshold, 6018 S_IRUGO | S_IWUSR, 6019 raid5_show_preread_threshold, 6020 raid5_store_preread_threshold); 6021 6022 static ssize_t 6023 raid5_show_skip_copy(struct mddev *mddev, char *page) 6024 { 6025 struct r5conf *conf; 6026 int ret = 0; 6027 spin_lock(&mddev->lock); 6028 conf = mddev->private; 6029 if (conf) 6030 ret = sprintf(page, "%d\n", conf->skip_copy); 6031 spin_unlock(&mddev->lock); 6032 return ret; 6033 } 6034 6035 static ssize_t 6036 raid5_store_skip_copy(struct mddev *mddev, const char *page, size_t len) 6037 { 6038 struct r5conf *conf; 6039 unsigned long new; 6040 int err; 6041 6042 if (len >= PAGE_SIZE) 6043 return -EINVAL; 6044 if (kstrtoul(page, 10, &new)) 6045 return -EINVAL; 6046 new = !!new; 6047 6048 err = mddev_lock(mddev); 6049 if (err) 6050 return err; 6051 conf = mddev->private; 6052 if (!conf) 6053 err = -ENODEV; 6054 else if (new != conf->skip_copy) { 6055 mddev_suspend(mddev); 6056 conf->skip_copy = new; 6057 if (new) 6058 mddev->queue->backing_dev_info.capabilities |= 6059 BDI_CAP_STABLE_WRITES; 6060 else 6061 mddev->queue->backing_dev_info.capabilities &= 6062 ~BDI_CAP_STABLE_WRITES; 6063 mddev_resume(mddev); 6064 } 6065 mddev_unlock(mddev); 6066 return err ?: len; 6067 } 6068 6069 static struct md_sysfs_entry 6070 raid5_skip_copy = __ATTR(skip_copy, S_IRUGO | S_IWUSR, 6071 raid5_show_skip_copy, 6072 raid5_store_skip_copy); 6073 6074 static ssize_t 6075 stripe_cache_active_show(struct mddev *mddev, char *page) 6076 { 6077 struct r5conf *conf = mddev->private; 6078 if (conf) 6079 return sprintf(page, "%d\n", atomic_read(&conf->active_stripes)); 6080 else 6081 return 0; 6082 } 6083 6084 static struct md_sysfs_entry 6085 raid5_stripecache_active = __ATTR_RO(stripe_cache_active); 6086 6087 static ssize_t 6088 raid5_show_group_thread_cnt(struct mddev *mddev, char *page) 6089 { 6090 struct r5conf *conf; 6091 int ret = 0; 6092 spin_lock(&mddev->lock); 6093 conf = mddev->private; 6094 if (conf) 6095 ret = sprintf(page, "%d\n", conf->worker_cnt_per_group); 6096 spin_unlock(&mddev->lock); 6097 return ret; 6098 } 6099 6100 static int alloc_thread_groups(struct r5conf *conf, int cnt, 6101 int *group_cnt, 6102 int *worker_cnt_per_group, 6103 struct r5worker_group **worker_groups); 6104 static ssize_t 6105 raid5_store_group_thread_cnt(struct mddev *mddev, const char *page, size_t len) 6106 { 6107 struct r5conf *conf; 6108 unsigned long new; 6109 int err; 6110 struct r5worker_group *new_groups, *old_groups; 6111 int group_cnt, worker_cnt_per_group; 6112 6113 if (len >= PAGE_SIZE) 6114 return -EINVAL; 6115 if (kstrtoul(page, 10, &new)) 6116 return -EINVAL; 6117 6118 err = mddev_lock(mddev); 6119 if (err) 6120 return err; 6121 conf = mddev->private; 6122 if (!conf) 6123 err = -ENODEV; 6124 else if (new != conf->worker_cnt_per_group) { 6125 mddev_suspend(mddev); 6126 6127 old_groups = conf->worker_groups; 6128 if (old_groups) 6129 flush_workqueue(raid5_wq); 6130 6131 err = alloc_thread_groups(conf, new, 6132 &group_cnt, &worker_cnt_per_group, 6133 
&new_groups); 6134 if (!err) { 6135 spin_lock_irq(&conf->device_lock); 6136 conf->group_cnt = group_cnt; 6137 conf->worker_cnt_per_group = worker_cnt_per_group; 6138 conf->worker_groups = new_groups; 6139 spin_unlock_irq(&conf->device_lock); 6140 6141 if (old_groups) 6142 kfree(old_groups[0].workers); 6143 kfree(old_groups); 6144 } 6145 mddev_resume(mddev); 6146 } 6147 mddev_unlock(mddev); 6148 6149 return err ?: len; 6150 } 6151 6152 static struct md_sysfs_entry 6153 raid5_group_thread_cnt = __ATTR(group_thread_cnt, S_IRUGO | S_IWUSR, 6154 raid5_show_group_thread_cnt, 6155 raid5_store_group_thread_cnt); 6156 6157 static struct attribute *raid5_attrs[] = { 6158 &raid5_stripecache_size.attr, 6159 &raid5_stripecache_active.attr, 6160 &raid5_preread_bypass_threshold.attr, 6161 &raid5_group_thread_cnt.attr, 6162 &raid5_skip_copy.attr, 6163 &raid5_rmw_level.attr, 6164 NULL, 6165 }; 6166 static struct attribute_group raid5_attrs_group = { 6167 .name = NULL, 6168 .attrs = raid5_attrs, 6169 }; 6170 6171 static int alloc_thread_groups(struct r5conf *conf, int cnt, 6172 int *group_cnt, 6173 int *worker_cnt_per_group, 6174 struct r5worker_group **worker_groups) 6175 { 6176 int i, j, k; 6177 ssize_t size; 6178 struct r5worker *workers; 6179 6180 *worker_cnt_per_group = cnt; 6181 if (cnt == 0) { 6182 *group_cnt = 0; 6183 *worker_groups = NULL; 6184 return 0; 6185 } 6186 *group_cnt = num_possible_nodes(); 6187 size = sizeof(struct r5worker) * cnt; 6188 workers = kzalloc(size * *group_cnt, GFP_NOIO); 6189 *worker_groups = kzalloc(sizeof(struct r5worker_group) * 6190 *group_cnt, GFP_NOIO); 6191 if (!*worker_groups || !workers) { 6192 kfree(workers); 6193 kfree(*worker_groups); 6194 return -ENOMEM; 6195 } 6196 6197 for (i = 0; i < *group_cnt; i++) { 6198 struct r5worker_group *group; 6199 6200 group = &(*worker_groups)[i]; 6201 INIT_LIST_HEAD(&group->handle_list); 6202 group->conf = conf; 6203 group->workers = workers + i * cnt; 6204 6205 for (j = 0; j < cnt; j++) { 6206 struct r5worker *worker = group->workers + j; 6207 worker->group = group; 6208 INIT_WORK(&worker->work, raid5_do_work); 6209 6210 for (k = 0; k < NR_STRIPE_HASH_LOCKS; k++) 6211 INIT_LIST_HEAD(worker->temp_inactive_list + k); 6212 } 6213 } 6214 6215 return 0; 6216 } 6217 6218 static void free_thread_groups(struct r5conf *conf) 6219 { 6220 if (conf->worker_groups) 6221 kfree(conf->worker_groups[0].workers); 6222 kfree(conf->worker_groups); 6223 conf->worker_groups = NULL; 6224 } 6225 6226 static sector_t 6227 raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks) 6228 { 6229 struct r5conf *conf = mddev->private; 6230 6231 if (!sectors) 6232 sectors = mddev->dev_sectors; 6233 if (!raid_disks) 6234 /* size is defined by the smallest of previous and new size */ 6235 raid_disks = min(conf->raid_disks, conf->previous_raid_disks); 6236 6237 sectors &= ~((sector_t)mddev->chunk_sectors - 1); 6238 sectors &= ~((sector_t)mddev->new_chunk_sectors - 1); 6239 return sectors * (raid_disks - conf->max_degraded); 6240 } 6241 6242 static void free_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) 6243 { 6244 safe_put_page(percpu->spare_page); 6245 if (percpu->scribble) 6246 flex_array_free(percpu->scribble); 6247 percpu->spare_page = NULL; 6248 percpu->scribble = NULL; 6249 } 6250 6251 static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu) 6252 { 6253 if (conf->level == 6 && !percpu->spare_page) 6254 percpu->spare_page = alloc_page(GFP_KERNEL); 6255 if (!percpu->scribble) 6256 percpu->scribble = 
scribble_alloc(max(conf->raid_disks, 6257 conf->previous_raid_disks), 6258 max(conf->chunk_sectors, 6259 conf->prev_chunk_sectors) 6260 / STRIPE_SECTORS, 6261 GFP_KERNEL); 6262 6263 if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) { 6264 free_scratch_buffer(conf, percpu); 6265 return -ENOMEM; 6266 } 6267 6268 return 0; 6269 } 6270 6271 static void raid5_free_percpu(struct r5conf *conf) 6272 { 6273 unsigned long cpu; 6274 6275 if (!conf->percpu) 6276 return; 6277 6278 #ifdef CONFIG_HOTPLUG_CPU 6279 unregister_cpu_notifier(&conf->cpu_notify); 6280 #endif 6281 6282 get_online_cpus(); 6283 for_each_possible_cpu(cpu) 6284 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); 6285 put_online_cpus(); 6286 6287 free_percpu(conf->percpu); 6288 } 6289 6290 static void free_conf(struct r5conf *conf) 6291 { 6292 if (conf->shrinker.seeks) 6293 unregister_shrinker(&conf->shrinker); 6294 free_thread_groups(conf); 6295 shrink_stripes(conf); 6296 raid5_free_percpu(conf); 6297 kfree(conf->disks); 6298 kfree(conf->stripe_hashtbl); 6299 kfree(conf); 6300 } 6301 6302 #ifdef CONFIG_HOTPLUG_CPU 6303 static int raid456_cpu_notify(struct notifier_block *nfb, unsigned long action, 6304 void *hcpu) 6305 { 6306 struct r5conf *conf = container_of(nfb, struct r5conf, cpu_notify); 6307 long cpu = (long)hcpu; 6308 struct raid5_percpu *percpu = per_cpu_ptr(conf->percpu, cpu); 6309 6310 switch (action) { 6311 case CPU_UP_PREPARE: 6312 case CPU_UP_PREPARE_FROZEN: 6313 if (alloc_scratch_buffer(conf, percpu)) { 6314 pr_err("%s: failed memory allocation for cpu%ld\n", 6315 __func__, cpu); 6316 return notifier_from_errno(-ENOMEM); 6317 } 6318 break; 6319 case CPU_DEAD: 6320 case CPU_DEAD_FROZEN: 6321 free_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); 6322 break; 6323 default: 6324 break; 6325 } 6326 return NOTIFY_OK; 6327 } 6328 #endif 6329 6330 static int raid5_alloc_percpu(struct r5conf *conf) 6331 { 6332 unsigned long cpu; 6333 int err = 0; 6334 6335 conf->percpu = alloc_percpu(struct raid5_percpu); 6336 if (!conf->percpu) 6337 return -ENOMEM; 6338 6339 #ifdef CONFIG_HOTPLUG_CPU 6340 conf->cpu_notify.notifier_call = raid456_cpu_notify; 6341 conf->cpu_notify.priority = 0; 6342 err = register_cpu_notifier(&conf->cpu_notify); 6343 if (err) 6344 return err; 6345 #endif 6346 6347 get_online_cpus(); 6348 for_each_present_cpu(cpu) { 6349 err = alloc_scratch_buffer(conf, per_cpu_ptr(conf->percpu, cpu)); 6350 if (err) { 6351 pr_err("%s: failed memory allocation for cpu%ld\n", 6352 __func__, cpu); 6353 break; 6354 } 6355 } 6356 put_online_cpus(); 6357 6358 return err; 6359 } 6360 6361 static unsigned long raid5_cache_scan(struct shrinker *shrink, 6362 struct shrink_control *sc) 6363 { 6364 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); 6365 unsigned long ret = SHRINK_STOP; 6366 6367 if (mutex_trylock(&conf->cache_size_mutex)) { 6368 ret= 0; 6369 while (ret < sc->nr_to_scan && 6370 conf->max_nr_stripes > conf->min_nr_stripes) { 6371 if (drop_one_stripe(conf) == 0) { 6372 ret = SHRINK_STOP; 6373 break; 6374 } 6375 ret++; 6376 } 6377 mutex_unlock(&conf->cache_size_mutex); 6378 } 6379 return ret; 6380 } 6381 6382 static unsigned long raid5_cache_count(struct shrinker *shrink, 6383 struct shrink_control *sc) 6384 { 6385 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); 6386 6387 if (conf->max_nr_stripes < conf->min_nr_stripes) 6388 /* unlikely, but not impossible */ 6389 return 0; 6390 return conf->max_nr_stripes - conf->min_nr_stripes; 6391 } 6392 6393 static struct 
r5conf *setup_conf(struct mddev *mddev) 6394 { 6395 struct r5conf *conf; 6396 int raid_disk, memory, max_disks; 6397 struct md_rdev *rdev; 6398 struct disk_info *disk; 6399 char pers_name[6]; 6400 int i; 6401 int group_cnt, worker_cnt_per_group; 6402 struct r5worker_group *new_group; 6403 6404 if (mddev->new_level != 5 6405 && mddev->new_level != 4 6406 && mddev->new_level != 6) { 6407 printk(KERN_ERR "md/raid:%s: raid level not set to 4/5/6 (%d)\n", 6408 mdname(mddev), mddev->new_level); 6409 return ERR_PTR(-EIO); 6410 } 6411 if ((mddev->new_level == 5 6412 && !algorithm_valid_raid5(mddev->new_layout)) || 6413 (mddev->new_level == 6 6414 && !algorithm_valid_raid6(mddev->new_layout))) { 6415 printk(KERN_ERR "md/raid:%s: layout %d not supported\n", 6416 mdname(mddev), mddev->new_layout); 6417 return ERR_PTR(-EIO); 6418 } 6419 if (mddev->new_level == 6 && mddev->raid_disks < 4) { 6420 printk(KERN_ERR "md/raid:%s: not enough configured devices (%d, minimum 4)\n", 6421 mdname(mddev), mddev->raid_disks); 6422 return ERR_PTR(-EINVAL); 6423 } 6424 6425 if (!mddev->new_chunk_sectors || 6426 (mddev->new_chunk_sectors << 9) % PAGE_SIZE || 6427 !is_power_of_2(mddev->new_chunk_sectors)) { 6428 printk(KERN_ERR "md/raid:%s: invalid chunk size %d\n", 6429 mdname(mddev), mddev->new_chunk_sectors << 9); 6430 return ERR_PTR(-EINVAL); 6431 } 6432 6433 conf = kzalloc(sizeof(struct r5conf), GFP_KERNEL); 6434 if (conf == NULL) 6435 goto abort; 6436 /* Don't enable multi-threading by default*/ 6437 if (!alloc_thread_groups(conf, 0, &group_cnt, &worker_cnt_per_group, 6438 &new_group)) { 6439 conf->group_cnt = group_cnt; 6440 conf->worker_cnt_per_group = worker_cnt_per_group; 6441 conf->worker_groups = new_group; 6442 } else 6443 goto abort; 6444 spin_lock_init(&conf->device_lock); 6445 seqcount_init(&conf->gen_lock); 6446 mutex_init(&conf->cache_size_mutex); 6447 init_waitqueue_head(&conf->wait_for_quiescent); 6448 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) { 6449 init_waitqueue_head(&conf->wait_for_stripe[i]); 6450 } 6451 init_waitqueue_head(&conf->wait_for_overlap); 6452 INIT_LIST_HEAD(&conf->handle_list); 6453 INIT_LIST_HEAD(&conf->hold_list); 6454 INIT_LIST_HEAD(&conf->delayed_list); 6455 INIT_LIST_HEAD(&conf->bitmap_list); 6456 init_llist_head(&conf->released_stripes); 6457 atomic_set(&conf->active_stripes, 0); 6458 atomic_set(&conf->preread_active_stripes, 0); 6459 atomic_set(&conf->active_aligned_reads, 0); 6460 conf->bypass_threshold = BYPASS_THRESHOLD; 6461 conf->recovery_disabled = mddev->recovery_disabled - 1; 6462 6463 conf->raid_disks = mddev->raid_disks; 6464 if (mddev->reshape_position == MaxSector) 6465 conf->previous_raid_disks = mddev->raid_disks; 6466 else 6467 conf->previous_raid_disks = mddev->raid_disks - mddev->delta_disks; 6468 max_disks = max(conf->raid_disks, conf->previous_raid_disks); 6469 6470 conf->disks = kzalloc(max_disks * sizeof(struct disk_info), 6471 GFP_KERNEL); 6472 if (!conf->disks) 6473 goto abort; 6474 6475 conf->mddev = mddev; 6476 6477 if ((conf->stripe_hashtbl = kzalloc(PAGE_SIZE, GFP_KERNEL)) == NULL) 6478 goto abort; 6479 6480 /* We init hash_locks[0] separately to that it can be used 6481 * as the reference lock in the spin_lock_nest_lock() call 6482 * in lock_all_device_hash_locks_irq in order to convince 6483 * lockdep that we know what we are doing. 
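 * (All NR_STRIPE_HASH_LOCKS locks share a single lock class, so
 *  taking them all in a loop would otherwise look like recursive
 *  locking to lockdep; the nest_lock annotation tells lockdep the
 *  whole sequence is serialised by hash_locks[0].)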
6484 */ 6485 spin_lock_init(conf->hash_locks); 6486 for (i = 1; i < NR_STRIPE_HASH_LOCKS; i++) 6487 spin_lock_init(conf->hash_locks + i); 6488 6489 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 6490 INIT_LIST_HEAD(conf->inactive_list + i); 6491 6492 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) 6493 INIT_LIST_HEAD(conf->temp_inactive_list + i); 6494 6495 conf->level = mddev->new_level; 6496 conf->chunk_sectors = mddev->new_chunk_sectors; 6497 if (raid5_alloc_percpu(conf) != 0) 6498 goto abort; 6499 6500 pr_debug("raid456: run(%s) called.\n", mdname(mddev)); 6501 6502 rdev_for_each(rdev, mddev) { 6503 raid_disk = rdev->raid_disk; 6504 if (raid_disk >= max_disks 6505 || raid_disk < 0) 6506 continue; 6507 disk = conf->disks + raid_disk; 6508 6509 if (test_bit(Replacement, &rdev->flags)) { 6510 if (disk->replacement) 6511 goto abort; 6512 disk->replacement = rdev; 6513 } else { 6514 if (disk->rdev) 6515 goto abort; 6516 disk->rdev = rdev; 6517 } 6518 6519 if (test_bit(In_sync, &rdev->flags)) { 6520 char b[BDEVNAME_SIZE]; 6521 printk(KERN_INFO "md/raid:%s: device %s operational as raid" 6522 " disk %d\n", 6523 mdname(mddev), bdevname(rdev->bdev, b), raid_disk); 6524 } else if (rdev->saved_raid_disk != raid_disk) 6525 /* Cannot rely on bitmap to complete recovery */ 6526 conf->fullsync = 1; 6527 } 6528 6529 conf->level = mddev->new_level; 6530 if (conf->level == 6) { 6531 conf->max_degraded = 2; 6532 if (raid6_call.xor_syndrome) 6533 conf->rmw_level = PARITY_ENABLE_RMW; 6534 else 6535 conf->rmw_level = PARITY_DISABLE_RMW; 6536 } else { 6537 conf->max_degraded = 1; 6538 conf->rmw_level = PARITY_ENABLE_RMW; 6539 } 6540 conf->algorithm = mddev->new_layout; 6541 conf->reshape_progress = mddev->reshape_position; 6542 if (conf->reshape_progress != MaxSector) { 6543 conf->prev_chunk_sectors = mddev->chunk_sectors; 6544 conf->prev_algo = mddev->layout; 6545 } 6546 6547 conf->min_nr_stripes = NR_STRIPES; 6548 memory = conf->min_nr_stripes * (sizeof(struct stripe_head) + 6549 max_disks * ((sizeof(struct bio) + PAGE_SIZE))) / 1024; 6550 atomic_set(&conf->empty_inactive_list_nr, NR_STRIPE_HASH_LOCKS); 6551 if (grow_stripes(conf, conf->min_nr_stripes)) { 6552 printk(KERN_ERR 6553 "md/raid:%s: couldn't allocate %dkB for buffers\n", 6554 mdname(mddev), memory); 6555 goto abort; 6556 } else 6557 printk(KERN_INFO "md/raid:%s: allocated %dkB\n", 6558 mdname(mddev), memory); 6559 /* 6560 * Losing a stripe head costs more than the time to refill it, 6561 * it reduces the queue depth and so can hurt throughput. 6562 * So set it rather large, scaled by number of devices. 
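 * (For example, with raid_disks == 8 the cost reported to the
 *  shrinker core is DEFAULT_SEEKS * 32, so stripe heads are only
 *  reclaimed under fairly heavy memory pressure.)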
6563 */ 6564 conf->shrinker.seeks = DEFAULT_SEEKS * conf->raid_disks * 4; 6565 conf->shrinker.scan_objects = raid5_cache_scan; 6566 conf->shrinker.count_objects = raid5_cache_count; 6567 conf->shrinker.batch = 128; 6568 conf->shrinker.flags = 0; 6569 register_shrinker(&conf->shrinker); 6570 6571 sprintf(pers_name, "raid%d", mddev->new_level); 6572 conf->thread = md_register_thread(raid5d, mddev, pers_name); 6573 if (!conf->thread) { 6574 printk(KERN_ERR 6575 "md/raid:%s: couldn't allocate thread.\n", 6576 mdname(mddev)); 6577 goto abort; 6578 } 6579 6580 return conf; 6581 6582 abort: 6583 if (conf) { 6584 free_conf(conf); 6585 return ERR_PTR(-EIO); 6586 } else 6587 return ERR_PTR(-ENOMEM); 6588 } 6589 6590 static int only_parity(int raid_disk, int algo, int raid_disks, int max_degraded) 6591 { 6592 switch (algo) { 6593 case ALGORITHM_PARITY_0: 6594 if (raid_disk < max_degraded) 6595 return 1; 6596 break; 6597 case ALGORITHM_PARITY_N: 6598 if (raid_disk >= raid_disks - max_degraded) 6599 return 1; 6600 break; 6601 case ALGORITHM_PARITY_0_6: 6602 if (raid_disk == 0 || 6603 raid_disk == raid_disks - 1) 6604 return 1; 6605 break; 6606 case ALGORITHM_LEFT_ASYMMETRIC_6: 6607 case ALGORITHM_RIGHT_ASYMMETRIC_6: 6608 case ALGORITHM_LEFT_SYMMETRIC_6: 6609 case ALGORITHM_RIGHT_SYMMETRIC_6: 6610 if (raid_disk == raid_disks - 1) 6611 return 1; 6612 } 6613 return 0; 6614 } 6615 6616 static int run(struct mddev *mddev) 6617 { 6618 struct r5conf *conf; 6619 int working_disks = 0; 6620 int dirty_parity_disks = 0; 6621 struct md_rdev *rdev; 6622 sector_t reshape_offset = 0; 6623 int i; 6624 long long min_offset_diff = 0; 6625 int first = 1; 6626 6627 if (mddev->recovery_cp != MaxSector) 6628 printk(KERN_NOTICE "md/raid:%s: not clean" 6629 " -- starting background reconstruction\n", 6630 mdname(mddev)); 6631 6632 rdev_for_each(rdev, mddev) { 6633 long long diff; 6634 if (rdev->raid_disk < 0) 6635 continue; 6636 diff = (rdev->new_data_offset - rdev->data_offset); 6637 if (first) { 6638 min_offset_diff = diff; 6639 first = 0; 6640 } else if (mddev->reshape_backwards && 6641 diff < min_offset_diff) 6642 min_offset_diff = diff; 6643 else if (!mddev->reshape_backwards && 6644 diff > min_offset_diff) 6645 min_offset_diff = diff; 6646 } 6647 6648 if (mddev->reshape_position != MaxSector) { 6649 /* Check that we can continue the reshape. 6650 * Difficulties arise if the stripe we would write to 6651 * next is at or after the stripe we would read from next. 6652 * For a reshape that changes the number of devices, this 6653 * is only possible for a very short time, and mdadm makes 6654 * sure that time appears to have past before assembling 6655 * the array. So we fail if that time hasn't passed. 6656 * For a reshape that keeps the number of devices the same 6657 * mdadm must be monitoring the reshape can keeping the 6658 * critical areas read-only and backed up. It will start 6659 * the array in read-only mode, so we check for that. 6660 */ 6661 sector_t here_new, here_old; 6662 int old_disks; 6663 int max_degraded = (mddev->level == 6 ? 2 : 1); 6664 6665 if (mddev->new_level != mddev->level) { 6666 printk(KERN_ERR "md/raid:%s: unsupported reshape " 6667 "required - aborting.\n", 6668 mdname(mddev)); 6669 return -EINVAL; 6670 } 6671 old_disks = mddev->raid_disks - mddev->delta_disks; 6672 /* reshape_position must be on a new-stripe boundary, and one 6673 * further up in new geometry must map after here in old 6674 * geometry. 
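 * ("new-stripe boundary" means a multiple of
 *  new_chunk_sectors * (raid_disks - max_degraded) sectors; e.g. a
 *  6-device RAID5 with 512-sector chunks needs reshape_position to
 *  be a multiple of 5 * 512 = 2560 sectors.)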
6675 */ 6676 here_new = mddev->reshape_position; 6677 if (sector_div(here_new, mddev->new_chunk_sectors * 6678 (mddev->raid_disks - max_degraded))) { 6679 printk(KERN_ERR "md/raid:%s: reshape_position not " 6680 "on a stripe boundary\n", mdname(mddev)); 6681 return -EINVAL; 6682 } 6683 reshape_offset = here_new * mddev->new_chunk_sectors; 6684 /* here_new is the stripe we will write to */ 6685 here_old = mddev->reshape_position; 6686 sector_div(here_old, mddev->chunk_sectors * 6687 (old_disks-max_degraded)); 6688 /* here_old is the first stripe that we might need to read 6689 * from */ 6690 if (mddev->delta_disks == 0) { 6691 if ((here_new * mddev->new_chunk_sectors != 6692 here_old * mddev->chunk_sectors)) { 6693 printk(KERN_ERR "md/raid:%s: reshape position is" 6694 " confused - aborting\n", mdname(mddev)); 6695 return -EINVAL; 6696 } 6697 /* We cannot be sure it is safe to start an in-place 6698 * reshape. It is only safe if user-space is monitoring 6699 * and taking constant backups. 6700 * mdadm always starts a situation like this in 6701 * readonly mode so it can take control before 6702 * allowing any writes. So just check for that. 6703 */ 6704 if (abs(min_offset_diff) >= mddev->chunk_sectors && 6705 abs(min_offset_diff) >= mddev->new_chunk_sectors) 6706 /* not really in-place - so OK */; 6707 else if (mddev->ro == 0) { 6708 printk(KERN_ERR "md/raid:%s: in-place reshape " 6709 "must be started in read-only mode " 6710 "- aborting\n", 6711 mdname(mddev)); 6712 return -EINVAL; 6713 } 6714 } else if (mddev->reshape_backwards 6715 ? (here_new * mddev->new_chunk_sectors + min_offset_diff <= 6716 here_old * mddev->chunk_sectors) 6717 : (here_new * mddev->new_chunk_sectors >= 6718 here_old * mddev->chunk_sectors + (-min_offset_diff))) { 6719 /* Reading from the same stripe as writing to - bad */ 6720 printk(KERN_ERR "md/raid:%s: reshape_position too early for " 6721 "auto-recovery - aborting.\n", 6722 mdname(mddev)); 6723 return -EINVAL; 6724 } 6725 printk(KERN_INFO "md/raid:%s: reshape will continue\n", 6726 mdname(mddev)); 6727 /* OK, we should be able to continue; */ 6728 } else { 6729 BUG_ON(mddev->level != mddev->new_level); 6730 BUG_ON(mddev->layout != mddev->new_layout); 6731 BUG_ON(mddev->chunk_sectors != mddev->new_chunk_sectors); 6732 BUG_ON(mddev->delta_disks != 0); 6733 } 6734 6735 if (mddev->private == NULL) 6736 conf = setup_conf(mddev); 6737 else 6738 conf = mddev->private; 6739 6740 if (IS_ERR(conf)) 6741 return PTR_ERR(conf); 6742 6743 conf->min_offset_diff = min_offset_diff; 6744 mddev->thread = conf->thread; 6745 conf->thread = NULL; 6746 mddev->private = conf; 6747 6748 for (i = 0; i < conf->raid_disks && conf->previous_raid_disks; 6749 i++) { 6750 rdev = conf->disks[i].rdev; 6751 if (!rdev && conf->disks[i].replacement) { 6752 /* The replacement is all we have yet */ 6753 rdev = conf->disks[i].replacement; 6754 conf->disks[i].replacement = NULL; 6755 clear_bit(Replacement, &rdev->flags); 6756 conf->disks[i].rdev = rdev; 6757 } 6758 if (!rdev) 6759 continue; 6760 if (conf->disks[i].replacement && 6761 conf->reshape_progress != MaxSector) { 6762 /* replacements and reshape simply do not mix. */ 6763 printk(KERN_ERR "md: cannot handle concurrent " 6764 "replacement and reshape.\n"); 6765 goto abort; 6766 } 6767 if (test_bit(In_sync, &rdev->flags)) { 6768 working_disks++; 6769 continue; 6770 } 6771 /* This disc is not fully in-sync. 
However if it 6772 * just stored parity (beyond the recovery_offset), 6773 * when we don't need to be concerned about the 6774 * array being dirty. 6775 * When reshape goes 'backwards', we never have 6776 * partially completed devices, so we only need 6777 * to worry about reshape going forwards. 6778 */ 6779 /* Hack because v0.91 doesn't store recovery_offset properly. */ 6780 if (mddev->major_version == 0 && 6781 mddev->minor_version > 90) 6782 rdev->recovery_offset = reshape_offset; 6783 6784 if (rdev->recovery_offset < reshape_offset) { 6785 /* We need to check old and new layout */ 6786 if (!only_parity(rdev->raid_disk, 6787 conf->algorithm, 6788 conf->raid_disks, 6789 conf->max_degraded)) 6790 continue; 6791 } 6792 if (!only_parity(rdev->raid_disk, 6793 conf->prev_algo, 6794 conf->previous_raid_disks, 6795 conf->max_degraded)) 6796 continue; 6797 dirty_parity_disks++; 6798 } 6799 6800 /* 6801 * 0 for a fully functional array, 1 or 2 for a degraded array. 6802 */ 6803 mddev->degraded = calc_degraded(conf); 6804 6805 if (has_failed(conf)) { 6806 printk(KERN_ERR "md/raid:%s: not enough operational devices" 6807 " (%d/%d failed)\n", 6808 mdname(mddev), mddev->degraded, conf->raid_disks); 6809 goto abort; 6810 } 6811 6812 /* device size must be a multiple of chunk size */ 6813 mddev->dev_sectors &= ~(mddev->chunk_sectors - 1); 6814 mddev->resync_max_sectors = mddev->dev_sectors; 6815 6816 if (mddev->degraded > dirty_parity_disks && 6817 mddev->recovery_cp != MaxSector) { 6818 if (mddev->ok_start_degraded) 6819 printk(KERN_WARNING 6820 "md/raid:%s: starting dirty degraded array" 6821 " - data corruption possible.\n", 6822 mdname(mddev)); 6823 else { 6824 printk(KERN_ERR 6825 "md/raid:%s: cannot start dirty degraded array.\n", 6826 mdname(mddev)); 6827 goto abort; 6828 } 6829 } 6830 6831 if (mddev->degraded == 0) 6832 printk(KERN_INFO "md/raid:%s: raid level %d active with %d out of %d" 6833 " devices, algorithm %d\n", mdname(mddev), conf->level, 6834 mddev->raid_disks-mddev->degraded, mddev->raid_disks, 6835 mddev->new_layout); 6836 else 6837 printk(KERN_ALERT "md/raid:%s: raid level %d active with %d" 6838 " out of %d devices, algorithm %d\n", 6839 mdname(mddev), conf->level, 6840 mddev->raid_disks - mddev->degraded, 6841 mddev->raid_disks, mddev->new_layout); 6842 6843 print_raid5_conf(conf); 6844 6845 if (conf->reshape_progress != MaxSector) { 6846 conf->reshape_safe = conf->reshape_progress; 6847 atomic_set(&conf->reshape_stripes, 0); 6848 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 6849 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 6850 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 6851 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 6852 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 6853 "reshape"); 6854 } 6855 6856 /* Ok, everything is just fine now */ 6857 if (mddev->to_remove == &raid5_attrs_group) 6858 mddev->to_remove = NULL; 6859 else if (mddev->kobj.sd && 6860 sysfs_create_group(&mddev->kobj, &raid5_attrs_group)) 6861 printk(KERN_WARNING 6862 "raid5: failed to create sysfs attributes for %s\n", 6863 mdname(mddev)); 6864 md_set_array_sectors(mddev, raid5_size(mddev, 0, 0)); 6865 6866 if (mddev->queue) { 6867 int chunk_size; 6868 bool discard_supported = true; 6869 /* read-ahead size must cover two whole stripes, which 6870 * is 2 * (datadisks) * chunksize where 'n' is the 6871 * number of raid devices 6872 */ 6873 int data_disks = conf->previous_raid_disks - conf->max_degraded; 6874 int stripe = data_disks * 6875 ((mddev->chunk_sectors << 9) / PAGE_SIZE); 
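		/* 'stripe' is the number of pages in one full stripe of data:
		 * e.g. 4 data disks with 512K chunks give 4 * (524288 / 4096) =
		 * 512 pages, so read-ahead below is raised to at least 1024
		 * pages (4MB with 4K pages).
		 */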
6876 if (mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 6877 mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 6878 6879 chunk_size = mddev->chunk_sectors << 9; 6880 blk_queue_io_min(mddev->queue, chunk_size); 6881 blk_queue_io_opt(mddev->queue, chunk_size * 6882 (conf->raid_disks - conf->max_degraded)); 6883 mddev->queue->limits.raid_partial_stripes_expensive = 1; 6884 /* 6885 * We can only discard a whole stripe. It doesn't make sense to 6886 * discard the data disks but write the parity disk 6887 */ 6888 stripe = stripe * PAGE_SIZE; 6889 /* Round up to power of 2, as discard handling 6890 * currently assumes that */ 6891 while ((stripe-1) & stripe) 6892 stripe = (stripe | (stripe-1)) + 1; 6893 mddev->queue->limits.discard_alignment = stripe; 6894 mddev->queue->limits.discard_granularity = stripe; 6895 /* 6896 * the unaligned part of a discard request will be ignored, so we can't 6897 * guarantee discard_zeroes_data 6898 */ 6899 mddev->queue->limits.discard_zeroes_data = 0; 6900 6901 blk_queue_max_write_same_sectors(mddev->queue, 0); 6902 6903 rdev_for_each(rdev, mddev) { 6904 disk_stack_limits(mddev->gendisk, rdev->bdev, 6905 rdev->data_offset << 9); 6906 disk_stack_limits(mddev->gendisk, rdev->bdev, 6907 rdev->new_data_offset << 9); 6908 /* 6909 * discard_zeroes_data is required, otherwise data 6910 * could be lost. Consider a scenario: discard a stripe 6911 * (the stripe could be inconsistent if 6912 * discard_zeroes_data is 0); write one disk of the 6913 * stripe (the stripe could be inconsistent again 6914 * depending on which disks are used to calculate 6915 * parity); the disk is broken; the stripe data of this 6916 * disk is lost. 6917 */ 6918 if (!blk_queue_discard(bdev_get_queue(rdev->bdev)) || 6919 !bdev_get_queue(rdev->bdev)-> 6920 limits.discard_zeroes_data) 6921 discard_supported = false; 6922 /* Unfortunately, discard_zeroes_data is not currently 6923 * a guarantee - just a hint. So we only allow DISCARD 6924 * if the sysadmin has confirmed that only safe devices 6925 * are in use by setting a module parameter.
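 * (raid456.devices_handle_discard_safely, checked just below)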
6926 */ 6927 if (!devices_handle_discard_safely) { 6928 if (discard_supported) { 6929 pr_info("md/raid456: discard support disabled due to uncertainty.\n"); 6930 pr_info("Set raid456.devices_handle_discard_safely=Y to override.\n"); 6931 } 6932 discard_supported = false; 6933 } 6934 } 6935 6936 if (discard_supported && 6937 mddev->queue->limits.max_discard_sectors >= stripe && 6938 mddev->queue->limits.discard_granularity >= stripe) 6939 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, 6940 mddev->queue); 6941 else 6942 queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, 6943 mddev->queue); 6944 } 6945 6946 return 0; 6947 abort: 6948 md_unregister_thread(&mddev->thread); 6949 print_raid5_conf(conf); 6950 free_conf(conf); 6951 mddev->private = NULL; 6952 printk(KERN_ALERT "md/raid:%s: failed to run raid set.\n", mdname(mddev)); 6953 return -EIO; 6954 } 6955 6956 static void raid5_free(struct mddev *mddev, void *priv) 6957 { 6958 struct r5conf *conf = priv; 6959 6960 free_conf(conf); 6961 mddev->to_remove = &raid5_attrs_group; 6962 } 6963 6964 static void status(struct seq_file *seq, struct mddev *mddev) 6965 { 6966 struct r5conf *conf = mddev->private; 6967 int i; 6968 6969 seq_printf(seq, " level %d, %dk chunk, algorithm %d", mddev->level, 6970 mddev->chunk_sectors / 2, mddev->layout); 6971 seq_printf (seq, " [%d/%d] [", conf->raid_disks, conf->raid_disks - mddev->degraded); 6972 for (i = 0; i < conf->raid_disks; i++) 6973 seq_printf (seq, "%s", 6974 conf->disks[i].rdev && 6975 test_bit(In_sync, &conf->disks[i].rdev->flags) ? "U" : "_"); 6976 seq_printf (seq, "]"); 6977 } 6978 6979 static void print_raid5_conf (struct r5conf *conf) 6980 { 6981 int i; 6982 struct disk_info *tmp; 6983 6984 printk(KERN_DEBUG "RAID conf printout:\n"); 6985 if (!conf) { 6986 printk("(conf==NULL)\n"); 6987 return; 6988 } 6989 printk(KERN_DEBUG " --- level:%d rd:%d wd:%d\n", conf->level, 6990 conf->raid_disks, 6991 conf->raid_disks - conf->mddev->degraded); 6992 6993 for (i = 0; i < conf->raid_disks; i++) { 6994 char b[BDEVNAME_SIZE]; 6995 tmp = conf->disks + i; 6996 if (tmp->rdev) 6997 printk(KERN_DEBUG " disk %d, o:%d, dev:%s\n", 6998 i, !test_bit(Faulty, &tmp->rdev->flags), 6999 bdevname(tmp->rdev->bdev, b)); 7000 } 7001 } 7002 7003 static int raid5_spare_active(struct mddev *mddev) 7004 { 7005 int i; 7006 struct r5conf *conf = mddev->private; 7007 struct disk_info *tmp; 7008 int count = 0; 7009 unsigned long flags; 7010 7011 for (i = 0; i < conf->raid_disks; i++) { 7012 tmp = conf->disks + i; 7013 if (tmp->replacement 7014 && tmp->replacement->recovery_offset == MaxSector 7015 && !test_bit(Faulty, &tmp->replacement->flags) 7016 && !test_and_set_bit(In_sync, &tmp->replacement->flags)) { 7017 /* Replacement has just become active. */ 7018 if (!tmp->rdev 7019 || !test_and_clear_bit(In_sync, &tmp->rdev->flags)) 7020 count++; 7021 if (tmp->rdev) { 7022 /* Replaced device not technically faulty, 7023 * but we need to be sure it gets removed 7024 * and never re-added. 
7025 */ 7026 set_bit(Faulty, &tmp->rdev->flags); 7027 sysfs_notify_dirent_safe( 7028 tmp->rdev->sysfs_state); 7029 } 7030 sysfs_notify_dirent_safe(tmp->replacement->sysfs_state); 7031 } else if (tmp->rdev 7032 && tmp->rdev->recovery_offset == MaxSector 7033 && !test_bit(Faulty, &tmp->rdev->flags) 7034 && !test_and_set_bit(In_sync, &tmp->rdev->flags)) { 7035 count++; 7036 sysfs_notify_dirent_safe(tmp->rdev->sysfs_state); 7037 } 7038 } 7039 spin_lock_irqsave(&conf->device_lock, flags); 7040 mddev->degraded = calc_degraded(conf); 7041 spin_unlock_irqrestore(&conf->device_lock, flags); 7042 print_raid5_conf(conf); 7043 return count; 7044 } 7045 7046 static int raid5_remove_disk(struct mddev *mddev, struct md_rdev *rdev) 7047 { 7048 struct r5conf *conf = mddev->private; 7049 int err = 0; 7050 int number = rdev->raid_disk; 7051 struct md_rdev **rdevp; 7052 struct disk_info *p = conf->disks + number; 7053 7054 print_raid5_conf(conf); 7055 if (rdev == p->rdev) 7056 rdevp = &p->rdev; 7057 else if (rdev == p->replacement) 7058 rdevp = &p->replacement; 7059 else 7060 return 0; 7061 7062 if (number >= conf->raid_disks && 7063 conf->reshape_progress == MaxSector) 7064 clear_bit(In_sync, &rdev->flags); 7065 7066 if (test_bit(In_sync, &rdev->flags) || 7067 atomic_read(&rdev->nr_pending)) { 7068 err = -EBUSY; 7069 goto abort; 7070 } 7071 /* Only remove non-faulty devices if recovery 7072 * isn't possible. 7073 */ 7074 if (!test_bit(Faulty, &rdev->flags) && 7075 mddev->recovery_disabled != conf->recovery_disabled && 7076 !has_failed(conf) && 7077 (!p->replacement || p->replacement == rdev) && 7078 number < conf->raid_disks) { 7079 err = -EBUSY; 7080 goto abort; 7081 } 7082 *rdevp = NULL; 7083 synchronize_rcu(); 7084 if (atomic_read(&rdev->nr_pending)) { 7085 /* lost the race, try later */ 7086 err = -EBUSY; 7087 *rdevp = rdev; 7088 } else if (p->replacement) { 7089 /* We must have just cleared 'rdev' */ 7090 p->rdev = p->replacement; 7091 clear_bit(Replacement, &p->replacement->flags); 7092 smp_mb(); /* Make sure other CPUs may see both as identical 7093 * but will never see neither - if they are careful 7094 */ 7095 p->replacement = NULL; 7096 clear_bit(WantReplacement, &rdev->flags); 7097 } else 7098 /* We might have just removed the Replacement as faulty- 7099 * clear the bit just in case 7100 */ 7101 clear_bit(WantReplacement, &rdev->flags); 7102 abort: 7103 7104 print_raid5_conf(conf); 7105 return err; 7106 } 7107 7108 static int raid5_add_disk(struct mddev *mddev, struct md_rdev *rdev) 7109 { 7110 struct r5conf *conf = mddev->private; 7111 int err = -EEXIST; 7112 int disk; 7113 struct disk_info *p; 7114 int first = 0; 7115 int last = conf->raid_disks - 1; 7116 7117 if (mddev->recovery_disabled == conf->recovery_disabled) 7118 return -EBUSY; 7119 7120 if (rdev->saved_raid_disk < 0 && has_failed(conf)) 7121 /* no point adding a device */ 7122 return -EINVAL; 7123 7124 if (rdev->raid_disk >= 0) 7125 first = last = rdev->raid_disk; 7126 7127 /* 7128 * find the disk ... but prefer rdev->saved_raid_disk 7129 * if possible. 
7130 */ 7131 if (rdev->saved_raid_disk >= 0 && 7132 rdev->saved_raid_disk >= first && 7133 conf->disks[rdev->saved_raid_disk].rdev == NULL) 7134 first = rdev->saved_raid_disk; 7135 7136 for (disk = first; disk <= last; disk++) { 7137 p = conf->disks + disk; 7138 if (p->rdev == NULL) { 7139 clear_bit(In_sync, &rdev->flags); 7140 rdev->raid_disk = disk; 7141 err = 0; 7142 if (rdev->saved_raid_disk != disk) 7143 conf->fullsync = 1; 7144 rcu_assign_pointer(p->rdev, rdev); 7145 goto out; 7146 } 7147 } 7148 for (disk = first; disk <= last; disk++) { 7149 p = conf->disks + disk; 7150 if (test_bit(WantReplacement, &p->rdev->flags) && 7151 p->replacement == NULL) { 7152 clear_bit(In_sync, &rdev->flags); 7153 set_bit(Replacement, &rdev->flags); 7154 rdev->raid_disk = disk; 7155 err = 0; 7156 conf->fullsync = 1; 7157 rcu_assign_pointer(p->replacement, rdev); 7158 break; 7159 } 7160 } 7161 out: 7162 print_raid5_conf(conf); 7163 return err; 7164 } 7165 7166 static int raid5_resize(struct mddev *mddev, sector_t sectors) 7167 { 7168 /* no resync is happening, and there is enough space 7169 * on all devices, so we can resize. 7170 * We need to make sure resync covers any new space. 7171 * If the array is shrinking we should possibly wait until 7172 * any io in the removed space completes, but it hardly seems 7173 * worth it. 7174 */ 7175 sector_t newsize; 7176 sectors &= ~((sector_t)mddev->chunk_sectors - 1); 7177 newsize = raid5_size(mddev, sectors, mddev->raid_disks); 7178 if (mddev->external_size && 7179 mddev->array_sectors > newsize) 7180 return -EINVAL; 7181 if (mddev->bitmap) { 7182 int ret = bitmap_resize(mddev->bitmap, sectors, 0, 0); 7183 if (ret) 7184 return ret; 7185 } 7186 md_set_array_sectors(mddev, newsize); 7187 set_capacity(mddev->gendisk, mddev->array_sectors); 7188 revalidate_disk(mddev->gendisk); 7189 if (sectors > mddev->dev_sectors && 7190 mddev->recovery_cp > mddev->dev_sectors) { 7191 mddev->recovery_cp = mddev->dev_sectors; 7192 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery); 7193 } 7194 mddev->dev_sectors = sectors; 7195 mddev->resync_max_sectors = sectors; 7196 return 0; 7197 } 7198 7199 static int check_stripe_cache(struct mddev *mddev) 7200 { 7201 /* Can only proceed if there are plenty of stripe_heads. 7202 * We need a minimum of one full stripe,, and for sensible progress 7203 * it is best to have about 4 times that. 7204 * If we require 4 times, then the default 256 4K stripe_heads will 7205 * allow for chunk sizes up to 256K, which is probably OK. 7206 * If the chunk size is greater, user-space should request more 7207 * stripe_heads first. 7208 */ 7209 struct r5conf *conf = mddev->private; 7210 if (((mddev->chunk_sectors << 9) / STRIPE_SIZE) * 4 7211 > conf->min_nr_stripes || 7212 ((mddev->new_chunk_sectors << 9) / STRIPE_SIZE) * 4 7213 > conf->min_nr_stripes) { 7214 printk(KERN_WARNING "md/raid:%s: reshape: not enough stripes. Needed %lu\n", 7215 mdname(mddev), 7216 ((max(mddev->chunk_sectors, mddev->new_chunk_sectors) << 9) 7217 / STRIPE_SIZE)*4); 7218 return 0; 7219 } 7220 return 1; 7221 } 7222 7223 static int check_reshape(struct mddev *mddev) 7224 { 7225 struct r5conf *conf = mddev->private; 7226 7227 if (mddev->delta_disks == 0 && 7228 mddev->new_layout == mddev->layout && 7229 mddev->new_chunk_sectors == mddev->chunk_sectors) 7230 return 0; /* nothing to do */ 7231 if (has_failed(conf)) 7232 return -EINVAL; 7233 if (mddev->delta_disks < 0 && mddev->reshape_position == MaxSector) { 7234 /* We might be able to shrink, but the devices must 7235 * be made bigger first. 
7236 * For raid6, 4 is the minimum size. 7237 * Otherwise 2 is the minimum 7238 */ 7239 int min = 2; 7240 if (mddev->level == 6) 7241 min = 4; 7242 if (mddev->raid_disks + mddev->delta_disks < min) 7243 return -EINVAL; 7244 } 7245 7246 if (!check_stripe_cache(mddev)) 7247 return -ENOSPC; 7248 7249 if (mddev->new_chunk_sectors > mddev->chunk_sectors || 7250 mddev->delta_disks > 0) 7251 if (resize_chunks(conf, 7252 conf->previous_raid_disks 7253 + max(0, mddev->delta_disks), 7254 max(mddev->new_chunk_sectors, 7255 mddev->chunk_sectors) 7256 ) < 0) 7257 return -ENOMEM; 7258 return resize_stripes(conf, (conf->previous_raid_disks 7259 + mddev->delta_disks)); 7260 } 7261 7262 static int raid5_start_reshape(struct mddev *mddev) 7263 { 7264 struct r5conf *conf = mddev->private; 7265 struct md_rdev *rdev; 7266 int spares = 0; 7267 unsigned long flags; 7268 7269 if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery)) 7270 return -EBUSY; 7271 7272 if (!check_stripe_cache(mddev)) 7273 return -ENOSPC; 7274 7275 if (has_failed(conf)) 7276 return -EINVAL; 7277 7278 rdev_for_each(rdev, mddev) { 7279 if (!test_bit(In_sync, &rdev->flags) 7280 && !test_bit(Faulty, &rdev->flags)) 7281 spares++; 7282 } 7283 7284 if (spares - mddev->degraded < mddev->delta_disks - conf->max_degraded) 7285 /* Not enough devices even to make a degraded array 7286 * of that size 7287 */ 7288 return -EINVAL; 7289 7290 /* Refuse to reduce size of the array. Any reductions in 7291 * array size must be through explicit setting of array_size 7292 * attribute. 7293 */ 7294 if (raid5_size(mddev, 0, conf->raid_disks + mddev->delta_disks) 7295 < mddev->array_sectors) { 7296 printk(KERN_ERR "md/raid:%s: array size must be reduced " 7297 "before number of disks\n", mdname(mddev)); 7298 return -EINVAL; 7299 } 7300 7301 atomic_set(&conf->reshape_stripes, 0); 7302 spin_lock_irq(&conf->device_lock); 7303 write_seqcount_begin(&conf->gen_lock); 7304 conf->previous_raid_disks = conf->raid_disks; 7305 conf->raid_disks += mddev->delta_disks; 7306 conf->prev_chunk_sectors = conf->chunk_sectors; 7307 conf->chunk_sectors = mddev->new_chunk_sectors; 7308 conf->prev_algo = conf->algorithm; 7309 conf->algorithm = mddev->new_layout; 7310 conf->generation++; 7311 /* Code that selects data_offset needs to see the generation update 7312 * if reshape_progress has been set - so a memory barrier needed. 7313 */ 7314 smp_mb(); 7315 if (mddev->reshape_backwards) 7316 conf->reshape_progress = raid5_size(mddev, 0, 0); 7317 else 7318 conf->reshape_progress = 0; 7319 conf->reshape_safe = conf->reshape_progress; 7320 write_seqcount_end(&conf->gen_lock); 7321 spin_unlock_irq(&conf->device_lock); 7322 7323 /* Now make sure any requests that proceeded on the assumption 7324 * the reshape wasn't running - like Discard or Read - have 7325 * completed. 7326 */ 7327 mddev_suspend(mddev); 7328 mddev_resume(mddev); 7329 7330 /* Add some new drives, as many as will fit. 7331 * We know there are enough to make the newly sized array work. 7332 * Don't add devices if we are reducing the number of 7333 * devices in the array. This is because it is not possible 7334 * to correctly record the "partially reconstructed" state of 7335 * such devices during the reshape and confusion could result. 
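 * (hence the mddev->delta_disks >= 0 test below: new spares are only
 *  pulled in when the array is growing or staying the same size)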
7336 */ 7337 if (mddev->delta_disks >= 0) { 7338 rdev_for_each(rdev, mddev) 7339 if (rdev->raid_disk < 0 && 7340 !test_bit(Faulty, &rdev->flags)) { 7341 if (raid5_add_disk(mddev, rdev) == 0) { 7342 if (rdev->raid_disk 7343 >= conf->previous_raid_disks) 7344 set_bit(In_sync, &rdev->flags); 7345 else 7346 rdev->recovery_offset = 0; 7347 7348 if (sysfs_link_rdev(mddev, rdev)) 7349 /* Failure here is OK */; 7350 } 7351 } else if (rdev->raid_disk >= conf->previous_raid_disks 7352 && !test_bit(Faulty, &rdev->flags)) { 7353 /* This is a spare that was manually added */ 7354 set_bit(In_sync, &rdev->flags); 7355 } 7356 7357 /* When a reshape changes the number of devices, 7358 * ->degraded is measured against the larger of the 7359 * pre and post number of devices. 7360 */ 7361 spin_lock_irqsave(&conf->device_lock, flags); 7362 mddev->degraded = calc_degraded(conf); 7363 spin_unlock_irqrestore(&conf->device_lock, flags); 7364 } 7365 mddev->raid_disks = conf->raid_disks; 7366 mddev->reshape_position = conf->reshape_progress; 7367 set_bit(MD_CHANGE_DEVS, &mddev->flags); 7368 7369 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 7370 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 7371 clear_bit(MD_RECOVERY_DONE, &mddev->recovery); 7372 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 7373 set_bit(MD_RECOVERY_RUNNING, &mddev->recovery); 7374 mddev->sync_thread = md_register_thread(md_do_sync, mddev, 7375 "reshape"); 7376 if (!mddev->sync_thread) { 7377 mddev->recovery = 0; 7378 spin_lock_irq(&conf->device_lock); 7379 write_seqcount_begin(&conf->gen_lock); 7380 mddev->raid_disks = conf->raid_disks = conf->previous_raid_disks; 7381 mddev->new_chunk_sectors = 7382 conf->chunk_sectors = conf->prev_chunk_sectors; 7383 mddev->new_layout = conf->algorithm = conf->prev_algo; 7384 rdev_for_each(rdev, mddev) 7385 rdev->new_data_offset = rdev->data_offset; 7386 smp_wmb(); 7387 conf->generation --; 7388 conf->reshape_progress = MaxSector; 7389 mddev->reshape_position = MaxSector; 7390 write_seqcount_end(&conf->gen_lock); 7391 spin_unlock_irq(&conf->device_lock); 7392 return -EAGAIN; 7393 } 7394 conf->reshape_checkpoint = jiffies; 7395 md_wakeup_thread(mddev->sync_thread); 7396 md_new_event(mddev); 7397 return 0; 7398 } 7399 7400 /* This is called from the reshape thread and should make any 7401 * changes needed in 'conf' 7402 */ 7403 static void end_reshape(struct r5conf *conf) 7404 { 7405 7406 if (!test_bit(MD_RECOVERY_INTR, &conf->mddev->recovery)) { 7407 struct md_rdev *rdev; 7408 7409 spin_lock_irq(&conf->device_lock); 7410 conf->previous_raid_disks = conf->raid_disks; 7411 rdev_for_each(rdev, conf->mddev) 7412 rdev->data_offset = rdev->new_data_offset; 7413 smp_wmb(); 7414 conf->reshape_progress = MaxSector; 7415 spin_unlock_irq(&conf->device_lock); 7416 wake_up(&conf->wait_for_overlap); 7417 7418 /* read-ahead size must cover two whole stripes, which is 7419 * 2 * (datadisks) * chunksize where 'n' is the number of raid devices 7420 */ 7421 if (conf->mddev->queue) { 7422 int data_disks = conf->raid_disks - conf->max_degraded; 7423 int stripe = data_disks * ((conf->chunk_sectors << 9) 7424 / PAGE_SIZE); 7425 if (conf->mddev->queue->backing_dev_info.ra_pages < 2 * stripe) 7426 conf->mddev->queue->backing_dev_info.ra_pages = 2 * stripe; 7427 } 7428 } 7429 } 7430 7431 /* This is called from the raid5d thread with mddev_lock held. 7432 * It makes config changes to the device. 
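 * (i.e. grow the array size after adding devices, or clear In_sync
 *  on devices that fall outside the array after removing devices)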

/* This is called from the raid5d thread with mddev_lock held.
 * It makes config changes to the device.
 */
static void raid5_finish_reshape(struct mddev *mddev)
{
	struct r5conf *conf = mddev->private;

	if (!test_bit(MD_RECOVERY_INTR, &mddev->recovery)) {

		if (mddev->delta_disks > 0) {
			md_set_array_sectors(mddev, raid5_size(mddev, 0, 0));
			set_capacity(mddev->gendisk, mddev->array_sectors);
			revalidate_disk(mddev->gendisk);
		} else {
			int d;
			spin_lock_irq(&conf->device_lock);
			mddev->degraded = calc_degraded(conf);
			spin_unlock_irq(&conf->device_lock);
			for (d = conf->raid_disks;
			     d < conf->raid_disks - mddev->delta_disks;
			     d++) {
				struct md_rdev *rdev = conf->disks[d].rdev;
				if (rdev)
					clear_bit(In_sync, &rdev->flags);
				rdev = conf->disks[d].replacement;
				if (rdev)
					clear_bit(In_sync, &rdev->flags);
			}
		}
		mddev->layout = conf->algorithm;
		mddev->chunk_sectors = conf->chunk_sectors;
		mddev->reshape_position = MaxSector;
		mddev->delta_disks = 0;
		mddev->reshape_backwards = 0;
	}
}

static void raid5_quiesce(struct mddev *mddev, int state)
{
	struct r5conf *conf = mddev->private;

	switch(state) {
	case 2: /* resume for a suspend */
		wake_up(&conf->wait_for_overlap);
		break;

	case 1: /* stop all writes */
		lock_all_device_hash_locks_irq(conf);
		/* '2' tells resync/reshape to pause so that all
		 * active stripes can drain
		 */
		conf->quiesce = 2;
		wait_event_cmd(conf->wait_for_quiescent,
			       atomic_read(&conf->active_stripes) == 0 &&
			       atomic_read(&conf->active_aligned_reads) == 0,
			       unlock_all_device_hash_locks_irq(conf),
			       lock_all_device_hash_locks_irq(conf));
		conf->quiesce = 1;
		unlock_all_device_hash_locks_irq(conf);
		/* allow reshape to continue */
		wake_up(&conf->wait_for_overlap);
		break;

	case 0: /* re-enable writes */
		lock_all_device_hash_locks_irq(conf);
		conf->quiesce = 0;
		wake_up(&conf->wait_for_quiescent);
		wake_up(&conf->wait_for_overlap);
		unlock_all_device_hash_locks_irq(conf);
		break;
	}
}

static void *raid45_takeover_raid0(struct mddev *mddev, int level)
{
	struct r0conf *raid0_conf = mddev->private;
	sector_t sectors;

	/* for raid0 takeover only one zone is supported */
	if (raid0_conf->nr_strip_zones > 1) {
		printk(KERN_ERR "md/raid:%s: cannot takeover raid0 with more than one zone.\n",
		       mdname(mddev));
		return ERR_PTR(-EINVAL);
	}

	sectors = raid0_conf->strip_zone[0].zone_end;
	sector_div(sectors, raid0_conf->strip_zone[0].nb_dev);
	mddev->dev_sectors = sectors;
	mddev->new_level = level;
	mddev->new_layout = ALGORITHM_PARITY_N;
	mddev->new_chunk_sectors = mddev->chunk_sectors;
	mddev->raid_disks += 1;
	mddev->delta_disks = 1;
	/* make sure it will not be marked as dirty */
	mddev->recovery_cp = MaxSector;

	return setup_conf(mddev);
}
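
/*
 * Illustrative sketch (compiled out): the size arithmetic performed by
 * raid45_takeover_raid0() above.  A single-zone raid0 of N devices whose
 * zone ends at zone_end sectors contributes zone_end / N sectors per device;
 * the takeover then asks for N + 1 devices (delta_disks = 1), so the array
 * comes up degraded, with the extra slot (the parity slot under the
 * ALGORITHM_PARITY_N layout) unpopulated until a spare is added.  The helper
 * name below is hypothetical, for this example only.
 */
#if 0
static sector_t raid0_takeover_dev_sectors(sector_t zone_end,
					   unsigned int nb_dev)
{
	sector_t sectors = zone_end;

	/* sector_div(), as used by the driver, divides in place because
	 * plain 64-bit division is not available on every architecture */
	sector_div(sectors, nb_dev);
	return sectors;
}

/* e.g. a 3-device raid0 whose only zone ends at 3000000 sectors becomes a
 * 4-device raid4/5 with 1000000 sectors per device and a missing parity
 * device.
 */
#endif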

static void *raid5_takeover_raid1(struct mddev *mddev)
{
	int chunksect;

	if (mddev->raid_disks != 2 ||
	    mddev->degraded > 1)
		return ERR_PTR(-EINVAL);

	/* Should check if there are write-behind devices? */

	chunksect = 64*2; /* 64K by default */

	/* The array must be an exact multiple of chunksize */
	while (chunksect && (mddev->array_sectors & (chunksect-1)))
		chunksect >>= 1;

	if ((chunksect<<9) < STRIPE_SIZE)
		/* array size does not allow a suitable chunk size */
		return ERR_PTR(-EINVAL);

	mddev->new_level = 5;
	mddev->new_layout = ALGORITHM_LEFT_SYMMETRIC;
	mddev->new_chunk_sectors = chunksect;

	return setup_conf(mddev);
}

static void *raid5_takeover_raid6(struct mddev *mddev)
{
	int new_layout;

	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC;
		break;
	case ALGORITHM_RIGHT_ASYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC;
		break;
	case ALGORITHM_LEFT_SYMMETRIC_6:
		new_layout = ALGORITHM_LEFT_SYMMETRIC;
		break;
	case ALGORITHM_RIGHT_SYMMETRIC_6:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC;
		break;
	case ALGORITHM_PARITY_0_6:
		new_layout = ALGORITHM_PARITY_0;
		break;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	mddev->new_level = 5;
	mddev->new_layout = new_layout;
	mddev->delta_disks = -1;
	mddev->raid_disks -= 1;
	return setup_conf(mddev);
}

static int raid5_check_reshape(struct mddev *mddev)
{
	/* For a 2-drive array, the layout and chunk size can be changed
	 * immediately as no restriping is needed.
	 * For larger arrays we record the new value - after validation
	 * to be used by a reshape pass.
	 */
	struct r5conf *conf = mddev->private;
	int new_chunk = mddev->new_chunk_sectors;

	if (mddev->new_layout >= 0 && !algorithm_valid_raid5(mddev->new_layout))
		return -EINVAL;
	if (new_chunk > 0) {
		if (!is_power_of_2(new_chunk))
			return -EINVAL;
		if (new_chunk < (PAGE_SIZE>>9))
			return -EINVAL;
		if (mddev->array_sectors & (new_chunk-1))
			/* not factor of array size */
			return -EINVAL;
	}

	/* They look valid */

	if (mddev->raid_disks == 2) {
		/* can make the change immediately */
		if (mddev->new_layout >= 0) {
			conf->algorithm = mddev->new_layout;
			mddev->layout = mddev->new_layout;
		}
		if (new_chunk > 0) {
			conf->chunk_sectors = new_chunk;
			mddev->chunk_sectors = new_chunk;
		}
		set_bit(MD_CHANGE_DEVS, &mddev->flags);
		md_wakeup_thread(mddev->thread);
	}
	return check_reshape(mddev);
}

static int raid6_check_reshape(struct mddev *mddev)
{
	int new_chunk = mddev->new_chunk_sectors;

	if (mddev->new_layout >= 0 && !algorithm_valid_raid6(mddev->new_layout))
		return -EINVAL;
	if (new_chunk > 0) {
		if (!is_power_of_2(new_chunk))
			return -EINVAL;
		if (new_chunk < (PAGE_SIZE >> 9))
			return -EINVAL;
		if (mddev->array_sectors & (new_chunk-1))
			/* not factor of array size */
			return -EINVAL;
	}

	/* They look valid */
	return check_reshape(mddev);
}
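
/*
 * Illustrative sketch (compiled out): the chunk-size validation shared by
 * raid5_check_reshape() and raid6_check_reshape() above.  A new chunk size
 * (in sectors) must be a power of two, at least one page, and a factor of
 * the array size.  The helper name is hypothetical, for this example only.
 */
#if 0
static int new_chunk_is_valid(int new_chunk_sectors, sector_t array_sectors)
{
	if (!is_power_of_2(new_chunk_sectors))
		return 0;
	if (new_chunk_sectors < (PAGE_SIZE >> 9))
		return 0;
	if (array_sectors & (new_chunk_sectors - 1))
		return 0;	/* not a factor of the array size */
	return 1;
}

/* e.g. with 4KiB pages, 8 sectors (4KiB) is the smallest acceptable chunk,
 * and 1024 sectors (512KiB) is acceptable only if it divides array_sectors.
 */
#endif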

static void *raid5_takeover(struct mddev *mddev)
{
	/* raid5 can take over:
	 *  raid0 - if there is only one strip zone - make it a raid4 layout
	 *  raid1 - if there are two drives.  We need to know the chunk size
	 *  raid4 - trivial - just use a raid4 layout.
	 *  raid6 - Providing it is a *_6 layout
	 */
	if (mddev->level == 0)
		return raid45_takeover_raid0(mddev, 5);
	if (mddev->level == 1)
		return raid5_takeover_raid1(mddev);
	if (mddev->level == 4) {
		mddev->new_layout = ALGORITHM_PARITY_N;
		mddev->new_level = 5;
		return setup_conf(mddev);
	}
	if (mddev->level == 6)
		return raid5_takeover_raid6(mddev);

	return ERR_PTR(-EINVAL);
}

static void *raid4_takeover(struct mddev *mddev)
{
	/* raid4 can take over:
	 *  raid0 - if there is only one strip zone
	 *  raid5 - if layout is right
	 */
	if (mddev->level == 0)
		return raid45_takeover_raid0(mddev, 4);
	if (mddev->level == 5 &&
	    mddev->layout == ALGORITHM_PARITY_N) {
		mddev->new_layout = 0;
		mddev->new_level = 4;
		return setup_conf(mddev);
	}
	return ERR_PTR(-EINVAL);
}

static struct md_personality raid5_personality;

static void *raid6_takeover(struct mddev *mddev)
{
	/* Currently can only take over a raid5.  We map the
	 * personality to an equivalent raid6 personality
	 * with the Q block at the end.
	 */
	int new_layout;

	if (mddev->pers != &raid5_personality)
		return ERR_PTR(-EINVAL);
	if (mddev->degraded > 1)
		return ERR_PTR(-EINVAL);
	if (mddev->raid_disks > 253)
		return ERR_PTR(-EINVAL);
	if (mddev->raid_disks < 3)
		return ERR_PTR(-EINVAL);

	switch (mddev->layout) {
	case ALGORITHM_LEFT_ASYMMETRIC:
		new_layout = ALGORITHM_LEFT_ASYMMETRIC_6;
		break;
	case ALGORITHM_RIGHT_ASYMMETRIC:
		new_layout = ALGORITHM_RIGHT_ASYMMETRIC_6;
		break;
	case ALGORITHM_LEFT_SYMMETRIC:
		new_layout = ALGORITHM_LEFT_SYMMETRIC_6;
		break;
	case ALGORITHM_RIGHT_SYMMETRIC:
		new_layout = ALGORITHM_RIGHT_SYMMETRIC_6;
		break;
	case ALGORITHM_PARITY_0:
		new_layout = ALGORITHM_PARITY_0_6;
		break;
	case ALGORITHM_PARITY_N:
		new_layout = ALGORITHM_PARITY_N;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	mddev->new_level = 6;
	mddev->new_layout = new_layout;
	mddev->delta_disks = 1;
	mddev->raid_disks += 1;
	return setup_conf(mddev);
}
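
/*
 * Illustrative sketch (compiled out): a condensed restatement of the switch
 * in raid6_takeover() above (raid5_takeover_raid6() earlier is its inverse).
 * Each raid5 layout maps to the raid6 *_6 variant that keeps the same data/P
 * arrangement and places Q on the last device, which is why the takeover
 * only needs to add one device for Q rather than restripe the array.  The
 * helper name below is hypothetical, for this example only.
 */
#if 0
static int raid5_layout_to_raid6(int layout)
{
	switch (layout) {
	case ALGORITHM_LEFT_ASYMMETRIC:
		return ALGORITHM_LEFT_ASYMMETRIC_6;
	case ALGORITHM_RIGHT_ASYMMETRIC:
		return ALGORITHM_RIGHT_ASYMMETRIC_6;
	case ALGORITHM_LEFT_SYMMETRIC:
		return ALGORITHM_LEFT_SYMMETRIC_6;
	case ALGORITHM_RIGHT_SYMMETRIC:
		return ALGORITHM_RIGHT_SYMMETRIC_6;
	case ALGORITHM_PARITY_0:
		return ALGORITHM_PARITY_0_6;
	case ALGORITHM_PARITY_N:
		return ALGORITHM_PARITY_N;	/* maps to itself */
	default:
		return -1;			/* no equivalent layout */
	}
}
#endif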

static struct md_personality raid6_personality =
{
	.name		= "raid6",
	.level		= 6,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.free		= raid5_free,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid6_check_reshape,
	.start_reshape  = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid6_takeover,
	.congested	= raid5_congested,
};
static struct md_personality raid5_personality =
{
	.name		= "raid5",
	.level		= 5,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.free		= raid5_free,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid5_check_reshape,
	.start_reshape  = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid5_takeover,
	.congested	= raid5_congested,
};

static struct md_personality raid4_personality =
{
	.name		= "raid4",
	.level		= 4,
	.owner		= THIS_MODULE,
	.make_request	= make_request,
	.run		= run,
	.free		= raid5_free,
	.status		= status,
	.error_handler	= error,
	.hot_add_disk	= raid5_add_disk,
	.hot_remove_disk= raid5_remove_disk,
	.spare_active	= raid5_spare_active,
	.sync_request	= sync_request,
	.resize		= raid5_resize,
	.size		= raid5_size,
	.check_reshape	= raid5_check_reshape,
	.start_reshape  = raid5_start_reshape,
	.finish_reshape = raid5_finish_reshape,
	.quiesce	= raid5_quiesce,
	.takeover	= raid4_takeover,
	.congested	= raid5_congested,
};

static int __init raid5_init(void)
{
	raid5_wq = alloc_workqueue("raid5wq",
		WQ_UNBOUND|WQ_MEM_RECLAIM|WQ_CPU_INTENSIVE|WQ_SYSFS, 0);
	if (!raid5_wq)
		return -ENOMEM;
	register_md_personality(&raid6_personality);
	register_md_personality(&raid5_personality);
	register_md_personality(&raid4_personality);
	return 0;
}

static void raid5_exit(void)
{
	unregister_md_personality(&raid6_personality);
	unregister_md_personality(&raid5_personality);
	unregister_md_personality(&raid4_personality);
	destroy_workqueue(raid5_wq);
}

module_init(raid5_init);
module_exit(raid5_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("RAID4/5/6 (striping with parity) personality for MD");
MODULE_ALIAS("md-personality-4"); /* RAID5 */
MODULE_ALIAS("md-raid5");
MODULE_ALIAS("md-raid4");
MODULE_ALIAS("md-level-5");
MODULE_ALIAS("md-level-4");
MODULE_ALIAS("md-personality-8"); /* RAID6 */
MODULE_ALIAS("md-raid6");
MODULE_ALIAS("md-level-6");

/* This used to be two separate modules, they were: */
MODULE_ALIAS("raid5");
MODULE_ALIAS("raid6");