Lines Matching full:sh
26 * (in add_stripe_bio) we update the in-memory bitmap and record in sh->bm_seq
120 static inline int raid6_d0(struct stripe_head *sh) in raid6_d0() argument
122 if (sh->ddf_layout) in raid6_d0()
126 if (sh->qd_idx == sh->disks - 1) in raid6_d0()
129 return sh->qd_idx + 1; in raid6_d0()
142 static int raid6_idx_to_slot(int idx, struct stripe_head *sh, in raid6_idx_to_slot() argument
147 if (sh->ddf_layout) in raid6_idx_to_slot()
149 if (idx == sh->pd_idx) in raid6_idx_to_slot()
151 if (idx == sh->qd_idx) in raid6_idx_to_slot()
153 if (!sh->ddf_layout) in raid6_idx_to_slot()
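A minimal user-space sketch of the device-to-slot mapping that raid6_d0() and raid6_idx_to_slot() perform, assuming a hypothetical 6-device, non-DDF RAID6 stripe with P on device 4 and Q on device 5. The struct below is a stand-in for struct stripe_head, not the kernel definition, and the concrete return values are assumptions reconstructed around the conditionals visible in the listing above, not lines shown here.

/*
 * Stand-alone sketch (not kernel code): map physical device indices to
 * syndrome-source slots the way raid6_d0()/raid6_idx_to_slot() do.
 * Stripe parameters are hypothetical; return values are assumptions.
 */
#include <stdio.h>

struct stripe_head {            /* stand-in, not the kernel struct */
	int ddf_layout;
	int disks;
	int pd_idx;
	int qd_idx;
};

static int raid6_d0(struct stripe_head *sh)
{
	if (sh->ddf_layout)
		return 0;                       /* DDF starts at device 0 */
	if (sh->qd_idx == sh->disks - 1)
		return 0;                       /* Q is last: start at device 0 */
	return sh->qd_idx + 1;                  /* otherwise start just after Q */
}

static int raid6_idx_to_slot(int idx, struct stripe_head *sh,
			     int *count, int syndrome_disks)
{
	int slot = *count;

	if (sh->ddf_layout)
		(*count)++;
	if (idx == sh->pd_idx)
		return syndrome_disks;          /* P -> slot N */
	if (idx == sh->qd_idx)
		return syndrome_disks + 1;      /* Q -> slot N+1 */
	if (!sh->ddf_layout)
		(*count)++;
	return slot;                            /* data blocks fill slots 0..N-1 */
}

int main(void)
{
	struct stripe_head sh = { .ddf_layout = 0, .disks = 6,
				  .pd_idx = 4, .qd_idx = 5 };
	int syndrome_disks = sh.disks - 2;
	int d0 = raid6_d0(&sh), i = d0, count = 0;

	do {    /* walk the devices starting from d0, as the syndrome setup does */
		printf("device %d -> slot %d\n", i,
		       raid6_idx_to_slot(i, &sh, &count, syndrome_disks));
		i = (i + 1) % sh.disks;
	} while (i != d0);
	return 0;       /* prints data devices 0-3 -> slots 0-3, P -> 4, Q -> 5 */
}

With these hypothetical parameters the data devices land in slots 0-3 and P/Q in slots 4 and 5, matching the @srcs array layout described in the set_syndrome_sources() kernel-doc lines further down the listing.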
160 static int stripe_operations_active(struct stripe_head *sh) in stripe_operations_active() argument
162 return sh->check_state || sh->reconstruct_state || in stripe_operations_active()
163 test_bit(STRIPE_BIOFILL_RUN, &sh->state) || in stripe_operations_active()
164 test_bit(STRIPE_COMPUTE_RUN, &sh->state); in stripe_operations_active()
167 static bool stripe_is_lowprio(struct stripe_head *sh) in stripe_is_lowprio() argument
169 return (test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) || in stripe_is_lowprio()
170 test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) && in stripe_is_lowprio()
171 !test_bit(STRIPE_R5C_CACHING, &sh->state); in stripe_is_lowprio()
174 static void raid5_wakeup_stripe_thread(struct stripe_head *sh) in raid5_wakeup_stripe_thread() argument
175 __must_hold(&sh->raid_conf->device_lock) in raid5_wakeup_stripe_thread()
177 struct r5conf *conf = sh->raid_conf; in raid5_wakeup_stripe_thread()
180 int i, cpu = sh->cpu; in raid5_wakeup_stripe_thread()
184 sh->cpu = cpu; in raid5_wakeup_stripe_thread()
187 if (list_empty(&sh->lru)) { in raid5_wakeup_stripe_thread()
190 if (stripe_is_lowprio(sh)) in raid5_wakeup_stripe_thread()
191 list_add_tail(&sh->lru, &group->loprio_list); in raid5_wakeup_stripe_thread()
193 list_add_tail(&sh->lru, &group->handle_list); in raid5_wakeup_stripe_thread()
195 sh->group = group; in raid5_wakeup_stripe_thread()
203 group = conf->worker_groups + cpu_to_group(sh->cpu); in raid5_wakeup_stripe_thread()
207 queue_work_on(sh->cpu, raid5_wq, &group->workers[0].work); in raid5_wakeup_stripe_thread()
214 queue_work_on(sh->cpu, raid5_wq, in raid5_wakeup_stripe_thread()
221 static void do_release_stripe(struct r5conf *conf, struct stripe_head *sh, in do_release_stripe() argument
228 BUG_ON(!list_empty(&sh->lru)); in do_release_stripe()
232 for (i = sh->disks; i--; ) in do_release_stripe()
233 if (test_bit(R5_InJournal, &sh->dev[i].flags)) in do_release_stripe()
242 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) || in do_release_stripe()
244 !test_bit(STRIPE_HANDLE, &sh->state) && injournal != 0)) { in do_release_stripe()
245 if (test_bit(STRIPE_R5C_CACHING, &sh->state)) in do_release_stripe()
246 r5c_make_stripe_write_out(sh); in do_release_stripe()
247 set_bit(STRIPE_HANDLE, &sh->state); in do_release_stripe()
250 if (test_bit(STRIPE_HANDLE, &sh->state)) { in do_release_stripe()
251 if (test_bit(STRIPE_DELAYED, &sh->state) && in do_release_stripe()
252 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in do_release_stripe()
253 list_add_tail(&sh->lru, &conf->delayed_list); in do_release_stripe()
254 else if (test_bit(STRIPE_BIT_DELAY, &sh->state) && in do_release_stripe()
255 sh->bm_seq - conf->seq_write > 0) in do_release_stripe()
256 list_add_tail(&sh->lru, &conf->bitmap_list); in do_release_stripe()
258 clear_bit(STRIPE_DELAYED, &sh->state); in do_release_stripe()
259 clear_bit(STRIPE_BIT_DELAY, &sh->state); in do_release_stripe()
261 if (stripe_is_lowprio(sh)) in do_release_stripe()
262 list_add_tail(&sh->lru, in do_release_stripe()
265 list_add_tail(&sh->lru, in do_release_stripe()
268 raid5_wakeup_stripe_thread(sh); in do_release_stripe()
274 BUG_ON(stripe_operations_active(sh)); in do_release_stripe()
275 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in do_release_stripe()
280 if (!test_bit(STRIPE_EXPANDING, &sh->state)) { in do_release_stripe()
282 list_add_tail(&sh->lru, temp_inactive_list); in do_release_stripe()
284 WARN_ON(test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags)); in do_release_stripe()
286 list_add_tail(&sh->lru, temp_inactive_list); in do_release_stripe()
289 if (!test_and_set_bit(STRIPE_R5C_FULL_STRIPE, &sh->state)) in do_release_stripe()
291 if (test_and_clear_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state)) in do_release_stripe()
293 list_add_tail(&sh->lru, &conf->r5c_full_stripe_list); in do_release_stripe()
301 list_add_tail(&sh->lru, &conf->r5c_partial_stripe_list); in do_release_stripe()
307 static void __release_stripe(struct r5conf *conf, struct stripe_head *sh, in __release_stripe() argument
311 if (atomic_dec_and_test(&sh->count)) in __release_stripe()
312 do_release_stripe(conf, sh, temp_inactive_list); in __release_stripe()
368 struct stripe_head *sh, *t; in release_stripe_list() local
374 llist_for_each_entry_safe(sh, t, head, release_list) { in release_stripe_list()
377 /* sh could be re-added after STRIPE_ON_RELEASE_LIST is cleared */ in release_stripe_list()
379 clear_bit(STRIPE_ON_RELEASE_LIST, &sh->state); in release_stripe_list()
385 hash = sh->hash_lock_index; in release_stripe_list()
386 __release_stripe(conf, sh, &temp_inactive_list[hash]); in release_stripe_list()
393 void raid5_release_stripe(struct stripe_head *sh) in raid5_release_stripe() argument
395 struct r5conf *conf = sh->raid_conf; in raid5_release_stripe()
403 if (atomic_add_unless(&sh->count, -1, 1)) in raid5_release_stripe()
407 test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state)) in raid5_release_stripe()
409 wakeup = llist_add(&sh->release_list, &conf->released_stripes); in raid5_release_stripe()
415 if (atomic_dec_and_lock_irqsave(&sh->count, &conf->device_lock, flags)) { in raid5_release_stripe()
417 hash = sh->hash_lock_index; in raid5_release_stripe()
418 do_release_stripe(conf, sh, &list); in raid5_release_stripe()
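The raid5_release_stripe() lines above show a lock-avoiding put: atomic_add_unless(&sh->count, -1, 1) drops a reference without touching device_lock unless it would be the last one, and only the final put falls through to atomic_dec_and_lock_irqsave() and do_release_stripe(). Below is a minimal user-space sketch of the same idiom, using C11 atomics as a stand-in for the kernel's atomic_t helpers; the object type, function name, and reference counts are hypothetical.

/*
 * Sketch of "drop the reference without the lock unless it is the last
 * one".  put_unless_last() mimics atomic_add_unless(&count, -1, 1); a
 * false return means the caller must take the slow, locked path.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int count;
};

static bool put_unless_last(struct obj *o)
{
	int old = atomic_load(&o->count);

	while (old != 1) {
		/* on failure, old is reloaded with the current count */
		if (atomic_compare_exchange_weak(&o->count, &old, old - 1))
			return true;    /* fast path: not the last reference */
	}
	return false;                   /* last reference: caller locks and tears down */
}

int main(void)
{
	struct obj o = { .count = 2 };  /* hypothetical starting refcount */

	printf("fast put taken: %d\n", put_unless_last(&o));   /* 1: count 2 -> 1 */
	printf("fast put taken: %d\n", put_unless_last(&o));   /* 0: slow path needed */
	return 0;
}

Only when put_unless_last() returns false would a caller go on to take the lock and run the real teardown, which is the role the atomic_dec_and_lock_irqsave()/do_release_stripe() pair plays in the lines above.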
424 static inline void remove_hash(struct stripe_head *sh) in remove_hash() argument
427 (unsigned long long)sh->sector); in remove_hash()
429 hlist_del_init(&sh->hash); in remove_hash()
432 static inline void insert_hash(struct r5conf *conf, struct stripe_head *sh) in insert_hash() argument
434 struct hlist_head *hp = stripe_hash(conf, sh->sector); in insert_hash()
437 (unsigned long long)sh->sector); in insert_hash()
439 hlist_add_head(&sh->hash, hp); in insert_hash()
445 struct stripe_head *sh = NULL; in get_free_stripe() local
451 sh = list_entry(first, struct stripe_head, lru); in get_free_stripe()
453 remove_hash(sh); in get_free_stripe()
455 BUG_ON(hash != sh->hash_lock_index); in get_free_stripe()
459 return sh; in get_free_stripe()
463 static void free_stripe_pages(struct stripe_head *sh) in free_stripe_pages() argument
469 if (!sh->pages) in free_stripe_pages()
472 for (i = 0; i < sh->nr_pages; i++) { in free_stripe_pages()
473 p = sh->pages[i]; in free_stripe_pages()
476 sh->pages[i] = NULL; in free_stripe_pages()
480 static int alloc_stripe_pages(struct stripe_head *sh, gfp_t gfp) in alloc_stripe_pages() argument
485 for (i = 0; i < sh->nr_pages; i++) { in alloc_stripe_pages()
487 if (sh->pages[i]) in alloc_stripe_pages()
492 free_stripe_pages(sh); in alloc_stripe_pages()
495 sh->pages[i] = p; in alloc_stripe_pages()
501 init_stripe_shared_pages(struct stripe_head *sh, struct r5conf *conf, int disks) in init_stripe_shared_pages() argument
505 if (sh->pages) in init_stripe_shared_pages()
508 /* Each of the sh->dev[i] needs one conf->stripe_size */ in init_stripe_shared_pages()
512 sh->pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); in init_stripe_shared_pages()
513 if (!sh->pages) in init_stripe_shared_pages()
515 sh->nr_pages = nr_pages; in init_stripe_shared_pages()
516 sh->stripes_per_page = cnt; in init_stripe_shared_pages()
521 static void shrink_buffers(struct stripe_head *sh) in shrink_buffers() argument
524 int num = sh->raid_conf->pool_size; in shrink_buffers()
530 WARN_ON(sh->dev[i].page != sh->dev[i].orig_page); in shrink_buffers()
531 p = sh->dev[i].page; in shrink_buffers()
534 sh->dev[i].page = NULL; in shrink_buffers()
539 sh->dev[i].page = NULL; in shrink_buffers()
540 free_stripe_pages(sh); /* Free pages */ in shrink_buffers()
544 static int grow_buffers(struct stripe_head *sh, gfp_t gfp) in grow_buffers() argument
547 int num = sh->raid_conf->pool_size; in grow_buffers()
556 sh->dev[i].page = page; in grow_buffers()
557 sh->dev[i].orig_page = page; in grow_buffers()
558 sh->dev[i].offset = 0; in grow_buffers()
561 if (alloc_stripe_pages(sh, gfp)) in grow_buffers()
565 sh->dev[i].page = raid5_get_dev_page(sh, i); in grow_buffers()
566 sh->dev[i].orig_page = sh->dev[i].page; in grow_buffers()
567 sh->dev[i].offset = raid5_get_page_offset(sh, i); in grow_buffers()
574 struct stripe_head *sh);
576 static void init_stripe(struct stripe_head *sh, sector_t sector, int previous) in init_stripe() argument
578 struct r5conf *conf = sh->raid_conf; in init_stripe()
581 BUG_ON(atomic_read(&sh->count) != 0); in init_stripe()
582 BUG_ON(test_bit(STRIPE_HANDLE, &sh->state)); in init_stripe()
583 BUG_ON(stripe_operations_active(sh)); in init_stripe()
584 BUG_ON(sh->batch_head); in init_stripe()
590 sh->generation = conf->generation - previous; in init_stripe()
591 sh->disks = previous ? conf->previous_raid_disks : conf->raid_disks; in init_stripe()
592 sh->sector = sector; in init_stripe()
593 stripe_set_idx(sector, conf, previous, sh); in init_stripe()
594 sh->state = 0; in init_stripe()
596 for (i = sh->disks; i--; ) { in init_stripe()
597 struct r5dev *dev = &sh->dev[i]; in init_stripe()
602 (unsigned long long)sh->sector, i, dev->toread, in init_stripe()
608 dev->sector = raid5_compute_blocknr(sh, i, previous); in init_stripe()
612 sh->overwrite_disks = 0; in init_stripe()
613 insert_hash(conf, sh); in init_stripe()
614 sh->cpu = smp_processor_id(); in init_stripe()
615 set_bit(STRIPE_BATCH_READY, &sh->state); in init_stripe()
621 struct stripe_head *sh; in __find_stripe() local
624 hlist_for_each_entry(sh, stripe_hash(conf, sector), hash) in __find_stripe()
625 if (sh->sector == sector && sh->generation == generation) in __find_stripe()
626 return sh; in __find_stripe()
635 struct stripe_head *sh; in find_get_stripe() local
637 sh = __find_stripe(conf, sector, generation); in find_get_stripe()
638 if (!sh) in find_get_stripe()
641 if (atomic_inc_not_zero(&sh->count)) in find_get_stripe()
642 return sh; in find_get_stripe()
646 * be on a list (sh->lru). Must remove the stripe from the list that in find_get_stripe()
651 if (!atomic_read(&sh->count)) { in find_get_stripe()
652 if (!test_bit(STRIPE_HANDLE, &sh->state)) in find_get_stripe()
654 BUG_ON(list_empty(&sh->lru) && in find_get_stripe()
655 !test_bit(STRIPE_EXPANDING, &sh->state)); in find_get_stripe()
659 list_del_init(&sh->lru); in find_get_stripe()
663 if (sh->group) { in find_get_stripe()
664 sh->group->stripes_cnt--; in find_get_stripe()
665 sh->group = NULL; in find_get_stripe()
668 atomic_inc(&sh->count); in find_get_stripe()
671 return sh; in find_get_stripe()
807 struct stripe_head *sh; in raid5_get_active_stripe() local
834 sh = find_get_stripe(conf, sector, conf->generation - previous, in raid5_get_active_stripe()
836 if (sh) in raid5_get_active_stripe()
840 sh = get_free_stripe(conf, hash); in raid5_get_active_stripe()
841 if (sh) { in raid5_get_active_stripe()
843 init_stripe(sh, sector, previous); in raid5_get_active_stripe()
844 atomic_inc(&sh->count); in raid5_get_active_stripe()
871 return sh; in raid5_get_active_stripe()
874 static bool is_full_stripe_write(struct stripe_head *sh) in is_full_stripe_write() argument
876 BUG_ON(sh->overwrite_disks > (sh->disks - sh->raid_conf->max_degraded)); in is_full_stripe_write()
877 return sh->overwrite_disks == (sh->disks - sh->raid_conf->max_degraded); in is_full_stripe_write()
902 static bool stripe_can_batch(struct stripe_head *sh) in stripe_can_batch() argument
904 struct r5conf *conf = sh->raid_conf; in stripe_can_batch()
908 return test_bit(STRIPE_BATCH_READY, &sh->state) && in stripe_can_batch()
909 !test_bit(STRIPE_BITMAP_PENDING, &sh->state) && in stripe_can_batch()
910 is_full_stripe_write(sh); in stripe_can_batch()
915 struct stripe_head *sh, struct stripe_head *last_sh) in stripe_add_to_batch_list() argument
923 tmp_sec = sh->sector; in stripe_add_to_batch_list()
926 head_sector = sh->sector - RAID5_STRIPE_SECTORS(conf); in stripe_add_to_batch_list()
943 lock_two_stripes(head, sh); in stripe_add_to_batch_list()
945 if (!stripe_can_batch(head) || !stripe_can_batch(sh)) in stripe_add_to_batch_list()
948 if (sh->batch_head) in stripe_add_to_batch_list()
952 while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) in stripe_add_to_batch_list()
954 if (head->dev[dd_idx].towrite->bi_opf != sh->dev[dd_idx].towrite->bi_opf || in stripe_add_to_batch_list()
955 bio_op(head->dev[dd_idx].towrite) != bio_op(sh->dev[dd_idx].towrite)) in stripe_add_to_batch_list()
972 sh->batch_head = head->batch_head; in stripe_add_to_batch_list()
978 list_add(&sh->batch_list, &head->batch_list); in stripe_add_to_batch_list()
982 sh->batch_head = head->batch_head; in stripe_add_to_batch_list()
984 list_add_tail(&sh->batch_list, &head->batch_list); in stripe_add_to_batch_list()
988 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in stripe_add_to_batch_list()
993 if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) { in stripe_add_to_batch_list()
994 int seq = sh->bm_seq; in stripe_add_to_batch_list()
995 if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) && in stripe_add_to_batch_list()
996 sh->batch_head->bm_seq > seq) in stripe_add_to_batch_list()
997 seq = sh->batch_head->bm_seq; in stripe_add_to_batch_list()
998 set_bit(STRIPE_BIT_DELAY, &sh->batch_head->state); in stripe_add_to_batch_list()
999 sh->batch_head->bm_seq = seq; in stripe_add_to_batch_list()
1002 atomic_inc(&sh->count); in stripe_add_to_batch_list()
1004 unlock_two_stripes(head, sh); in stripe_add_to_batch_list()
1012 static int use_new_offset(struct r5conf *conf, struct stripe_head *sh) in use_new_offset() argument
1022 if (sh->generation == conf->generation - 1) in use_new_offset()
1139 static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) in ops_run_io() argument
1141 struct r5conf *conf = sh->raid_conf; in ops_run_io()
1142 int i, disks = sh->disks; in ops_run_io()
1143 struct stripe_head *head_sh = sh; in ops_run_io()
1150 if (log_stripe(sh, s) == 0) in ops_run_io()
1162 sh = head_sh; in ops_run_io()
1163 if (test_and_clear_bit(R5_Wantwrite, &sh->dev[i].flags)) { in ops_run_io()
1165 if (test_and_clear_bit(R5_WantFUA, &sh->dev[i].flags)) in ops_run_io()
1167 if (test_bit(R5_Discard, &sh->dev[i].flags)) in ops_run_io()
1169 } else if (test_and_clear_bit(R5_Wantread, &sh->dev[i].flags)) in ops_run_io()
1172 &sh->dev[i].flags)) { in ops_run_io()
1177 if (test_and_clear_bit(R5_SyncIO, &sh->dev[i].flags)) in ops_run_io()
1181 dev = &sh->dev[i]; in ops_run_io()
1214 int bad = rdev_has_badblock(rdev, sh->sector, in ops_run_io()
1248 set_bit(STRIPE_IO_STARTED, &sh->state); in ops_run_io()
1254 bi->bi_private = sh; in ops_run_io()
1257 __func__, (unsigned long long)sh->sector, in ops_run_io()
1259 atomic_inc(&sh->count); in ops_run_io()
1260 if (sh != head_sh) in ops_run_io()
1262 if (use_new_offset(conf, sh)) in ops_run_io()
1263 bi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1266 bi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1271 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) in ops_run_io()
1272 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); in ops_run_io()
1275 test_bit(R5_InJournal, &sh->dev[i].flags)) in ops_run_io()
1281 sh->dev[i].vec.bv_page = sh->dev[i].orig_page; in ops_run_io()
1283 sh->dev[i].vec.bv_page = sh->dev[i].page; in ops_run_io()
1286 bi->bi_io_vec[0].bv_offset = sh->dev[i].offset; in ops_run_io()
1295 set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags); in ops_run_io()
1297 mddev_trace_remap(conf->mddev, bi, sh->dev[i].sector); in ops_run_io()
1308 set_bit(STRIPE_IO_STARTED, &sh->state); in ops_run_io()
1313 rbi->bi_private = sh; in ops_run_io()
1317 __func__, (unsigned long long)sh->sector, in ops_run_io()
1319 atomic_inc(&sh->count); in ops_run_io()
1320 if (sh != head_sh) in ops_run_io()
1322 if (use_new_offset(conf, sh)) in ops_run_io()
1323 rbi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1326 rbi->bi_iter.bi_sector = (sh->sector in ops_run_io()
1328 if (test_bit(R5_SkipCopy, &sh->dev[i].flags)) in ops_run_io()
1329 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); in ops_run_io()
1330 sh->dev[i].rvec.bv_page = sh->dev[i].page; in ops_run_io()
1333 rbi->bi_io_vec[0].bv_offset = sh->dev[i].offset; in ops_run_io()
1341 mddev_trace_remap(conf->mddev, rbi, sh->dev[i].sector); in ops_run_io()
1349 set_bit(STRIPE_DEGRADED, &sh->state); in ops_run_io()
1351 bi->bi_opf, i, (unsigned long long)sh->sector); in ops_run_io()
1352 clear_bit(R5_LOCKED, &sh->dev[i].flags); in ops_run_io()
1353 set_bit(STRIPE_HANDLE, &sh->state); in ops_run_io()
1358 sh = list_first_entry(&sh->batch_list, struct stripe_head, in ops_run_io()
1360 if (sh != head_sh) in ops_run_io()
1371 struct stripe_head *sh, int no_skipcopy) in async_copy_data() argument
1379 struct r5conf *conf = sh->raid_conf; in async_copy_data()
1435 struct stripe_head *sh = stripe_head_ref; in ops_complete_biofill() local
1437 struct r5conf *conf = sh->raid_conf; in ops_complete_biofill()
1440 (unsigned long long)sh->sector); in ops_complete_biofill()
1443 for (i = sh->disks; i--; ) { in ops_complete_biofill()
1444 struct r5dev *dev = &sh->dev[i]; in ops_complete_biofill()
1465 clear_bit(STRIPE_BIOFILL_RUN, &sh->state); in ops_complete_biofill()
1467 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_biofill()
1468 raid5_release_stripe(sh); in ops_complete_biofill()
1471 static void ops_run_biofill(struct stripe_head *sh) in ops_run_biofill() argument
1476 struct r5conf *conf = sh->raid_conf; in ops_run_biofill()
1478 BUG_ON(sh->batch_head); in ops_run_biofill()
1480 (unsigned long long)sh->sector); in ops_run_biofill()
1482 for (i = sh->disks; i--; ) { in ops_run_biofill()
1483 struct r5dev *dev = &sh->dev[i]; in ops_run_biofill()
1486 spin_lock_irq(&sh->stripe_lock); in ops_run_biofill()
1489 spin_unlock_irq(&sh->stripe_lock); in ops_run_biofill()
1494 dev->sector, tx, sh, 0); in ops_run_biofill()
1500 atomic_inc(&sh->count); in ops_run_biofill()
1501 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_biofill, sh, NULL); in ops_run_biofill()
1505 static void mark_target_uptodate(struct stripe_head *sh, int target) in mark_target_uptodate() argument
1512 tgt = &sh->dev[target]; in mark_target_uptodate()
1520 struct stripe_head *sh = stripe_head_ref; in ops_complete_compute() local
1523 (unsigned long long)sh->sector); in ops_complete_compute()
1526 mark_target_uptodate(sh, sh->ops.target); in ops_complete_compute()
1527 mark_target_uptodate(sh, sh->ops.target2); in ops_complete_compute()
1529 clear_bit(STRIPE_COMPUTE_RUN, &sh->state); in ops_complete_compute()
1530 if (sh->check_state == check_state_compute_run) in ops_complete_compute()
1531 sh->check_state = check_state_compute_result; in ops_complete_compute()
1532 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_compute()
1533 raid5_release_stripe(sh); in ops_complete_compute()
1543 static addr_conv_t *to_addr_conv(struct stripe_head *sh, in to_addr_conv() argument
1546 return (void *) (to_addr_page(percpu, i) + sh->disks + 2); in to_addr_conv()
1553 to_addr_offs(struct stripe_head *sh, struct raid5_percpu *percpu) in to_addr_offs() argument
1555 return (unsigned int *) (to_addr_conv(sh, percpu, 0) + sh->disks + 2); in to_addr_offs()
1559 ops_run_compute5(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute5() argument
1561 int disks = sh->disks; in ops_run_compute5()
1563 unsigned int *off_srcs = to_addr_offs(sh, percpu); in ops_run_compute5()
1564 int target = sh->ops.target; in ops_run_compute5()
1565 struct r5dev *tgt = &sh->dev[target]; in ops_run_compute5()
1573 BUG_ON(sh->batch_head); in ops_run_compute5()
1576 __func__, (unsigned long long)sh->sector, target); in ops_run_compute5()
1581 off_srcs[count] = sh->dev[i].offset; in ops_run_compute5()
1582 xor_srcs[count++] = sh->dev[i].page; in ops_run_compute5()
1586 atomic_inc(&sh->count); in ops_run_compute5()
1589 ops_complete_compute, sh, to_addr_conv(sh, percpu, 0)); in ops_run_compute5()
1592 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_compute5()
1595 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_compute5()
1601 * @srcs - (struct page *) array of size sh->disks
1603 * @sh - stripe_head to parse
1612 struct stripe_head *sh, in set_syndrome_sources() argument
1615 int disks = sh->disks; in set_syndrome_sources()
1616 int syndrome_disks = sh->ddf_layout ? disks : (disks - 2); in set_syndrome_sources()
1617 int d0_idx = raid6_d0(sh); in set_syndrome_sources()
1627 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); in set_syndrome_sources()
1628 struct r5dev *dev = &sh->dev[i]; in set_syndrome_sources()
1630 if (i == sh->qd_idx || i == sh->pd_idx || in set_syndrome_sources()
1639 srcs[slot] = sh->dev[i].orig_page; in set_syndrome_sources()
1641 srcs[slot] = sh->dev[i].page; in set_syndrome_sources()
1647 offs[slot] = sh->dev[i].offset; in set_syndrome_sources()
1656 ops_run_compute6_1(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute6_1() argument
1658 int disks = sh->disks; in ops_run_compute6_1()
1660 unsigned int *offs = to_addr_offs(sh, percpu); in ops_run_compute6_1()
1662 int qd_idx = sh->qd_idx; in ops_run_compute6_1()
1671 BUG_ON(sh->batch_head); in ops_run_compute6_1()
1672 if (sh->ops.target < 0) in ops_run_compute6_1()
1673 target = sh->ops.target2; in ops_run_compute6_1()
1674 else if (sh->ops.target2 < 0) in ops_run_compute6_1()
1675 target = sh->ops.target; in ops_run_compute6_1()
1681 __func__, (unsigned long long)sh->sector, target); in ops_run_compute6_1()
1683 tgt = &sh->dev[target]; in ops_run_compute6_1()
1688 atomic_inc(&sh->count); in ops_run_compute6_1()
1691 count = set_syndrome_sources(blocks, offs, sh, SYNDROME_SRC_ALL); in ops_run_compute6_1()
1695 ops_complete_compute, sh, in ops_run_compute6_1()
1696 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_1()
1698 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_compute6_1()
1705 offs[count] = sh->dev[i].offset; in ops_run_compute6_1()
1706 blocks[count++] = sh->dev[i].page; in ops_run_compute6_1()
1710 NULL, ops_complete_compute, sh, in ops_run_compute6_1()
1711 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_1()
1713 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_compute6_1()
1720 ops_run_compute6_2(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_compute6_2() argument
1722 int i, count, disks = sh->disks; in ops_run_compute6_2()
1723 int syndrome_disks = sh->ddf_layout ? disks : disks-2; in ops_run_compute6_2()
1724 int d0_idx = raid6_d0(sh); in ops_run_compute6_2()
1726 int target = sh->ops.target; in ops_run_compute6_2()
1727 int target2 = sh->ops.target2; in ops_run_compute6_2()
1728 struct r5dev *tgt = &sh->dev[target]; in ops_run_compute6_2()
1729 struct r5dev *tgt2 = &sh->dev[target2]; in ops_run_compute6_2()
1732 unsigned int *offs = to_addr_offs(sh, percpu); in ops_run_compute6_2()
1735 BUG_ON(sh->batch_head); in ops_run_compute6_2()
1737 __func__, (unsigned long long)sh->sector, target, target2); in ops_run_compute6_2()
1752 int slot = raid6_idx_to_slot(i, sh, &count, syndrome_disks); in ops_run_compute6_2()
1754 offs[slot] = sh->dev[i].offset; in ops_run_compute6_2()
1755 blocks[slot] = sh->dev[i].page; in ops_run_compute6_2()
1768 __func__, (unsigned long long)sh->sector, faila, failb); in ops_run_compute6_2()
1770 atomic_inc(&sh->count); in ops_run_compute6_2()
1777 ops_complete_compute, sh, in ops_run_compute6_2()
1778 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1780 RAID5_STRIPE_SIZE(sh->raid_conf), in ops_run_compute6_2()
1786 int qd_idx = sh->qd_idx; in ops_run_compute6_2()
1798 offs[count] = sh->dev[i].offset; in ops_run_compute6_2()
1799 blocks[count++] = sh->dev[i].page; in ops_run_compute6_2()
1801 dest = sh->dev[data_target].page; in ops_run_compute6_2()
1802 dest_off = sh->dev[data_target].offset; in ops_run_compute6_2()
1806 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1808 RAID5_STRIPE_SIZE(sh->raid_conf), in ops_run_compute6_2()
1811 count = set_syndrome_sources(blocks, offs, sh, SYNDROME_SRC_ALL); in ops_run_compute6_2()
1813 ops_complete_compute, sh, in ops_run_compute6_2()
1814 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1816 RAID5_STRIPE_SIZE(sh->raid_conf), in ops_run_compute6_2()
1821 ops_complete_compute, sh, in ops_run_compute6_2()
1822 to_addr_conv(sh, percpu, 0)); in ops_run_compute6_2()
1826 RAID5_STRIPE_SIZE(sh->raid_conf), in ops_run_compute6_2()
1832 RAID5_STRIPE_SIZE(sh->raid_conf), in ops_run_compute6_2()
1841 struct stripe_head *sh = stripe_head_ref; in ops_complete_prexor() local
1844 (unsigned long long)sh->sector); in ops_complete_prexor()
1846 if (r5c_is_writeback(sh->raid_conf->log)) in ops_complete_prexor()
1851 r5c_release_extra_page(sh); in ops_complete_prexor()
1855 ops_run_prexor5(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_prexor5() argument
1858 int disks = sh->disks; in ops_run_prexor5()
1860 unsigned int *off_srcs = to_addr_offs(sh, percpu); in ops_run_prexor5()
1861 int count = 0, pd_idx = sh->pd_idx, i; in ops_run_prexor5()
1865 unsigned int off_dest = off_srcs[count] = sh->dev[pd_idx].offset; in ops_run_prexor5()
1866 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; in ops_run_prexor5()
1868 BUG_ON(sh->batch_head); in ops_run_prexor5()
1870 (unsigned long long)sh->sector); in ops_run_prexor5()
1873 struct r5dev *dev = &sh->dev[i]; in ops_run_prexor5()
1889 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); in ops_run_prexor5()
1891 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_prexor5()
1897 ops_run_prexor6(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_prexor6() argument
1901 unsigned int *offs = to_addr_offs(sh, percpu); in ops_run_prexor6()
1906 (unsigned long long)sh->sector); in ops_run_prexor6()
1908 count = set_syndrome_sources(blocks, offs, sh, SYNDROME_SRC_WANT_DRAIN); in ops_run_prexor6()
1911 ops_complete_prexor, sh, to_addr_conv(sh, percpu, 0)); in ops_run_prexor6()
1913 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_prexor6()
1919 ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) in ops_run_biodrain() argument
1921 struct r5conf *conf = sh->raid_conf; in ops_run_biodrain()
1922 int disks = sh->disks; in ops_run_biodrain()
1924 struct stripe_head *head_sh = sh; in ops_run_biodrain()
1927 (unsigned long long)sh->sector); in ops_run_biodrain()
1933 sh = head_sh; in ops_run_biodrain()
1938 dev = &sh->dev[i]; in ops_run_biodrain()
1944 spin_lock_irq(&sh->stripe_lock); in ops_run_biodrain()
1947 sh->overwrite_disks = 0; in ops_run_biodrain()
1950 spin_unlock_irq(&sh->stripe_lock); in ops_run_biodrain()
1964 dev->sector, tx, sh, in ops_run_biodrain()
1977 sh = list_first_entry(&sh->batch_list, in ops_run_biodrain()
1980 if (sh == head_sh) in ops_run_biodrain()
1992 struct stripe_head *sh = stripe_head_ref; in ops_complete_reconstruct() local
1993 int disks = sh->disks; in ops_complete_reconstruct()
1994 int pd_idx = sh->pd_idx; in ops_complete_reconstruct()
1995 int qd_idx = sh->qd_idx; in ops_complete_reconstruct()
2000 (unsigned long long)sh->sector); in ops_complete_reconstruct()
2003 fua |= test_bit(R5_WantFUA, &sh->dev[i].flags); in ops_complete_reconstruct()
2004 sync |= test_bit(R5_SyncIO, &sh->dev[i].flags); in ops_complete_reconstruct()
2005 discard |= test_bit(R5_Discard, &sh->dev[i].flags); in ops_complete_reconstruct()
2009 struct r5dev *dev = &sh->dev[i]; in ops_complete_reconstruct()
2014 if (test_bit(STRIPE_EXPAND_READY, &sh->state)) in ops_complete_reconstruct()
2024 if (sh->reconstruct_state == reconstruct_state_drain_run) in ops_complete_reconstruct()
2025 sh->reconstruct_state = reconstruct_state_drain_result; in ops_complete_reconstruct()
2026 else if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) in ops_complete_reconstruct()
2027 sh->reconstruct_state = reconstruct_state_prexor_drain_result; in ops_complete_reconstruct()
2029 BUG_ON(sh->reconstruct_state != reconstruct_state_run); in ops_complete_reconstruct()
2030 sh->reconstruct_state = reconstruct_state_result; in ops_complete_reconstruct()
2033 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_reconstruct()
2034 raid5_release_stripe(sh); in ops_complete_reconstruct()
2038 ops_run_reconstruct5(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_reconstruct5() argument
2041 int disks = sh->disks; in ops_run_reconstruct5()
2045 int count, pd_idx = sh->pd_idx, i; in ops_run_reconstruct5()
2051 struct stripe_head *head_sh = sh; in ops_run_reconstruct5()
2055 (unsigned long long)sh->sector); in ops_run_reconstruct5()
2057 for (i = 0; i < sh->disks; i++) { in ops_run_reconstruct5()
2060 if (!test_bit(R5_Discard, &sh->dev[i].flags)) in ops_run_reconstruct5()
2063 if (i >= sh->disks) { in ops_run_reconstruct5()
2064 atomic_inc(&sh->count); in ops_run_reconstruct5()
2065 set_bit(R5_Discard, &sh->dev[pd_idx].flags); in ops_run_reconstruct5()
2066 ops_complete_reconstruct(sh); in ops_run_reconstruct5()
2072 off_srcs = to_addr_offs(sh, percpu); in ops_run_reconstruct5()
2078 off_dest = off_srcs[count] = sh->dev[pd_idx].offset; in ops_run_reconstruct5()
2079 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; in ops_run_reconstruct5()
2081 struct r5dev *dev = &sh->dev[i]; in ops_run_reconstruct5()
2089 xor_dest = sh->dev[pd_idx].page; in ops_run_reconstruct5()
2090 off_dest = sh->dev[pd_idx].offset; in ops_run_reconstruct5()
2092 struct r5dev *dev = &sh->dev[i]; in ops_run_reconstruct5()
2106 list_first_entry(&sh->batch_list, in ops_run_reconstruct5()
2114 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct5()
2118 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct5()
2123 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_reconstruct5()
2126 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_reconstruct5()
2129 sh = list_first_entry(&sh->batch_list, struct stripe_head, in ops_run_reconstruct5()
2136 ops_run_reconstruct6(struct stripe_head *sh, struct raid5_percpu *percpu, in ops_run_reconstruct6() argument
2143 struct stripe_head *head_sh = sh; in ops_run_reconstruct6()
2148 pr_debug("%s: stripe %llu\n", __func__, (unsigned long long)sh->sector); in ops_run_reconstruct6()
2150 for (i = 0; i < sh->disks; i++) { in ops_run_reconstruct6()
2151 if (sh->pd_idx == i || sh->qd_idx == i) in ops_run_reconstruct6()
2153 if (!test_bit(R5_Discard, &sh->dev[i].flags)) in ops_run_reconstruct6()
2156 if (i >= sh->disks) { in ops_run_reconstruct6()
2157 atomic_inc(&sh->count); in ops_run_reconstruct6()
2158 set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); in ops_run_reconstruct6()
2159 set_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); in ops_run_reconstruct6()
2160 ops_complete_reconstruct(sh); in ops_run_reconstruct6()
2166 offs = to_addr_offs(sh, percpu); in ops_run_reconstruct6()
2168 if (sh->reconstruct_state == reconstruct_state_prexor_drain_run) { in ops_run_reconstruct6()
2176 count = set_syndrome_sources(blocks, offs, sh, synflags); in ops_run_reconstruct6()
2178 list_first_entry(&sh->batch_list, in ops_run_reconstruct6()
2184 head_sh, to_addr_conv(sh, percpu, j)); in ops_run_reconstruct6()
2187 to_addr_conv(sh, percpu, j)); in ops_run_reconstruct6()
2189 RAID5_STRIPE_SIZE(sh->raid_conf), &submit); in ops_run_reconstruct6()
2192 sh = list_first_entry(&sh->batch_list, struct stripe_head, in ops_run_reconstruct6()
2200 struct stripe_head *sh = stripe_head_ref; in ops_complete_check() local
2203 (unsigned long long)sh->sector); in ops_complete_check()
2205 sh->check_state = check_state_check_result; in ops_complete_check()
2206 set_bit(STRIPE_HANDLE, &sh->state); in ops_complete_check()
2207 raid5_release_stripe(sh); in ops_complete_check()
2210 static void ops_run_check_p(struct stripe_head *sh, struct raid5_percpu *percpu) in ops_run_check_p() argument
2212 int disks = sh->disks; in ops_run_check_p()
2213 int pd_idx = sh->pd_idx; in ops_run_check_p()
2214 int qd_idx = sh->qd_idx; in ops_run_check_p()
2218 unsigned int *off_srcs = to_addr_offs(sh, percpu); in ops_run_check_p()
2225 (unsigned long long)sh->sector); in ops_run_check_p()
2227 BUG_ON(sh->batch_head); in ops_run_check_p()
2229 xor_dest = sh->dev[pd_idx].page; in ops_run_check_p()
2230 off_dest = sh->dev[pd_idx].offset; in ops_run_check_p()
2236 off_srcs[count] = sh->dev[i].offset; in ops_run_check_p()
2237 xor_srcs[count++] = sh->dev[i].page; in ops_run_check_p()
2241 to_addr_conv(sh, percpu, 0)); in ops_run_check_p()
2243 RAID5_STRIPE_SIZE(sh->raid_conf), in ops_run_check_p()
2244 &sh->ops.zero_sum_result, &submit); in ops_run_check_p()
2246 atomic_inc(&sh->count); in ops_run_check_p()
2247 init_async_submit(&submit, ASYNC_TX_ACK, tx, ops_complete_check, sh, NULL); in ops_run_check_p()
2251 static void ops_run_check_pq(struct stripe_head *sh, struct raid5_percpu *percpu, int checkp) in ops_run_check_pq() argument
2254 unsigned int *offs = to_addr_offs(sh, percpu); in ops_run_check_pq()
2259 (unsigned long long)sh->sector, checkp); in ops_run_check_pq()
2261 BUG_ON(sh->batch_head); in ops_run_check_pq()
2262 count = set_syndrome_sources(srcs, offs, sh, SYNDROME_SRC_ALL); in ops_run_check_pq()
2266 atomic_inc(&sh->count); in ops_run_check_pq()
2268 sh, to_addr_conv(sh, percpu, 0)); in ops_run_check_pq()
2270 RAID5_STRIPE_SIZE(sh->raid_conf), in ops_run_check_pq()
2271 &sh->ops.zero_sum_result, percpu->spare_page, 0, &submit); in ops_run_check_pq()
2274 static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request) in raid_run_ops() argument
2276 int overlap_clear = 0, i, disks = sh->disks; in raid_run_ops()
2278 struct r5conf *conf = sh->raid_conf; in raid_run_ops()
2285 ops_run_biofill(sh); in raid_run_ops()
2291 tx = ops_run_compute5(sh, percpu); in raid_run_ops()
2293 if (sh->ops.target2 < 0 || sh->ops.target < 0) in raid_run_ops()
2294 tx = ops_run_compute6_1(sh, percpu); in raid_run_ops()
2296 tx = ops_run_compute6_2(sh, percpu); in raid_run_ops()
2305 tx = ops_run_prexor5(sh, percpu, tx); in raid_run_ops()
2307 tx = ops_run_prexor6(sh, percpu, tx); in raid_run_ops()
2311 tx = ops_run_partial_parity(sh, percpu, tx); in raid_run_ops()
2314 tx = ops_run_biodrain(sh, tx); in raid_run_ops()
2320 ops_run_reconstruct5(sh, percpu, tx); in raid_run_ops()
2322 ops_run_reconstruct6(sh, percpu, tx); in raid_run_ops()
2326 if (sh->check_state == check_state_run) in raid_run_ops()
2327 ops_run_check_p(sh, percpu); in raid_run_ops()
2328 else if (sh->check_state == check_state_run_q) in raid_run_ops()
2329 ops_run_check_pq(sh, percpu, 0); in raid_run_ops()
2330 else if (sh->check_state == check_state_run_pq) in raid_run_ops()
2331 ops_run_check_pq(sh, percpu, 1); in raid_run_ops()
2336 if (overlap_clear && !sh->batch_head) { in raid_run_ops()
2338 struct r5dev *dev = &sh->dev[i]; in raid_run_ops()
2346 static void free_stripe(struct kmem_cache *sc, struct stripe_head *sh) in free_stripe() argument
2349 kfree(sh->pages); in free_stripe()
2351 if (sh->ppl_page) in free_stripe()
2352 __free_page(sh->ppl_page); in free_stripe()
2353 kmem_cache_free(sc, sh); in free_stripe()
2359 struct stripe_head *sh; in alloc_stripe() local
2361 sh = kmem_cache_zalloc(sc, gfp); in alloc_stripe()
2362 if (sh) { in alloc_stripe()
2363 spin_lock_init(&sh->stripe_lock); in alloc_stripe()
2364 spin_lock_init(&sh->batch_lock); in alloc_stripe()
2365 INIT_LIST_HEAD(&sh->batch_list); in alloc_stripe()
2366 INIT_LIST_HEAD(&sh->lru); in alloc_stripe()
2367 INIT_LIST_HEAD(&sh->r5c); in alloc_stripe()
2368 INIT_LIST_HEAD(&sh->log_list); in alloc_stripe()
2369 atomic_set(&sh->count, 1); in alloc_stripe()
2370 sh->raid_conf = conf; in alloc_stripe()
2371 sh->log_start = MaxSector; in alloc_stripe()
2374 sh->ppl_page = alloc_page(gfp); in alloc_stripe()
2375 if (!sh->ppl_page) { in alloc_stripe()
2376 free_stripe(sc, sh); in alloc_stripe()
2381 if (init_stripe_shared_pages(sh, conf, disks)) { in alloc_stripe()
2382 free_stripe(sc, sh); in alloc_stripe()
2387 return sh; in alloc_stripe()
2391 struct stripe_head *sh; in grow_one_stripe() local
2393 sh = alloc_stripe(conf->slab_cache, gfp, conf->pool_size, conf); in grow_one_stripe()
2394 if (!sh) in grow_one_stripe()
2397 if (grow_buffers(sh, gfp)) { in grow_one_stripe()
2398 shrink_buffers(sh); in grow_one_stripe()
2399 free_stripe(conf->slab_cache, sh); in grow_one_stripe()
2402 sh->hash_lock_index = in grow_one_stripe()
2407 raid5_release_stripe(sh); in grow_one_stripe()
2691 struct stripe_head *sh; in drop_one_stripe() local
2695 sh = get_free_stripe(conf, hash); in drop_one_stripe()
2697 if (!sh) in drop_one_stripe()
2699 BUG_ON(atomic_read(&sh->count)); in drop_one_stripe()
2700 shrink_buffers(sh); in drop_one_stripe()
2701 free_stripe(conf->slab_cache, sh); in drop_one_stripe()
2719 struct stripe_head *sh = bi->bi_private; in raid5_end_read_request() local
2720 struct r5conf *conf = sh->raid_conf; in raid5_end_read_request()
2721 int disks = sh->disks, i; in raid5_end_read_request()
2726 if (bi == &sh->dev[i].req) in raid5_end_read_request()
2730 (unsigned long long)sh->sector, i, atomic_read(&sh->count), in raid5_end_read_request()
2736 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) in raid5_end_read_request()
2746 if (use_new_offset(conf, sh)) in raid5_end_read_request()
2747 s = sh->sector + rdev->new_data_offset; in raid5_end_read_request()
2749 s = sh->sector + rdev->data_offset; in raid5_end_read_request()
2751 set_bit(R5_UPTODATE, &sh->dev[i].flags); in raid5_end_read_request()
2752 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { in raid5_end_read_request()
2763 clear_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
2764 clear_bit(R5_ReWrite, &sh->dev[i].flags); in raid5_end_read_request()
2765 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) in raid5_end_read_request()
2766 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); in raid5_end_read_request()
2768 if (test_bit(R5_InJournal, &sh->dev[i].flags)) in raid5_end_read_request()
2773 set_bit(R5_OrigPageUPTDODATE, &sh->dev[i].flags); in raid5_end_read_request()
2781 clear_bit(R5_UPTODATE, &sh->dev[i].flags); in raid5_end_read_request()
2784 if (test_bit(R5_ReadRepl, &sh->dev[i].flags)) in raid5_end_read_request()
2797 } else if (test_bit(R5_ReWrite, &sh->dev[i].flags)) { in raid5_end_read_request()
2818 && !test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) in raid5_end_read_request()
2821 if (sh->qd_idx >= 0 && sh->pd_idx == i) in raid5_end_read_request()
2822 set_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
2823 else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) { in raid5_end_read_request()
2824 set_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
2825 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags); in raid5_end_read_request()
2827 set_bit(R5_ReadNoMerge, &sh->dev[i].flags); in raid5_end_read_request()
2829 clear_bit(R5_ReadError, &sh->dev[i].flags); in raid5_end_read_request()
2830 clear_bit(R5_ReWrite, &sh->dev[i].flags); in raid5_end_read_request()
2834 rdev, sh->sector, RAID5_STRIPE_SECTORS(conf), 0))) in raid5_end_read_request()
2840 clear_bit(R5_LOCKED, &sh->dev[i].flags); in raid5_end_read_request()
2841 set_bit(STRIPE_HANDLE, &sh->state); in raid5_end_read_request()
2842 raid5_release_stripe(sh); in raid5_end_read_request()
2847 struct stripe_head *sh = bi->bi_private; in raid5_end_write_request() local
2848 struct r5conf *conf = sh->raid_conf; in raid5_end_write_request()
2849 int disks = sh->disks, i; in raid5_end_write_request()
2854 if (bi == &sh->dev[i].req) { in raid5_end_write_request()
2858 if (bi == &sh->dev[i].rreq) { in raid5_end_write_request()
2872 (unsigned long long)sh->sector, i, atomic_read(&sh->count), in raid5_end_write_request()
2882 else if (rdev_has_badblock(rdev, sh->sector, in raid5_end_write_request()
2884 set_bit(R5_MadeGoodRepl, &sh->dev[i].flags); in raid5_end_write_request()
2887 set_bit(STRIPE_DEGRADED, &sh->state); in raid5_end_write_request()
2889 set_bit(R5_WriteError, &sh->dev[i].flags); in raid5_end_write_request()
2893 } else if (rdev_has_badblock(rdev, sh->sector, in raid5_end_write_request()
2895 set_bit(R5_MadeGood, &sh->dev[i].flags); in raid5_end_write_request()
2896 if (test_bit(R5_ReadError, &sh->dev[i].flags)) in raid5_end_write_request()
2901 set_bit(R5_ReWrite, &sh->dev[i].flags); in raid5_end_write_request()
2906 if (sh->batch_head && bi->bi_status && !replacement) in raid5_end_write_request()
2907 set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state); in raid5_end_write_request()
2910 if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags)) in raid5_end_write_request()
2911 clear_bit(R5_LOCKED, &sh->dev[i].flags); in raid5_end_write_request()
2912 set_bit(STRIPE_HANDLE, &sh->state); in raid5_end_write_request()
2914 if (sh->batch_head && sh != sh->batch_head) in raid5_end_write_request()
2915 raid5_release_stripe(sh->batch_head); in raid5_end_write_request()
2916 raid5_release_stripe(sh); in raid5_end_write_request()
2959 struct stripe_head *sh) in raid5_compute_sector() argument
3147 if (sh) { in raid5_compute_sector()
3148 sh->pd_idx = pd_idx; in raid5_compute_sector()
3149 sh->qd_idx = qd_idx; in raid5_compute_sector()
3150 sh->ddf_layout = ddf_layout; in raid5_compute_sector()
3159 sector_t raid5_compute_blocknr(struct stripe_head *sh, int i, int previous) in raid5_compute_blocknr() argument
3161 struct r5conf *conf = sh->raid_conf; in raid5_compute_blocknr()
3162 int raid_disks = sh->disks; in raid5_compute_blocknr()
3164 sector_t new_sector = sh->sector, check; in raid5_compute_blocknr()
3179 if (i == sh->pd_idx) in raid5_compute_blocknr()
3187 if (i > sh->pd_idx) in raid5_compute_blocknr()
3192 if (i < sh->pd_idx) in raid5_compute_blocknr()
3194 i -= (sh->pd_idx + 1); in raid5_compute_blocknr()
3206 if (i == sh->qd_idx) in raid5_compute_blocknr()
3213 if (sh->pd_idx == raid_disks-1) in raid5_compute_blocknr()
3215 else if (i > sh->pd_idx) in raid5_compute_blocknr()
3220 if (sh->pd_idx == raid_disks-1) in raid5_compute_blocknr()
3224 if (i < sh->pd_idx) in raid5_compute_blocknr()
3226 i -= (sh->pd_idx + 2); in raid5_compute_blocknr()
3236 if (sh->pd_idx == 0) in raid5_compute_blocknr()
3240 if (i < sh->pd_idx) in raid5_compute_blocknr()
3242 i -= (sh->pd_idx + 1); in raid5_compute_blocknr()
3247 if (i > sh->pd_idx) in raid5_compute_blocknr()
3252 if (i < sh->pd_idx) in raid5_compute_blocknr()
3254 i -= (sh->pd_idx + 1); in raid5_compute_blocknr()
3270 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx in raid5_compute_blocknr()
3271 || sh2.qd_idx != sh->qd_idx) { in raid5_compute_blocknr()
3336 schedule_reconstruction(struct stripe_head *sh, struct stripe_head_state *s, in schedule_reconstruction() argument
3339 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks; in schedule_reconstruction()
3340 struct r5conf *conf = sh->raid_conf; in schedule_reconstruction()
3350 r5c_release_extra_page(sh); in schedule_reconstruction()
3353 struct r5dev *dev = &sh->dev[i]; in schedule_reconstruction()
3374 sh->reconstruct_state = reconstruct_state_drain_run; in schedule_reconstruction()
3377 sh->reconstruct_state = reconstruct_state_run; in schedule_reconstruction()
3382 if (!test_and_set_bit(STRIPE_FULL_WRITE, &sh->state)) in schedule_reconstruction()
3385 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || in schedule_reconstruction()
3386 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); in schedule_reconstruction()
3388 (!(test_bit(R5_UPTODATE, &sh->dev[qd_idx].flags) || in schedule_reconstruction()
3389 test_bit(R5_Wantcompute, &sh->dev[qd_idx].flags)))); in schedule_reconstruction()
3392 struct r5dev *dev = &sh->dev[i]; in schedule_reconstruction()
3411 sh->reconstruct_state = reconstruct_state_prexor_drain_run; in schedule_reconstruction()
3420 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); in schedule_reconstruction()
3421 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); in schedule_reconstruction()
3425 int qd_idx = sh->qd_idx; in schedule_reconstruction()
3426 struct r5dev *dev = &sh->dev[qd_idx]; in schedule_reconstruction()
3433 if (raid5_has_ppl(sh->raid_conf) && sh->ppl_page && in schedule_reconstruction()
3435 !test_bit(STRIPE_FULL_WRITE, &sh->state) && in schedule_reconstruction()
3436 test_bit(R5_Insync, &sh->dev[pd_idx].flags)) in schedule_reconstruction()
3440 __func__, (unsigned long long)sh->sector, in schedule_reconstruction()
3444 static bool stripe_bio_overlaps(struct stripe_head *sh, struct bio *bi, in stripe_bio_overlaps() argument
3447 struct r5conf *conf = sh->raid_conf; in stripe_bio_overlaps()
3451 bi->bi_iter.bi_sector, sh->sector); in stripe_bio_overlaps()
3454 if (sh->batch_head) in stripe_bio_overlaps()
3458 bip = &sh->dev[dd_idx].towrite; in stripe_bio_overlaps()
3460 bip = &sh->dev[dd_idx].toread; in stripe_bio_overlaps()
3485 for (i = 0; i < sh->disks; i++) { in stripe_bio_overlaps()
3486 if (i != sh->pd_idx && in stripe_bio_overlaps()
3487 (i == dd_idx || sh->dev[i].towrite)) { in stripe_bio_overlaps()
3488 sector = sh->dev[i].sector; in stripe_bio_overlaps()
3504 static void __add_stripe_bio(struct stripe_head *sh, struct bio *bi, in __add_stripe_bio() argument
3507 struct r5conf *conf = sh->raid_conf; in __add_stripe_bio()
3512 bip = &sh->dev[dd_idx].towrite; in __add_stripe_bio()
3516 bip = &sh->dev[dd_idx].toread; in __add_stripe_bio()
3523 clear_bit(STRIPE_BATCH_READY, &sh->state); in __add_stripe_bio()
3534 sector_t sector = sh->dev[dd_idx].sector; in __add_stripe_bio()
3535 for (bi=sh->dev[dd_idx].towrite; in __add_stripe_bio()
3536 sector < sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf) && in __add_stripe_bio()
3538 bi = r5_next_bio(conf, bi, sh->dev[dd_idx].sector)) { in __add_stripe_bio()
3542 if (sector >= sh->dev[dd_idx].sector + RAID5_STRIPE_SECTORS(conf)) in __add_stripe_bio()
3543 if (!test_and_set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags)) in __add_stripe_bio()
3544 sh->overwrite_disks++; in __add_stripe_bio()
3548 (*bip)->bi_iter.bi_sector, sh->sector, dd_idx, in __add_stripe_bio()
3549 sh->dev[dd_idx].sector); in __add_stripe_bio()
3564 set_bit(STRIPE_BITMAP_PENDING, &sh->state); in __add_stripe_bio()
3565 spin_unlock_irq(&sh->stripe_lock); in __add_stripe_bio()
3566 conf->mddev->bitmap_ops->startwrite(conf->mddev, sh->sector, in __add_stripe_bio()
3568 spin_lock_irq(&sh->stripe_lock); in __add_stripe_bio()
3569 clear_bit(STRIPE_BITMAP_PENDING, &sh->state); in __add_stripe_bio()
3570 if (!sh->batch_head) { in __add_stripe_bio()
3571 sh->bm_seq = conf->seq_flush+1; in __add_stripe_bio()
3572 set_bit(STRIPE_BIT_DELAY, &sh->state); in __add_stripe_bio()
3582 static bool add_stripe_bio(struct stripe_head *sh, struct bio *bi, in add_stripe_bio() argument
3585 spin_lock_irq(&sh->stripe_lock); in add_stripe_bio()
3587 if (stripe_bio_overlaps(sh, bi, dd_idx, forwrite)) { in add_stripe_bio()
3588 set_bit(R5_Overlap, &sh->dev[dd_idx].flags); in add_stripe_bio()
3589 spin_unlock_irq(&sh->stripe_lock); in add_stripe_bio()
3593 __add_stripe_bio(sh, bi, dd_idx, forwrite, previous); in add_stripe_bio()
3594 spin_unlock_irq(&sh->stripe_lock); in add_stripe_bio()
3601 struct stripe_head *sh) in stripe_set_idx() argument
3613 &dd_idx, sh); in stripe_set_idx()
3617 handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, in handle_failed_stripe() argument
3621 BUG_ON(sh->batch_head); in handle_failed_stripe()
3626 if (test_bit(R5_ReadError, &sh->dev[i].flags)) { in handle_failed_stripe()
3637 sh->sector, in handle_failed_stripe()
3643 spin_lock_irq(&sh->stripe_lock); in handle_failed_stripe()
3645 bi = sh->dev[i].towrite; in handle_failed_stripe()
3646 sh->dev[i].towrite = NULL; in handle_failed_stripe()
3647 sh->overwrite_disks = 0; in handle_failed_stripe()
3648 spin_unlock_irq(&sh->stripe_lock); in handle_failed_stripe()
3652 log_stripe_write_finished(sh); in handle_failed_stripe()
3654 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) in handle_failed_stripe()
3655 wake_up_bit(&sh->dev[i].flags, R5_Overlap); in handle_failed_stripe()
3658 sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) { in handle_failed_stripe()
3659 struct bio *nextbi = r5_next_bio(conf, bi, sh->dev[i].sector); in handle_failed_stripe()
3667 sh->sector, RAID5_STRIPE_SECTORS(conf), in handle_failed_stripe()
3671 bi = sh->dev[i].written; in handle_failed_stripe()
3672 sh->dev[i].written = NULL; in handle_failed_stripe()
3673 if (test_and_clear_bit(R5_SkipCopy, &sh->dev[i].flags)) { in handle_failed_stripe()
3674 WARN_ON(test_bit(R5_UPTODATE, &sh->dev[i].flags)); in handle_failed_stripe()
3675 sh->dev[i].page = sh->dev[i].orig_page; in handle_failed_stripe()
3680 sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) { in handle_failed_stripe()
3681 struct bio *bi2 = r5_next_bio(conf, bi, sh->dev[i].sector); in handle_failed_stripe()
3691 if (!test_bit(R5_Wantfill, &sh->dev[i].flags) && in handle_failed_stripe()
3693 (!test_bit(R5_Insync, &sh->dev[i].flags) || in handle_failed_stripe()
3694 test_bit(R5_ReadError, &sh->dev[i].flags))) { in handle_failed_stripe()
3695 spin_lock_irq(&sh->stripe_lock); in handle_failed_stripe()
3696 bi = sh->dev[i].toread; in handle_failed_stripe()
3697 sh->dev[i].toread = NULL; in handle_failed_stripe()
3698 spin_unlock_irq(&sh->stripe_lock); in handle_failed_stripe()
3699 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) in handle_failed_stripe()
3700 wake_up_bit(&sh->dev[i].flags, R5_Overlap); in handle_failed_stripe()
3704 sh->dev[i].sector + RAID5_STRIPE_SECTORS(conf)) { in handle_failed_stripe()
3706 r5_next_bio(conf, bi, sh->dev[i].sector); in handle_failed_stripe()
3714 sh->sector, RAID5_STRIPE_SECTORS(conf), in handle_failed_stripe()
3719 clear_bit(R5_LOCKED, &sh->dev[i].flags); in handle_failed_stripe()
3724 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) in handle_failed_stripe()
3730 handle_failed_sync(struct r5conf *conf, struct stripe_head *sh, in handle_failed_sync() argument
3736 BUG_ON(sh->batch_head); in handle_failed_sync()
3737 clear_bit(STRIPE_SYNCING, &sh->state); in handle_failed_sync()
3738 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) in handle_failed_sync()
3739 wake_up_bit(&sh->dev[sh->pd_idx].flags, R5_Overlap); in handle_failed_sync()
3759 && !rdev_set_badblocks(rdev, sh->sector, in handle_failed_sync()
3767 && !rdev_set_badblocks(rdev, sh->sector, in handle_failed_sync()
3778 static int want_replace(struct stripe_head *sh, int disk_idx) in want_replace() argument
3783 rdev = sh->raid_conf->disks[disk_idx].replacement; in want_replace()
3787 && (rdev->recovery_offset <= sh->sector in want_replace()
3788 || rdev->mddev->recovery_cp <= sh->sector)) in want_replace()
3793 static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s, in need_this_block() argument
3796 struct r5dev *dev = &sh->dev[disk_idx]; in need_this_block()
3797 struct r5dev *fdev[2] = { &sh->dev[s->failed_num[0]], in need_this_block()
3798 &sh->dev[s->failed_num[1]] }; in need_this_block()
3800 bool force_rcw = (sh->raid_conf->rmw_level == PARITY_DISABLE_RMW); in need_this_block()
3816 (s->replacing && want_replace(sh, disk_idx))) in need_this_block()
3841 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in need_this_block()
3862 s->failed_num[i] == sh->pd_idx || in need_this_block()
3863 s->failed_num[i] == sh->qd_idx) && in need_this_block()
3880 sh->sector < sh->raid_conf->mddev->recovery_cp) in need_this_block()
3884 if (s->failed_num[i] != sh->pd_idx && in need_this_block()
3885 s->failed_num[i] != sh->qd_idx && in need_this_block()
3900 static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s, in fetch_block() argument
3903 struct r5dev *dev = &sh->dev[disk_idx]; in fetch_block()
3906 if (need_this_block(sh, s, disk_idx, disks)) { in fetch_block()
3912 BUG_ON(sh->batch_head); in fetch_block()
3924 ((sh->qd_idx >= 0 && sh->pd_idx == disk_idx) || in fetch_block()
3931 (unsigned long long)sh->sector, disk_idx); in fetch_block()
3932 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in fetch_block()
3935 sh->ops.target = disk_idx; in fetch_block()
3936 sh->ops.target2 = -1; /* no 2nd target */ in fetch_block()
3955 &sh->dev[other].flags)) in fetch_block()
3960 (unsigned long long)sh->sector, in fetch_block()
3962 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in fetch_block()
3964 set_bit(R5_Wantcompute, &sh->dev[disk_idx].flags); in fetch_block()
3965 set_bit(R5_Wantcompute, &sh->dev[other].flags); in fetch_block()
3966 sh->ops.target = disk_idx; in fetch_block()
3967 sh->ops.target2 = other; in fetch_block()
3986 static void handle_stripe_fill(struct stripe_head *sh, in handle_stripe_fill() argument
3996 if (!test_bit(STRIPE_COMPUTE_RUN, &sh->state) && !sh->check_state && in handle_stripe_fill()
3997 !sh->reconstruct_state) { in handle_stripe_fill()
4007 if (test_bit(STRIPE_R5C_CACHING, &sh->state)) in handle_stripe_fill()
4008 r5c_make_stripe_write_out(sh); in handle_stripe_fill()
4013 if (fetch_block(sh, s, i, disks)) in handle_stripe_fill()
4017 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_fill()
4028 struct stripe_head *sh, int disks) in handle_stripe_clean_event() argument
4033 struct stripe_head *head_sh = sh; in handle_stripe_clean_event()
4037 if (sh->dev[i].written) { in handle_stripe_clean_event()
4038 dev = &sh->dev[i]; in handle_stripe_clean_event()
4065 sh->sector, RAID5_STRIPE_SECTORS(conf), in handle_stripe_clean_event()
4066 !test_bit(STRIPE_DEGRADED, &sh->state), in handle_stripe_clean_event()
4069 sh = list_first_entry(&sh->batch_list, in handle_stripe_clean_event()
4072 if (sh != head_sh) { in handle_stripe_clean_event()
4073 dev = &sh->dev[i]; in handle_stripe_clean_event()
4077 sh = head_sh; in handle_stripe_clean_event()
4078 dev = &sh->dev[i]; in handle_stripe_clean_event()
4083 log_stripe_write_finished(sh); in handle_stripe_clean_event()
4086 test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { in handle_stripe_clean_event()
4088 clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); in handle_stripe_clean_event()
4089 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); in handle_stripe_clean_event()
4090 if (sh->qd_idx >= 0) { in handle_stripe_clean_event()
4091 clear_bit(R5_Discard, &sh->dev[sh->qd_idx].flags); in handle_stripe_clean_event()
4092 clear_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags); in handle_stripe_clean_event()
4095 clear_bit(STRIPE_DISCARD, &sh->state); in handle_stripe_clean_event()
4102 hash = sh->hash_lock_index; in handle_stripe_clean_event()
4104 remove_hash(sh); in handle_stripe_clean_event()
4107 sh = list_first_entry(&sh->batch_list, in handle_stripe_clean_event()
4109 if (sh != head_sh) in handle_stripe_clean_event()
4112 sh = head_sh; in handle_stripe_clean_event()
4114 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state)) in handle_stripe_clean_event()
4115 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_clean_event()
4119 if (test_and_clear_bit(STRIPE_FULL_WRITE, &sh->state)) in handle_stripe_clean_event()
4143 struct stripe_head *sh, in handle_stripe_dirtying() argument
4158 (recovery_cp < MaxSector && sh->sector >= recovery_cp && in handle_stripe_dirtying()
4164 pr_debug("force RCW rmw_level=%u, recovery_cp=%llu sh->sector=%llu\n", in handle_stripe_dirtying()
4166 (unsigned long long)sh->sector); in handle_stripe_dirtying()
4169 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
4171 i == sh->pd_idx || i == sh->qd_idx || in handle_stripe_dirtying()
4183 i != sh->pd_idx && i != sh->qd_idx && in handle_stripe_dirtying()
4195 (unsigned long long)sh->sector, sh->state, rmw, rcw); in handle_stripe_dirtying()
4196 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe_dirtying()
4200 sh->sector, rmw); in handle_stripe_dirtying()
4203 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
4206 !test_bit(R5_LOCKED, &sh->dev[sh->pd_idx].flags)) { in handle_stripe_dirtying()
4221 r5c_use_extra_page(sh); in handle_stripe_dirtying()
4226 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
4233 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
4235 i == sh->pd_idx || i == sh->qd_idx || in handle_stripe_dirtying()
4242 &sh->state)) { in handle_stripe_dirtying()
4249 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
4258 struct r5dev *dev = &sh->dev[i]; in handle_stripe_dirtying()
4260 i != sh->pd_idx && i != sh->qd_idx && in handle_stripe_dirtying()
4267 &sh->state)) { in handle_stripe_dirtying()
4275 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
4281 (unsigned long long)sh->sector, rcw, qread, in handle_stripe_dirtying()
4282 test_bit(STRIPE_DELAYED, &sh->state)); in handle_stripe_dirtying()
4286 !test_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in handle_stripe_dirtying()
4287 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe_dirtying()
4299 if ((s->req_compute || !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) && in handle_stripe_dirtying()
4301 !test_bit(STRIPE_BIT_DELAY, &sh->state))) in handle_stripe_dirtying()
4302 schedule_reconstruction(sh, s, rcw == 0, 0); in handle_stripe_dirtying()
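The handle_stripe_dirtying() hits above implement the choice between read-modify-write (rmw) and reconstruct-write (rcw): the function counts how many drives each plan would have to read before parity can be updated, and schedules the cheaper one unless rmw_level or the recovery checkpoint forces rcw. A minimal userspace sketch of that read-count arithmetic follows; the demo_* names, and the simplifying assumption that nothing is already up to date in the stripe cache, are mine rather than the kernel's.

#include <stdio.h>

/* Reads needed for read-modify-write: the data blocks being replaced plus the
 * old parity block, so new parity can be derived by xor-ing the deltas. */
static int demo_rmw_reads(int blocks_written)
{
	return blocks_written + 1;
}

/* Reads needed for reconstruct-write: every data block that is NOT being
 * rewritten, so parity can be recomputed from the full stripe. */
static int demo_rcw_reads(int data_disks, int blocks_written)
{
	return data_disks - blocks_written;
}

int main(void)
{
	int data_disks = 4;	/* e.g. a 5-drive RAID5: 4 data disks + parity */
	int w;

	for (w = 1; w <= data_disks; w++)
		printf("write %d block(s): rmw=%d reads, rcw=%d reads -> %s\n",
		       w, demo_rmw_reads(w), demo_rcw_reads(data_disks, w),
		       demo_rmw_reads(w) <= demo_rcw_reads(data_disks, w) ?
		       "read-modify-write" : "reconstruct-write");
	return 0;
}

For small writes the rmw plan wins; once most of the stripe is being overwritten, rcw needs fewer reads, which is the trade-off the counters in the listing are weighing.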
4306 static void handle_parity_checks5(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks5() argument
4311 BUG_ON(sh->batch_head); in handle_parity_checks5()
4312 set_bit(STRIPE_HANDLE, &sh->state); in handle_parity_checks5()
4314 switch (sh->check_state) { in handle_parity_checks5()
4319 sh->check_state = check_state_run; in handle_parity_checks5()
4321 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); in handle_parity_checks5()
4325 dev = &sh->dev[s->failed_num[0]]; in handle_parity_checks5()
4328 sh->check_state = check_state_idle; in handle_parity_checks5()
4330 dev = &sh->dev[sh->pd_idx]; in handle_parity_checks5()
4333 if (test_bit(STRIPE_INSYNC, &sh->state)) in handle_parity_checks5()
4344 clear_bit(STRIPE_DEGRADED, &sh->state); in handle_parity_checks5()
4345 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks5()
4350 sh->check_state = check_state_idle; in handle_parity_checks5()
4362 if ((sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) == 0) in handle_parity_checks5()
4366 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks5()
4371 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks5()
4374 (unsigned long long) sh->sector, in handle_parity_checks5()
4375 (unsigned long long) sh->sector + in handle_parity_checks5()
4378 sh->check_state = check_state_compute_run; in handle_parity_checks5()
4379 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in handle_parity_checks5()
4382 &sh->dev[sh->pd_idx].flags); in handle_parity_checks5()
4383 sh->ops.target = sh->pd_idx; in handle_parity_checks5()
4384 sh->ops.target2 = -1; in handle_parity_checks5()
4393 __func__, sh->check_state, in handle_parity_checks5()
4394 (unsigned long long) sh->sector); in handle_parity_checks5()
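handle_parity_checks5() above walks a small state machine kept in sh->check_state: it queues an asynchronous xor check of the parity block, inspects the zero-sum result, and either marks the stripe in sync or schedules a compute run to rewrite the parity. The sketch below shows that progression with deliberately simplified states; the demo_* names are hypothetical, and the real machine has additional states for the RAID6 P/Q variants handled by handle_parity_checks6().

enum demo_check_state {
	DEMO_CHECK_IDLE,	/* nothing queued */
	DEMO_CHECK_RUN,		/* async zero-sum check of the parity queued */
	DEMO_CHECK_RESULT,	/* async engine finished, result available */
	DEMO_COMPUTE_RUN,	/* parity mismatched, recomputing it in place */
	DEMO_CHECK_DONE,	/* stripe marked in sync */
};

struct demo_stripe {
	enum demo_check_state check_state;
	int zero_sum_ok;	/* 1 if data xor parity summed to zero */
};

/* One turn of the crank; the kernel takes a step like this each time the
 * stripe is handled while a sync/check is requested. */
static void demo_check_step(struct demo_stripe *sh)
{
	switch (sh->check_state) {
	case DEMO_CHECK_IDLE:
		sh->check_state = DEMO_CHECK_RUN;	/* queue the check */
		break;
	case DEMO_CHECK_RUN:
		sh->check_state = DEMO_CHECK_RESULT;	/* completion seen */
		break;
	case DEMO_CHECK_RESULT:
		/* in sync if the sum was zero, otherwise rewrite parity */
		sh->check_state = sh->zero_sum_ok ? DEMO_CHECK_DONE
						  : DEMO_COMPUTE_RUN;
		break;
	case DEMO_COMPUTE_RUN:
		sh->check_state = DEMO_CHECK_DONE;	/* parity rewritten */
		break;
	case DEMO_CHECK_DONE:
		break;
	}
}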
4399 static void handle_parity_checks6(struct r5conf *conf, struct stripe_head *sh, in handle_parity_checks6() argument
4403 int pd_idx = sh->pd_idx; in handle_parity_checks6()
4404 int qd_idx = sh->qd_idx; in handle_parity_checks6()
4407 BUG_ON(sh->batch_head); in handle_parity_checks6()
4408 set_bit(STRIPE_HANDLE, &sh->state); in handle_parity_checks6()
4418 switch (sh->check_state) { in handle_parity_checks6()
4426 sh->check_state = check_state_run; in handle_parity_checks6()
4432 if (sh->check_state == check_state_run) in handle_parity_checks6()
4433 sh->check_state = check_state_run_pq; in handle_parity_checks6()
4435 sh->check_state = check_state_run_q; in handle_parity_checks6()
4439 sh->ops.zero_sum_result = 0; in handle_parity_checks6()
4441 if (sh->check_state == check_state_run) { in handle_parity_checks6()
4443 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); in handle_parity_checks6()
4446 if (sh->check_state >= check_state_run && in handle_parity_checks6()
4447 sh->check_state <= check_state_run_pq) { in handle_parity_checks6()
4459 sh->check_state = check_state_idle; in handle_parity_checks6()
4462 if (test_bit(STRIPE_INSYNC, &sh->state)) in handle_parity_checks6()
4470 dev = &sh->dev[s->failed_num[1]]; in handle_parity_checks6()
4476 dev = &sh->dev[s->failed_num[0]]; in handle_parity_checks6()
4481 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { in handle_parity_checks6()
4482 dev = &sh->dev[pd_idx]; in handle_parity_checks6()
4487 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { in handle_parity_checks6()
4488 dev = &sh->dev[qd_idx]; in handle_parity_checks6()
4496 dev - (struct r5dev *) &sh->dev)) { in handle_parity_checks6()
4501 clear_bit(STRIPE_DEGRADED, &sh->state); in handle_parity_checks6()
4503 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks6()
4510 sh->check_state = check_state_idle; in handle_parity_checks6()
4516 if (sh->ops.zero_sum_result == 0) { in handle_parity_checks6()
4519 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks6()
4525 sh->check_state = check_state_compute_result; in handle_parity_checks6()
4536 set_bit(STRIPE_INSYNC, &sh->state); in handle_parity_checks6()
4539 (unsigned long long) sh->sector, in handle_parity_checks6()
4540 (unsigned long long) sh->sector + in handle_parity_checks6()
4543 int *target = &sh->ops.target; in handle_parity_checks6()
4545 sh->ops.target = -1; in handle_parity_checks6()
4546 sh->ops.target2 = -1; in handle_parity_checks6()
4547 sh->check_state = check_state_compute_run; in handle_parity_checks6()
4548 set_bit(STRIPE_COMPUTE_RUN, &sh->state); in handle_parity_checks6()
4550 if (sh->ops.zero_sum_result & SUM_CHECK_P_RESULT) { in handle_parity_checks6()
4552 &sh->dev[pd_idx].flags); in handle_parity_checks6()
4554 target = &sh->ops.target2; in handle_parity_checks6()
4557 if (sh->ops.zero_sum_result & SUM_CHECK_Q_RESULT) { in handle_parity_checks6()
4559 &sh->dev[qd_idx].flags); in handle_parity_checks6()
4570 __func__, sh->check_state, in handle_parity_checks6()
4571 (unsigned long long) sh->sector); in handle_parity_checks6()
4576 static void handle_stripe_expansion(struct r5conf *conf, struct stripe_head *sh) in handle_stripe_expansion() argument
4584 BUG_ON(sh->batch_head); in handle_stripe_expansion()
4585 clear_bit(STRIPE_EXPAND_SOURCE, &sh->state); in handle_stripe_expansion()
4586 for (i = 0; i < sh->disks; i++) in handle_stripe_expansion()
4587 if (i != sh->pd_idx && i != sh->qd_idx) { in handle_stripe_expansion()
4592 sector_t bn = raid5_compute_blocknr(sh, i, 1); in handle_stripe_expansion()
4613 sh->dev[i].page, sh2->dev[dd_idx].offset, in handle_stripe_expansion()
4614 sh->dev[i].offset, RAID5_STRIPE_SIZE(conf), in handle_stripe_expansion()
4649 static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) in analyse_stripe() argument
4651 struct r5conf *conf = sh->raid_conf; in analyse_stripe()
4652 int disks = sh->disks; in analyse_stripe()
4659 s->expanding = test_bit(STRIPE_EXPAND_SOURCE, &sh->state) && !sh->batch_head; in analyse_stripe()
4660 s->expanded = test_bit(STRIPE_EXPAND_READY, &sh->state) && !sh->batch_head; in analyse_stripe()
4670 dev = &sh->dev[i]; in analyse_stripe()
4681 !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) in analyse_stripe()
4710 rdev->recovery_offset >= sh->sector + RAID5_STRIPE_SECTORS(conf) && in analyse_stripe()
4711 !rdev_has_badblock(rdev, sh->sector, in analyse_stripe()
4725 is_bad = rdev_has_badblock(rdev, sh->sector, in analyse_stripe()
4752 else if (sh->sector + RAID5_STRIPE_SECTORS(conf) <= rdev->recovery_offset) in analyse_stripe()
4821 if (test_bit(STRIPE_SYNCING, &sh->state)) { in analyse_stripe()
4831 sh->sector >= conf->mddev->recovery_cp || in analyse_stripe()
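analyse_stripe() above boils per-device state (toread, towrite, written, locked devices, failures, bad blocks, recovery offsets) down into the counters in struct stripe_head_state that the rest of handle_stripe() branches on. A toy version of that tallying step, with hypothetical demo_* types standing in for r5dev and stripe_head_state:

struct demo_dev {
	int toread, towrite, written;	/* bio pointers in the real r5dev */
	int locked, failed;
};

struct demo_stripe_state {
	int to_read, to_write, written;
	int locked, failed;
};

/* Condense per-device flags into the counters the handling code consults. */
static void demo_analyse(const struct demo_dev *dev, int disks,
			 struct demo_stripe_state *s)
{
	int i;

	*s = (struct demo_stripe_state){ 0 };
	for (i = 0; i < disks; i++) {
		if (dev[i].toread)
			s->to_read++;
		if (dev[i].towrite)
			s->to_write++;
		if (dev[i].written)
			s->written++;
		if (dev[i].locked)
			s->locked++;
		if (dev[i].failed)
			s->failed++;
	}
}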
4843 static int clear_batch_ready(struct stripe_head *sh) in clear_batch_ready() argument
4846 if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state)) in clear_batch_ready()
4847 return (sh->batch_head && sh->batch_head != sh); in clear_batch_ready()
4848 spin_lock(&sh->stripe_lock); in clear_batch_ready()
4849 if (!sh->batch_head) { in clear_batch_ready()
4850 spin_unlock(&sh->stripe_lock); in clear_batch_ready()
4858 if (sh->batch_head != sh) { in clear_batch_ready()
4859 spin_unlock(&sh->stripe_lock); in clear_batch_ready()
4862 spin_lock(&sh->batch_lock); in clear_batch_ready()
4863 list_for_each_entry(tmp, &sh->batch_list, batch_list) in clear_batch_ready()
4865 spin_unlock(&sh->batch_lock); in clear_batch_ready()
4866 spin_unlock(&sh->stripe_lock); in clear_batch_ready()
4878 struct stripe_head *sh, *next; in break_stripe_batch_list() local
4881 list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) { in break_stripe_batch_list()
4883 list_del_init(&sh->batch_list); in break_stripe_batch_list()
4885 WARN_ONCE(sh->state & ((1 << STRIPE_ACTIVE) | in break_stripe_batch_list()
4897 "stripe state: %lx\n", sh->state); in break_stripe_batch_list()
4902 set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS | in break_stripe_batch_list()
4908 sh->check_state = head_sh->check_state; in break_stripe_batch_list()
4909 sh->reconstruct_state = head_sh->reconstruct_state; in break_stripe_batch_list()
4910 spin_lock_irq(&sh->stripe_lock); in break_stripe_batch_list()
4911 sh->batch_head = NULL; in break_stripe_batch_list()
4912 spin_unlock_irq(&sh->stripe_lock); in break_stripe_batch_list()
4913 for (i = 0; i < sh->disks; i++) { in break_stripe_batch_list()
4914 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) in break_stripe_batch_list()
4915 wake_up_bit(&sh->dev[i].flags, R5_Overlap); in break_stripe_batch_list()
4916 sh->dev[i].flags = head_sh->dev[i].flags & in break_stripe_batch_list()
4920 sh->state & handle_flags) in break_stripe_batch_list()
4921 set_bit(STRIPE_HANDLE, &sh->state); in break_stripe_batch_list()
4922 raid5_release_stripe(sh); in break_stripe_batch_list()
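clear_batch_ready() and break_stripe_batch_list() above manage batched full-stripe writes: member stripes hang off the batch head's batch_list, and breaking the batch means unlinking each member, clearing its batch_head back-pointer under the member's own lock, propagating a few flags from the head, and dropping the reference the batch held. A userspace miniature of that unlink-and-release loop; the mini_* names are mine, and a plain counter stands in where the kernel uses proper atomic refcounting.

#include <pthread.h>
#include <stddef.h>

struct mini_stripe {
	struct mini_stripe *batch_head;	/* head of the batch, or ourselves */
	struct mini_stripe *batch_next;	/* next member hanging off the head */
	pthread_spinlock_t lock;
	int refcount;
};

/* Unlink every member from 'head', clear its back-pointer under its own
 * lock, and drop the reference the batch was holding on it. */
static void mini_break_batch(struct mini_stripe *head)
{
	struct mini_stripe *sh = head->batch_next;

	while (sh) {
		struct mini_stripe *next = sh->batch_next;

		pthread_spin_lock(&sh->lock);
		sh->batch_head = NULL;	/* member stands alone again */
		sh->batch_next = NULL;
		pthread_spin_unlock(&sh->lock);

		sh->refcount--;		/* stands in for raid5_release_stripe() */
		sh = next;
	}
	head->batch_next = NULL;
}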
4934 static void handle_stripe(struct stripe_head *sh) in handle_stripe() argument
4937 struct r5conf *conf = sh->raid_conf; in handle_stripe()
4940 int disks = sh->disks; in handle_stripe()
4943 clear_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4951 if (clear_batch_ready(sh)) in handle_stripe()
4954 if (test_and_set_bit_lock(STRIPE_ACTIVE, &sh->state)) { in handle_stripe()
4957 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
4961 if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state)) in handle_stripe()
4962 break_stripe_batch_list(sh, 0); in handle_stripe()
4964 if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) { in handle_stripe()
4965 spin_lock(&sh->stripe_lock); in handle_stripe()
4970 if (!test_bit(STRIPE_R5C_PARTIAL_STRIPE, &sh->state) && in handle_stripe()
4971 !test_bit(STRIPE_R5C_FULL_STRIPE, &sh->state) && in handle_stripe()
4972 !test_bit(STRIPE_DISCARD, &sh->state) && in handle_stripe()
4973 test_and_clear_bit(STRIPE_SYNC_REQUESTED, &sh->state)) { in handle_stripe()
4974 set_bit(STRIPE_SYNCING, &sh->state); in handle_stripe()
4975 clear_bit(STRIPE_INSYNC, &sh->state); in handle_stripe()
4976 clear_bit(STRIPE_REPLACED, &sh->state); in handle_stripe()
4978 spin_unlock(&sh->stripe_lock); in handle_stripe()
4980 clear_bit(STRIPE_DELAYED, &sh->state); in handle_stripe()
4984 (unsigned long long)sh->sector, sh->state, in handle_stripe()
4985 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx, in handle_stripe()
4986 sh->check_state, sh->reconstruct_state); in handle_stripe()
4988 analyse_stripe(sh, &s); in handle_stripe()
4990 if (test_bit(STRIPE_LOG_TRAPPED, &sh->state)) in handle_stripe()
4995 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
5002 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
5010 if (s.to_fill && !test_bit(STRIPE_BIOFILL_RUN, &sh->state)) { in handle_stripe()
5012 set_bit(STRIPE_BIOFILL_RUN, &sh->state); in handle_stripe()
5028 sh->check_state = 0; in handle_stripe()
5029 sh->reconstruct_state = 0; in handle_stripe()
5030 break_stripe_batch_list(sh, 0); in handle_stripe()
5032 handle_failed_stripe(conf, sh, &s, disks); in handle_stripe()
5034 handle_failed_sync(conf, sh, &s); in handle_stripe()
5041 if (sh->reconstruct_state == reconstruct_state_prexor_drain_result) in handle_stripe()
5043 if (sh->reconstruct_state == reconstruct_state_drain_result || in handle_stripe()
5044 sh->reconstruct_state == reconstruct_state_prexor_drain_result) { in handle_stripe()
5045 sh->reconstruct_state = reconstruct_state_idle; in handle_stripe()
5050 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) && in handle_stripe()
5051 !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)); in handle_stripe()
5052 BUG_ON(sh->qd_idx >= 0 && in handle_stripe()
5053 !test_bit(R5_UPTODATE, &sh->dev[sh->qd_idx].flags) && in handle_stripe()
5054 !test_bit(R5_Discard, &sh->dev[sh->qd_idx].flags)); in handle_stripe()
5056 struct r5dev *dev = &sh->dev[i]; in handle_stripe()
5058 (i == sh->pd_idx || i == sh->qd_idx || in handle_stripe()
5068 ((i == sh->pd_idx || i == sh->qd_idx) && in handle_stripe()
5070 set_bit(STRIPE_INSYNC, &sh->state); in handle_stripe()
5073 if (test_and_clear_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in handle_stripe()
5081 pdev = &sh->dev[sh->pd_idx]; in handle_stripe()
5082 s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) in handle_stripe()
5083 || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); in handle_stripe()
5084 qdev = &sh->dev[sh->qd_idx]; in handle_stripe()
5085 s.q_failed = (s.failed >= 1 && s.failed_num[0] == sh->qd_idx) in handle_stripe()
5086 || (s.failed >= 2 && s.failed_num[1] == sh->qd_idx) in handle_stripe()
5098 handle_stripe_clean_event(conf, sh, disks); in handle_stripe()
5101 r5c_handle_cached_data_endio(conf, sh, disks); in handle_stripe()
5102 log_stripe_write_finished(sh); in handle_stripe()
5113 handle_stripe_fill(sh, &s, disks); in handle_stripe()
5120 r5c_finish_stripe_write_out(conf, sh, &s); in handle_stripe()
5131 if (!sh->reconstruct_state && !sh->check_state && !sh->log_io) { in handle_stripe()
5134 handle_stripe_dirtying(conf, sh, &s, disks); in handle_stripe()
5140 ret = r5c_try_caching_write(conf, sh, &s, in handle_stripe()
5151 (!test_bit(STRIPE_R5C_CACHING, &sh->state) && in handle_stripe()
5153 ret = handle_stripe_dirtying(conf, sh, &s, in handle_stripe()
5166 if (sh->check_state || in handle_stripe()
5168 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && in handle_stripe()
5169 !test_bit(STRIPE_INSYNC, &sh->state))) { in handle_stripe()
5171 handle_parity_checks6(conf, sh, &s, disks); in handle_stripe()
5173 handle_parity_checks5(conf, sh, &s, disks); in handle_stripe()
5177 && !test_bit(STRIPE_COMPUTE_RUN, &sh->state) in handle_stripe()
5178 && !test_bit(STRIPE_REPLACED, &sh->state)) { in handle_stripe()
5181 if (test_bit(R5_NeedReplace, &sh->dev[i].flags)) { in handle_stripe()
5182 WARN_ON(!test_bit(R5_UPTODATE, &sh->dev[i].flags)); in handle_stripe()
5183 set_bit(R5_WantReplace, &sh->dev[i].flags); in handle_stripe()
5184 set_bit(R5_LOCKED, &sh->dev[i].flags); in handle_stripe()
5188 set_bit(STRIPE_INSYNC, &sh->state); in handle_stripe()
5189 set_bit(STRIPE_REPLACED, &sh->state); in handle_stripe()
5192 !test_bit(STRIPE_COMPUTE_RUN, &sh->state) && in handle_stripe()
5193 test_bit(STRIPE_INSYNC, &sh->state)) { in handle_stripe()
5195 clear_bit(STRIPE_SYNCING, &sh->state); in handle_stripe()
5196 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) in handle_stripe()
5197 wake_up_bit(&sh->dev[sh->pd_idx].flags, R5_Overlap); in handle_stripe()
5205 struct r5dev *dev = &sh->dev[s.failed_num[i]]; in handle_stripe()
5222 if (sh->reconstruct_state == reconstruct_state_result) { in handle_stripe()
5224 = raid5_get_active_stripe(conf, NULL, sh->sector, in handle_stripe()
5228 /* sh cannot be written until sh_src has been read. in handle_stripe()
5229 * so arrange for sh to be delayed a little in handle_stripe()
5231 set_bit(STRIPE_DELAYED, &sh->state); in handle_stripe()
5232 set_bit(STRIPE_HANDLE, &sh->state); in handle_stripe()
5242 sh->reconstruct_state = reconstruct_state_idle; in handle_stripe()
5243 clear_bit(STRIPE_EXPANDING, &sh->state); in handle_stripe()
5245 set_bit(R5_Wantwrite, &sh->dev[i].flags); in handle_stripe()
5246 set_bit(R5_LOCKED, &sh->dev[i].flags); in handle_stripe()
5251 if (s.expanded && test_bit(STRIPE_EXPANDING, &sh->state) && in handle_stripe()
5252 !sh->reconstruct_state) { in handle_stripe()
5254 sh->disks = conf->raid_disks; in handle_stripe()
5255 stripe_set_idx(sh->sector, conf, 0, sh); in handle_stripe()
5256 schedule_reconstruction(sh, &s, 1, 1); in handle_stripe()
5257 } else if (s.expanded && !sh->reconstruct_state && s.locked == 0) { in handle_stripe()
5258 clear_bit(STRIPE_EXPAND_READY, &sh->state); in handle_stripe()
5265 !test_bit(STRIPE_COMPUTE_RUN, &sh->state)) in handle_stripe()
5266 handle_stripe_expansion(conf, sh); in handle_stripe()
5286 struct r5dev *dev = &sh->dev[i]; in handle_stripe()
5290 if (!rdev_set_badblocks(rdev, sh->sector, in handle_stripe()
5297 rdev_clear_badblocks(rdev, sh->sector, in handle_stripe()
5306 rdev_clear_badblocks(rdev, sh->sector, in handle_stripe()
5313 raid_run_ops(sh, s.ops_request); in handle_stripe()
5315 ops_run_io(sh, &s); in handle_stripe()
5328 clear_bit_unlock(STRIPE_ACTIVE, &sh->state); in handle_stripe()
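The first few handle_stripe() hits above show its entry gate: the STRIPE_HANDLE request bit is cleared, then STRIPE_ACTIVE is taken with test_and_set_bit_lock(); if another context already holds the stripe, the request bit is simply re-set so the work is retried later, and STRIPE_ACTIVE is released with clear_bit_unlock() at the end. A compressed sketch of that gate using C11 atomics and hypothetical demo_* names:

#include <stdatomic.h>
#include <stdbool.h>

#define DEMO_HANDLE	(1u << 0)	/* "please handle this stripe" request */
#define DEMO_ACTIVE	(1u << 1)	/* someone is handling it right now */

static bool demo_try_handle(atomic_uint *state)
{
	atomic_fetch_and(state, ~DEMO_HANDLE);		/* consume the request */

	if (atomic_fetch_or(state, DEMO_ACTIVE) & DEMO_ACTIVE) {
		atomic_fetch_or(state, DEMO_HANDLE);	/* busy: ask again later */
		return false;
	}

	/* ... analyse the stripe and run its pending operations ... */

	atomic_fetch_and(state, ~DEMO_ACTIVE);		/* release the gate */
	return true;
}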
5337 struct stripe_head *sh; in raid5_activate_delayed() local
5338 sh = list_entry(l, struct stripe_head, lru); in raid5_activate_delayed()
5340 clear_bit(STRIPE_DELAYED, &sh->state); in raid5_activate_delayed()
5341 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in raid5_activate_delayed()
5343 list_add_tail(&sh->lru, &conf->hold_list); in raid5_activate_delayed()
5344 raid5_wakeup_stripe_thread(sh); in raid5_activate_delayed()
5357 struct stripe_head *sh = list_entry(head.next, struct stripe_head, lru); in activate_bit_delay() local
5359 list_del_init(&sh->lru); in activate_bit_delay()
5360 atomic_inc(&sh->count); in activate_bit_delay()
5361 hash = sh->hash_lock_index; in activate_bit_delay()
5362 __release_stripe(conf, sh, &temp_inactive_list[hash]); in activate_bit_delay()
5556 struct stripe_head *sh, *tmp; in __get_priority_stripe() local
5566 sh = NULL; in __get_priority_stripe()
5592 sh = list_entry(handle_list->next, typeof(*sh), lru); in __get_priority_stripe()
5596 else if (!test_bit(STRIPE_IO_STARTED, &sh->state)) { in __get_priority_stripe()
5616 sh = tmp; in __get_priority_stripe()
5621 if (sh) { in __get_priority_stripe()
5629 if (!sh) { in __get_priority_stripe()
5639 sh->group = NULL; in __get_priority_stripe()
5641 list_del_init(&sh->lru); in __get_priority_stripe()
5642 BUG_ON(atomic_inc_return(&sh->count) != 1); in __get_priority_stripe()
5643 return sh; in __get_priority_stripe()
5656 struct stripe_head *sh; in raid5_unplug() local
5665 sh = list_first_entry(&cb->list, struct stripe_head, lru); in raid5_unplug()
5666 list_del_init(&sh->lru); in raid5_unplug()
5673 clear_bit(STRIPE_ON_UNPLUG_LIST, &sh->state); in raid5_unplug()
5678 hash = sh->hash_lock_index; in raid5_unplug()
5679 __release_stripe(conf, sh, &cb->temp_inactive_list[hash]); in raid5_unplug()
5692 struct stripe_head *sh) in release_stripe_plug() argument
5700 raid5_release_stripe(sh); in release_stripe_plug()
5713 if (!test_and_set_bit(STRIPE_ON_UNPLUG_LIST, &sh->state)) in release_stripe_plug()
5714 list_add_tail(&sh->lru, &cb->list); in release_stripe_plug()
5716 raid5_release_stripe(sh); in release_stripe_plug()
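raid5_unplug() and release_stripe_plug() above rely on block-layer plugging: while the submitting task holds a plug, released stripes are parked on a per-plug list (and flagged STRIPE_ON_UNPLUG_LIST) instead of being queued one at a time, and the whole list is drained when the plug is pulled. The standalone sketch below shows only that park-then-drain shape; the demo_* names and the fixed-size array are mine, and nothing here is the real blk_plug API.

#include <stdio.h>

#define DEMO_MAX_PARKED	16

struct demo_plug {
	int parked[DEMO_MAX_PARKED];	/* stripe numbers parked while plugged */
	int nr_parked;
	int active;
};

/* Submitter is done with a stripe: park it if a plug is currently held. */
static void demo_release_stripe(struct demo_plug *plug, int stripe)
{
	if (plug && plug->active && plug->nr_parked < DEMO_MAX_PARKED) {
		plug->parked[plug->nr_parked++] = stripe;	/* defer */
		return;
	}
	printf("handling stripe %d immediately\n", stripe);
}

/* Plug is pulled once submission ends: drain everything in one batch. */
static void demo_unplug(struct demo_plug *plug)
{
	int i;

	plug->active = 0;
	for (i = 0; i < plug->nr_parked; i++)
		printf("handling deferred stripe %d\n", plug->parked[i]);
	plug->nr_parked = 0;
}

int main(void)
{
	struct demo_plug plug = { .active = 1 };
	int s;

	for (s = 0; s < 4; s++)
		demo_release_stripe(&plug, s);	/* all four get parked */
	demo_unplug(&plug);			/* drained together */
	return 0;
}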
5723 struct stripe_head *sh; in make_discard_request() local
5753 sh = raid5_get_active_stripe(conf, NULL, logical_sector, 0); in make_discard_request()
5754 set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); in make_discard_request()
5755 if (test_bit(STRIPE_SYNCING, &sh->state)) { in make_discard_request()
5756 raid5_release_stripe(sh); in make_discard_request()
5757 wait_on_bit(&sh->dev[sh->pd_idx].flags, R5_Overlap, in make_discard_request()
5761 clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); in make_discard_request()
5762 spin_lock_irq(&sh->stripe_lock); in make_discard_request()
5764 if (d == sh->pd_idx || d == sh->qd_idx) in make_discard_request()
5766 if (sh->dev[d].towrite || sh->dev[d].toread) { in make_discard_request()
5767 set_bit(R5_Overlap, &sh->dev[d].flags); in make_discard_request()
5768 spin_unlock_irq(&sh->stripe_lock); in make_discard_request()
5769 raid5_release_stripe(sh); in make_discard_request()
5770 wait_on_bit(&sh->dev[d].flags, R5_Overlap, in make_discard_request()
5775 set_bit(STRIPE_DISCARD, &sh->state); in make_discard_request()
5776 sh->overwrite_disks = 0; in make_discard_request()
5778 if (d == sh->pd_idx || d == sh->qd_idx) in make_discard_request()
5780 sh->dev[d].towrite = bi; in make_discard_request()
5781 set_bit(R5_OVERWRITE, &sh->dev[d].flags); in make_discard_request()
5784 sh->overwrite_disks++; in make_discard_request()
5786 spin_unlock_irq(&sh->stripe_lock); in make_discard_request()
5790 mddev->bitmap_ops->startwrite(mddev, sh->sector, in make_discard_request()
5792 sh->bm_seq = conf->seq_flush + 1; in make_discard_request()
5793 set_bit(STRIPE_BIT_DELAY, &sh->state); in make_discard_request()
5796 set_bit(STRIPE_HANDLE, &sh->state); in make_discard_request()
5797 clear_bit(STRIPE_DELAYED, &sh->state); in make_discard_request()
5798 if (!test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in make_discard_request()
5800 release_stripe_plug(mddev, sh); in make_discard_request()
5821 struct stripe_head *sh) in stripe_ahead_of_reshape() argument
5827 for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) { in stripe_ahead_of_reshape()
5828 if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) in stripe_ahead_of_reshape()
5831 min_sector = min(min_sector, sh->dev[dd_idx].sector); in stripe_ahead_of_reshape()
5832 max_sector = max(max_sector, sh->dev[dd_idx].sector); in stripe_ahead_of_reshape()
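stripe_ahead_of_reshape() above scans the data disks (skipping pd_idx and qd_idx) for the lowest and highest device sector the stripe touches and compares that range against the reshape's progress pointer to decide whether the stripe still lies in the not-yet-reshaped region. A simplified sketch under the assumption of a front-to-back reshape sweep; the real helper tracks both ends of the range and also copes with reshapes that run backwards, and the demo_* names are hypothetical.

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t demo_sector_t;

/* Under a front-to-back sweep, sectors below 'progress' have already been
 * moved to the new layout; a stripe is "ahead of" the reshape if any data
 * block it touches still sits at or beyond the progress pointer. */
static bool demo_ahead_of_reshape(const demo_sector_t *dev_sector,
				  int data_disks, demo_sector_t progress)
{
	demo_sector_t max_sector = 0;
	int i;

	for (i = 0; i < data_disks; i++)
		if (dev_sector[i] > max_sector)
			max_sector = dev_sector[i];

	return max_sector >= progress;
}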
5848 struct stripe_request_ctx *ctx, struct stripe_head *sh, in add_all_stripe_bios() argument
5853 spin_lock_irq(&sh->stripe_lock); in add_all_stripe_bios()
5855 for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) { in add_all_stripe_bios()
5856 struct r5dev *dev = &sh->dev[dd_idx]; in add_all_stripe_bios()
5858 if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) in add_all_stripe_bios()
5865 if (stripe_bio_overlaps(sh, bi, dd_idx, forwrite)) { in add_all_stripe_bios()
5867 spin_unlock_irq(&sh->stripe_lock); in add_all_stripe_bios()
5868 raid5_release_stripe(sh); in add_all_stripe_bios()
5880 for (dd_idx = 0; dd_idx < sh->disks; dd_idx++) { in add_all_stripe_bios()
5881 struct r5dev *dev = &sh->dev[dd_idx]; in add_all_stripe_bios()
5883 if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) in add_all_stripe_bios()
5890 __add_stripe_bio(sh, bi, dd_idx, forwrite, previous); in add_all_stripe_bios()
5895 spin_unlock_irq(&sh->stripe_lock); in add_all_stripe_bios()
5938 struct stripe_head *sh; in make_stripe_request() local
5965 sh = raid5_get_active_stripe(conf, ctx, new_sector, flags); in make_stripe_request()
5966 if (unlikely(!sh)) { in make_stripe_request()
5973 stripe_ahead_of_reshape(mddev, conf, sh)) { in make_stripe_request()
5978 * 'sh', we know that if that happens, in make_stripe_request()
5992 if (test_bit(STRIPE_EXPANDING, &sh->state)) { in make_stripe_request()
5998 if (!add_all_stripe_bios(conf, ctx, sh, bi, rw, previous)) { in make_stripe_request()
6003 if (stripe_can_batch(sh)) { in make_stripe_request()
6004 stripe_add_to_batch_list(conf, sh, ctx->batch_last); in make_stripe_request()
6007 atomic_inc(&sh->count); in make_stripe_request()
6008 ctx->batch_last = sh; in make_stripe_request()
6012 set_bit(STRIPE_R5C_PREFLUSH, &sh->state); in make_stripe_request()
6017 set_bit(STRIPE_HANDLE, &sh->state); in make_stripe_request()
6018 clear_bit(STRIPE_DELAYED, &sh->state); in make_stripe_request()
6019 if ((!sh->batch_head || sh == sh->batch_head) && in make_stripe_request()
6021 !test_and_set_bit(STRIPE_PREREAD_ACTIVE, &sh->state)) in make_stripe_request()
6024 release_stripe_plug(mddev, sh); in make_stripe_request()
6028 raid5_release_stripe(sh); in make_stripe_request()
6048 struct stripe_head sh; in raid5_bio_lowest_chunk_sector() local
6054 sector = raid5_compute_sector(conf, r_sector, 0, &dd_idx, &sh); in raid5_bio_lowest_chunk_sector()
6063 while (dd_idx == sh.pd_idx || dd_idx == sh.qd_idx) in raid5_bio_lowest_chunk_sector()
6222 struct stripe_head *sh; in reshape_request() local
6379 sh = raid5_get_active_stripe(conf, NULL, stripe_addr+i, in reshape_request()
6381 set_bit(STRIPE_EXPANDING, &sh->state); in reshape_request()
6386 for (j=sh->disks; j--;) { in reshape_request()
6388 if (j == sh->pd_idx) in reshape_request()
6391 j == sh->qd_idx) in reshape_request()
6393 s = raid5_compute_blocknr(sh, j, 0); in reshape_request()
6398 memset(page_address(sh->dev[j].page), 0, RAID5_STRIPE_SIZE(conf)); in reshape_request()
6399 set_bit(R5_Expanded, &sh->dev[j].flags); in reshape_request()
6400 set_bit(R5_UPTODATE, &sh->dev[j].flags); in reshape_request()
6403 set_bit(STRIPE_EXPAND_READY, &sh->state); in reshape_request()
6404 set_bit(STRIPE_HANDLE, &sh->state); in reshape_request()
6406 list_add(&sh->lru, &stripes); in reshape_request()
6429 sh = raid5_get_active_stripe(conf, NULL, first_sector, in reshape_request()
6431 set_bit(STRIPE_EXPAND_SOURCE, &sh->state); in reshape_request()
6432 set_bit(STRIPE_HANDLE, &sh->state); in reshape_request()
6433 raid5_release_stripe(sh); in reshape_request()
6440 sh = list_entry(stripes.next, struct stripe_head, lru); in reshape_request()
6441 list_del_init(&sh->lru); in reshape_request()
6442 raid5_release_stripe(sh); in reshape_request()
6491 struct stripe_head *sh; in raid5_sync_request() local
6550 sh = raid5_get_active_stripe(conf, NULL, sector_nr, in raid5_sync_request()
6552 if (sh == NULL) { in raid5_sync_request()
6553 sh = raid5_get_active_stripe(conf, NULL, sector_nr, 0); in raid5_sync_request()
6573 set_bit(STRIPE_SYNC_REQUESTED, &sh->state); in raid5_sync_request()
6574 set_bit(STRIPE_HANDLE, &sh->state); in raid5_sync_request()
6576 raid5_release_stripe(sh); in raid5_sync_request()
6594 struct stripe_head *sh; in retry_aligned_read() local
6615 sh = raid5_get_active_stripe(conf, NULL, sector, in retry_aligned_read()
6617 if (!sh) { in retry_aligned_read()
6624 if (!add_stripe_bio(sh, raid_bio, dd_idx, 0, 0)) { in retry_aligned_read()
6625 raid5_release_stripe(sh); in retry_aligned_read()
6631 set_bit(R5_ReadNoMerge, &sh->dev[dd_idx].flags); in retry_aligned_read()
6632 handle_stripe(sh); in retry_aligned_read()
6633 raid5_release_stripe(sh); in retry_aligned_read()
6649 struct stripe_head *batch[MAX_STRIPE_BATCH], *sh; in handle_active_stripes() local
6654 (sh = __get_priority_stripe(conf, group)) != NULL) in handle_active_stripes()
6655 batch[batch_size++] = sh; in handle_active_stripes()