Lines matching refs:pd_idx (source line number, matching code, and containing function):

153 	if (idx == sh->pd_idx)  in raid6_idx_to_slot()
288 WARN_ON(test_bit(R5_InJournal, &sh->dev[sh->pd_idx].flags)); in do_release_stripe()
955 while (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) in stripe_add_to_batch_list()
1623 if (i == sh->qd_idx || i == sh->pd_idx || in set_syndrome_sources()
1854 int count = 0, pd_idx = sh->pd_idx, i; in ops_run_prexor5() local
1858 unsigned int off_dest = off_srcs[count] = sh->dev[pd_idx].offset; in ops_run_prexor5()
1859 struct page *xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; in ops_run_prexor5()
1987 int pd_idx = sh->pd_idx; in ops_complete_reconstruct() local
2004 if (dev->written || i == pd_idx || i == qd_idx) { in ops_complete_reconstruct()
2038 int count, pd_idx = sh->pd_idx, i; in ops_run_reconstruct5() local
2051 if (pd_idx == i) in ops_run_reconstruct5()
2058 set_bit(R5_Discard, &sh->dev[pd_idx].flags); in ops_run_reconstruct5()
2071 off_dest = off_srcs[count] = sh->dev[pd_idx].offset; in ops_run_reconstruct5()
2072 xor_dest = xor_srcs[count++] = sh->dev[pd_idx].page; in ops_run_reconstruct5()
2082 xor_dest = sh->dev[pd_idx].page; in ops_run_reconstruct5()
2083 off_dest = sh->dev[pd_idx].offset; in ops_run_reconstruct5()
2086 if (i != pd_idx) { in ops_run_reconstruct5()
2144 if (sh->pd_idx == i || sh->qd_idx == i) in ops_run_reconstruct6()
2151 set_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); in ops_run_reconstruct6()
2206 int pd_idx = sh->pd_idx; in ops_run_check_p() local
2222 xor_dest = sh->dev[pd_idx].page; in ops_run_check_p()
2223 off_dest = sh->dev[pd_idx].offset; in ops_run_check_p()
2227 if (i == pd_idx || i == qd_idx) in ops_run_check_p()
2814 if (sh->qd_idx >= 0 && sh->pd_idx == i) in raid5_end_read_request()
2953 int pd_idx, qd_idx; in raid5_compute_sector() local
2981 pd_idx = qd_idx = -1; in raid5_compute_sector()
2984 pd_idx = data_disks; in raid5_compute_sector()
2989 pd_idx = data_disks - sector_div(stripe2, raid_disks); in raid5_compute_sector()
2990 if (*dd_idx >= pd_idx) in raid5_compute_sector()
2994 pd_idx = sector_div(stripe2, raid_disks); in raid5_compute_sector()
2995 if (*dd_idx >= pd_idx) in raid5_compute_sector()
2999 pd_idx = data_disks - sector_div(stripe2, raid_disks); in raid5_compute_sector()
3000 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; in raid5_compute_sector()
3003 pd_idx = sector_div(stripe2, raid_disks); in raid5_compute_sector()
3004 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; in raid5_compute_sector()
3007 pd_idx = 0; in raid5_compute_sector()
3011 pd_idx = data_disks; in raid5_compute_sector()
3021 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); in raid5_compute_sector()
3022 qd_idx = pd_idx + 1; in raid5_compute_sector()
3023 if (pd_idx == raid_disks-1) { in raid5_compute_sector()
3026 } else if (*dd_idx >= pd_idx) in raid5_compute_sector()
3030 pd_idx = sector_div(stripe2, raid_disks); in raid5_compute_sector()
3031 qd_idx = pd_idx + 1; in raid5_compute_sector()
3032 if (pd_idx == raid_disks-1) { in raid5_compute_sector()
3035 } else if (*dd_idx >= pd_idx) in raid5_compute_sector()
3039 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); in raid5_compute_sector()
3040 qd_idx = (pd_idx + 1) % raid_disks; in raid5_compute_sector()
3041 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; in raid5_compute_sector()
3044 pd_idx = sector_div(stripe2, raid_disks); in raid5_compute_sector()
3045 qd_idx = (pd_idx + 1) % raid_disks; in raid5_compute_sector()
3046 *dd_idx = (pd_idx + 2 + *dd_idx) % raid_disks; in raid5_compute_sector()
3050 pd_idx = 0; in raid5_compute_sector()
3055 pd_idx = data_disks; in raid5_compute_sector()
3063 pd_idx = sector_div(stripe2, raid_disks); in raid5_compute_sector()
3064 qd_idx = pd_idx + 1; in raid5_compute_sector()
3065 if (pd_idx == raid_disks-1) { in raid5_compute_sector()
3068 } else if (*dd_idx >= pd_idx) in raid5_compute_sector()
3079 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); in raid5_compute_sector()
3080 qd_idx = pd_idx + 1; in raid5_compute_sector()
3081 if (pd_idx == raid_disks-1) { in raid5_compute_sector()
3084 } else if (*dd_idx >= pd_idx) in raid5_compute_sector()
3091 pd_idx = raid_disks - 1 - sector_div(stripe2, raid_disks); in raid5_compute_sector()
3092 qd_idx = (pd_idx + raid_disks - 1) % raid_disks; in raid5_compute_sector()
3093 *dd_idx = (pd_idx + 1 + *dd_idx) % raid_disks; in raid5_compute_sector()
3099 pd_idx = data_disks - sector_div(stripe2, raid_disks-1); in raid5_compute_sector()
3100 if (*dd_idx >= pd_idx) in raid5_compute_sector()
3106 pd_idx = sector_div(stripe2, raid_disks-1); in raid5_compute_sector()
3107 if (*dd_idx >= pd_idx) in raid5_compute_sector()
3113 pd_idx = data_disks - sector_div(stripe2, raid_disks-1); in raid5_compute_sector()
3114 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); in raid5_compute_sector()
3119 pd_idx = sector_div(stripe2, raid_disks-1); in raid5_compute_sector()
3120 *dd_idx = (pd_idx + 1 + *dd_idx) % (raid_disks-1); in raid5_compute_sector()
3125 pd_idx = 0; in raid5_compute_sector()
3137 sh->pd_idx = pd_idx; in raid5_compute_sector()
3168 if (i == sh->pd_idx) in raid5_compute_blocknr()
3176 if (i > sh->pd_idx) in raid5_compute_blocknr()
3181 if (i < sh->pd_idx) in raid5_compute_blocknr()
3183 i -= (sh->pd_idx + 1); in raid5_compute_blocknr()
3202 if (sh->pd_idx == raid_disks-1) in raid5_compute_blocknr()
3204 else if (i > sh->pd_idx) in raid5_compute_blocknr()
3209 if (sh->pd_idx == raid_disks-1) in raid5_compute_blocknr()
3213 if (i < sh->pd_idx) in raid5_compute_blocknr()
3215 i -= (sh->pd_idx + 2); in raid5_compute_blocknr()
3225 if (sh->pd_idx == 0) in raid5_compute_blocknr()
3229 if (i < sh->pd_idx) in raid5_compute_blocknr()
3231 i -= (sh->pd_idx + 1); in raid5_compute_blocknr()
3236 if (i > sh->pd_idx) in raid5_compute_blocknr()
3241 if (i < sh->pd_idx) in raid5_compute_blocknr()
3243 i -= (sh->pd_idx + 1); in raid5_compute_blocknr()
3259 if (check != sh->sector || dummy1 != dd_idx || sh2.pd_idx != sh->pd_idx in raid5_compute_blocknr()
3328 int i, pd_idx = sh->pd_idx, qd_idx = sh->qd_idx, disks = sh->disks; in schedule_reconstruction() local
3374 BUG_ON(!(test_bit(R5_UPTODATE, &sh->dev[pd_idx].flags) || in schedule_reconstruction()
3375 test_bit(R5_Wantcompute, &sh->dev[pd_idx].flags))); in schedule_reconstruction()
3382 if (i == pd_idx || i == qd_idx) in schedule_reconstruction()
3409 set_bit(R5_LOCKED, &sh->dev[pd_idx].flags); in schedule_reconstruction()
3410 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); in schedule_reconstruction()
3425 test_bit(R5_Insync, &sh->dev[pd_idx].flags)) in schedule_reconstruction()
3475 if (i != sh->pd_idx && in stripe_bio_overlaps()
3693 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) in handle_failed_sync()
3694 wake_up_bit(&sh->dev[sh->pd_idx].flags, R5_Overlap); in handle_failed_sync()
3822 s->failed_num[i] == sh->pd_idx || in need_this_block()
3844 if (s->failed_num[i] != sh->pd_idx && in need_this_block()
3884 ((sh->qd_idx >= 0 && sh->pd_idx == disk_idx) || in fetch_block()
4045 test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)) { in handle_stripe_clean_event()
4047 clear_bit(R5_Discard, &sh->dev[sh->pd_idx].flags); in handle_stripe_clean_event()
4048 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); in handle_stripe_clean_event()
4137 i == sh->pd_idx || i == sh->qd_idx || in handle_stripe_dirtying()
4149 i != sh->pd_idx && i != sh->qd_idx && in handle_stripe_dirtying()
4172 !test_bit(R5_LOCKED, &sh->dev[sh->pd_idx].flags)) { in handle_stripe_dirtying()
4201 i == sh->pd_idx || i == sh->qd_idx || in handle_stripe_dirtying()
4226 i != sh->pd_idx && i != sh->qd_idx && in handle_stripe_dirtying()
4287 clear_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags); in handle_parity_checks5()
4296 dev = &sh->dev[sh->pd_idx]; in handle_parity_checks5()
4347 &sh->dev[sh->pd_idx].flags); in handle_parity_checks5()
4348 sh->ops.target = sh->pd_idx; in handle_parity_checks5()
4368 int pd_idx = sh->pd_idx; in handle_parity_checks6() local
4408 clear_bit(R5_UPTODATE, &sh->dev[pd_idx].flags); in handle_parity_checks6()
4447 dev = &sh->dev[pd_idx]; in handle_parity_checks6()
4516 &sh->dev[pd_idx].flags); in handle_parity_checks6()
4517 *target = pd_idx; in handle_parity_checks6()
4551 if (i != sh->pd_idx && i != sh->qd_idx) { in handle_stripe_expansion()
4584 if (j != sh2->pd_idx && in handle_stripe_expansion()
4955 atomic_read(&sh->count), sh->pd_idx, sh->qd_idx, in handle_stripe()
5021 BUG_ON(!test_bit(R5_UPTODATE, &sh->dev[sh->pd_idx].flags) && in handle_stripe()
5022 !test_bit(R5_Discard, &sh->dev[sh->pd_idx].flags)); in handle_stripe()
5029 (i == sh->pd_idx || i == sh->qd_idx || in handle_stripe()
5039 ((i == sh->pd_idx || i == sh->qd_idx) && in handle_stripe()
5052 pdev = &sh->dev[sh->pd_idx]; in handle_stripe()
5053 s.p_failed = (s.failed >= 1 && s.failed_num[0] == sh->pd_idx) in handle_stripe()
5054 || (s.failed >= 2 && s.failed_num[1] == sh->pd_idx); in handle_stripe()
5167 if (test_and_clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags)) in handle_stripe()
5168 wake_up_bit(&sh->dev[sh->pd_idx].flags, R5_Overlap); in handle_stripe()
5724 set_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); in make_discard_request()
5727 wait_on_bit(&sh->dev[sh->pd_idx].flags, R5_Overlap, in make_discard_request()
5731 clear_bit(R5_Overlap, &sh->dev[sh->pd_idx].flags); in make_discard_request()
5734 if (d == sh->pd_idx || d == sh->qd_idx) in make_discard_request()
5748 if (d == sh->pd_idx || d == sh->qd_idx) in make_discard_request()
5794 if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) in stripe_ahead_of_reshape()
5824 if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) in add_all_stripe_bios()
5849 if (dd_idx == sh->pd_idx || dd_idx == sh->qd_idx) in add_all_stripe_bios()
6077 while (dd_idx == sh.pd_idx || dd_idx == sh.qd_idx) in raid5_bio_lowest_chunk_sector()
6413 if (j == sh->pd_idx) in reshape_request()
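
For context: in struct stripe_head, pd_idx is the index of the parity ('P') disk and qd_idx the index of the syndrome ('Q') disk used by RAID6. raid5_compute_sector() (lines 2953-3137 above) derives pd_idx from the stripe number according to the array's layout algorithm, and raid5_compute_blocknr() (lines 3168-3259) performs the inverse mapping. The program below is a minimal user-space sketch of that arithmetic for the left-symmetric RAID5 layout, mirroring lines 2999-3000 above; the disk count, chunk size, and example sector are illustrative assumptions, not values taken from the source.

/* Sketch only: user-space rework of the pd_idx/dd_idx arithmetic for the
 * left-symmetric RAID5 layout as it appears in raid5_compute_sector()
 * (lines 2999-3000 above).  raid_disks, chunk_sectors and the example
 * sector are assumed values for illustration, not kernel defaults. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	const unsigned raid_disks = 4;              /* assumed array size */
	const unsigned data_disks = raid_disks - 1; /* one parity disk per stripe */
	const uint64_t chunk_sectors = 1024;        /* assumed 512 KiB chunks */
	uint64_t logical_sector = 123456;           /* example request sector */

	/* Which data chunk the sector falls in, then which data slot of the
	 * stripe and which stripe number that chunk belongs to. */
	uint64_t chunk_number = logical_sector / chunk_sectors;
	unsigned dd_idx = (unsigned)(chunk_number % data_disks);
	uint64_t stripe = chunk_number / data_disks;

	/* Left-symmetric layout: parity rotates backwards across the disks,
	 * and data slots are numbered starting after the parity disk. */
	unsigned pd_idx = data_disks - (unsigned)(stripe % raid_disks);
	dd_idx = (pd_idx + 1 + dd_idx) % raid_disks;

	printf("stripe %llu: pd_idx=%u dd_idx=%u\n",
	       (unsigned long long)stripe, pd_idx, dd_idx);
	return 0;
}

For the example values above this prints "stripe 40: pd_idx=3 dd_idx=0". The other layout cases visible in the listing (left/right asymmetric, PARITY_0/PARITY_N, and the RAID6 variants that also place qd_idx) differ only in how the remainder of stripe % raid_disks is turned into pd_idx and in how the data slot is shifted around it.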