Lines matching refs: r10_bio

76 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio);
134 struct r10bio *r10_bio;
140 r10_bio = r10bio_pool_alloc(gfp_flags, conf);
141 if (!r10_bio)
167 r10_bio->devs[j].bio = bio;
174 r10_bio->devs[j].repl_bio = bio;
181 struct bio *rbio = r10_bio->devs[j].repl_bio;
188 bio = r10_bio->devs[j].bio;
199 rp->raid_bio = r10_bio;
207 return r10_bio;
216 if (r10_bio->devs[j].bio)
217 bio_uninit(r10_bio->devs[j].bio);
218 kfree(r10_bio->devs[j].bio);
219 if (r10_bio->devs[j].repl_bio)
220 bio_uninit(r10_bio->devs[j].repl_bio);
221 kfree(r10_bio->devs[j].repl_bio);
225 rbio_pool_free(r10_bio, conf);
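The hits above (source lines 134-225) pair the resync-buffer allocator, which attaches one data bio and optionally one replacement bio per copy, with a destructor that tears down exactly the same slots. A minimal userspace sketch of that pairing follows; struct slot, alloc_slots and free_slots are invented stand-ins, and plain calloc/free replace the kernel's bio_init/bio_uninit and mempool machinery:

    #include <stdlib.h>

    struct slot { void *bio, *repl_bio; };

    static void free_slots(struct slot *devs, int copies)
    {
        for (int j = 0; j < copies; j++) {
            free(devs[j].bio);       /* bio_uninit() + kfree() upstream */
            free(devs[j].repl_bio);  /* may be NULL; free(NULL) is a no-op */
        }
    }

    static int alloc_slots(struct slot *devs, int copies, int want_repl)
    {
        for (int j = 0; j < copies; j++) {
            devs[j].bio = calloc(1, sizeof(int));   /* placeholder "bio" */
            devs[j].repl_bio = want_repl ? calloc(1, sizeof(int)) : NULL;
            if (!devs[j].bio || (want_repl && !devs[j].repl_bio)) {
                free_slots(devs, j + 1);  /* unwind everything set up so far */
                return -1;
            }
        }
        return 0;
    }

The point of the pairing is that a partial allocation failure unwinds through the same per-slot teardown the normal free path uses.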
259 static void put_all_bios(struct r10conf *conf, struct r10bio *r10_bio)
264 struct bio **bio = & r10_bio->devs[i].bio;
268 bio = &r10_bio->devs[i].repl_bio;
269 if (r10_bio->read_slot < 0 && !BIO_SPECIAL(*bio))
275 static void free_r10bio(struct r10bio *r10_bio)
277 struct r10conf *conf = r10_bio->mddev->private;
279 put_all_bios(conf, r10_bio);
280 mempool_free(r10_bio, &conf->r10bio_pool);
283 static void put_buf(struct r10bio *r10_bio)
285 struct r10conf *conf = r10_bio->mddev->private;
287 mempool_free(r10_bio, &conf->r10buf_pool);
298 static void reschedule_retry(struct r10bio *r10_bio)
301 struct mddev *mddev = r10_bio->mddev;
305 list_add(&r10_bio->retry_list, &conf->retry_list);
320 static void raid_end_bio_io(struct r10bio *r10_bio)
322 struct bio *bio = r10_bio->master_bio;
323 struct r10conf *conf = r10_bio->mddev->private;
325 if (!test_and_set_bit(R10BIO_Returned, &r10_bio->state)) {
326 if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
337 free_r10bio(r10_bio);
343 static inline void update_head_pos(int slot, struct r10bio *r10_bio)
345 struct r10conf *conf = r10_bio->mddev->private;
347 conf->mirrors[r10_bio->devs[slot].devnum].head_position =
348 r10_bio->devs[slot].addr + (r10_bio->sectors);
354 static int find_bio_disk(struct r10conf *conf, struct r10bio *r10_bio,
361 if (r10_bio->devs[slot].bio == bio)
363 if (r10_bio->devs[slot].repl_bio == bio) {
369 update_head_pos(slot, r10_bio);
375 return r10_bio->devs[slot].devnum;
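The find_bio_disk hits (source lines 354-375) show a completed bio being mapped back to its slot by scanning devs[] for a pointer match against either the data bio or the replacement bio. A compilable sketch of that scan, with invented stand-in types (struct bio_model, struct dev_slot):

    struct bio_model { int id; };
    struct dev_slot  { struct bio_model *bio, *repl_bio; int devnum; };

    static int find_bio_disk_model(struct dev_slot *devs, int copies,
                                   struct bio_model *done,
                                   int *slot, int *repl)
    {
        for (int i = 0; i < copies; i++) {
            if (devs[i].bio == done) {
                *slot = i; *repl = 0;
                return devs[i].devnum;
            }
            if (devs[i].repl_bio == done) {
                *slot = i; *repl = 1;  /* completion was on the replacement */
                return devs[i].devnum;
            }
        }
        return -1;  /* not found: the caller treats this as a bug */
    }

In the kernel the match also updates the recorded head position for the device (update_head_pos, source lines 343-348).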
381 struct r10bio *r10_bio = bio->bi_private;
384 struct r10conf *conf = r10_bio->mddev->private;
386 slot = r10_bio->read_slot;
387 rdev = r10_bio->devs[slot].rdev;
391 update_head_pos(slot, r10_bio);
403 set_bit(R10BIO_Uptodate, &r10_bio->state);
412 if (!_enough(conf, test_bit(R10BIO_Previous, &r10_bio->state),
417 raid_end_bio_io(r10_bio);
426 (unsigned long long)r10_bio->sector);
427 set_bit(R10BIO_ReadError, &r10_bio->state);
428 reschedule_retry(r10_bio);
432 static void close_write(struct r10bio *r10_bio)
434 struct mddev *mddev = r10_bio->mddev;
439 static void one_write_done(struct r10bio *r10_bio)
441 if (atomic_dec_and_test(&r10_bio->remaining)) {
442 if (test_bit(R10BIO_WriteError, &r10_bio->state))
443 reschedule_retry(r10_bio);
445 close_write(r10_bio);
446 if (test_bit(R10BIO_MadeGood, &r10_bio->state))
447 reschedule_retry(r10_bio);
449 raid_end_bio_io(r10_bio);
456 struct r10bio *r10_bio = bio->bi_private;
459 struct r10conf *conf = r10_bio->mddev->private;
466 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
501 set_bit(R10BIO_WriteError, &r10_bio->state);
504 r10_bio->devs[slot].bio = NULL;
528 set_bit(R10BIO_Uptodate, &r10_bio->state);
531 if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr,
532 r10_bio->sectors) &&
536 r10_bio->devs[slot].repl_bio = IO_MADE_GOOD;
538 r10_bio->devs[slot].bio = IO_MADE_GOOD;
540 set_bit(R10BIO_MadeGood, &r10_bio->state);
549 one_write_done(r10_bio);
724 struct r10bio *r10_bio,
727 const sector_t this_sector = r10_bio->sector;
729 int sectors = r10_bio->sectors;
739 raid10_find_phys(conf, r10_bio);
747 clear_bit(R10BIO_FailFast, &r10_bio->state);
759 if (r10_bio->devs[slot].bio == IO_BLOCKED)
761 disk = r10_bio->devs[slot].devnum;
764 r10_bio->devs[slot].addr + sectors >
771 r10_bio->devs[slot].addr + sectors > rdev->recovery_offset)
774 dev_sector = r10_bio->devs[slot].addr;
820 set_bit(R10BIO_FailFast, &r10_bio->state);
830 new_distance = r10_bio->devs[slot].addr;
832 new_distance = abs(r10_bio->devs[slot].addr -
853 r10_bio->read_slot = slot;
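The read_balance hits (source lines 724-853) end by storing the chosen slot into r10_bio->read_slot, and source lines 830-832 show the core heuristic: prefer the mirror whose recorded head position is closest to the target address. A simplified sketch of just that distance comparison; the parallel arrays are an invented stand-in, and the real function additionally weighs pending I/O, bad blocks, recovery offsets and failfast state:

    #include <stdlib.h>

    static int pick_closest(const long *head_pos, const long *addr, int slots)
    {
        int best = -1;
        long best_dist = 0;
        for (int s = 0; s < slots; s++) {
            long dist = labs(addr[s] - head_pos[s]);  /* seek distance */
            if (best < 0 || dist < best_dist) {
                best = s;
                best_dist = dist;
            }
        }
        return best;  /* becomes r10_bio->read_slot */
    }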
1074 static sector_t choose_data_offset(struct r10bio *r10_bio,
1078 test_bit(R10BIO_Previous, &r10_bio->state))
1149 struct r10bio *r10_bio, bool io_accounting)
1156 int slot = r10_bio->read_slot;
1160 if (slot >= 0 && r10_bio->devs[slot].rdev) {
1163 * safely dereference the rdev in the r10_bio,
1175 disk = r10_bio->devs[slot].devnum;
1182 err_rdev = r10_bio->devs[slot].rdev;
1186 if (!regular_request_wait(mddev, conf, bio, r10_bio->sectors)) {
1187 free_r10bio(r10_bio);
1191 rdev = read_balance(conf, r10_bio, &max_sectors);
1196 (unsigned long long)r10_bio->sector);
1198 raid_end_bio_io(r10_bio);
1205 (unsigned long long)r10_bio->sector);
1212 set_bit(R10BIO_Returned, &r10_bio->state);
1216 r10_bio->master_bio = bio;
1217 r10_bio->sectors = max_sectors;
1219 slot = r10_bio->read_slot;
1223 r10_bio->master_bio = bio;
1228 r10_bio->devs[slot].bio = read_bio;
1229 r10_bio->devs[slot].rdev = rdev;
1231 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
1232 choose_data_offset(r10_bio, rdev);
1235 test_bit(R10BIO_FailFast, &r10_bio->state))
1237 read_bio->bi_private = r10_bio;
1238 mddev_trace_remap(mddev, read_bio, r10_bio->sector);
1243 raid_end_bio_io(r10_bio);
1246 static void raid10_write_one_disk(struct mddev *mddev, struct r10bio *r10_bio,
1253 int devnum = r10_bio->devs[n_copy].devnum;
1262 r10_bio->devs[n_copy].repl_bio = mbio;
1264 r10_bio->devs[n_copy].bio = mbio;
1266 mbio->bi_iter.bi_sector = (r10_bio->devs[n_copy].addr +
1267 choose_data_offset(r10_bio, rdev));
1273 mbio->bi_private = r10_bio;
1274 mddev_trace_remap(mddev, mbio, r10_bio->sector);
1278 atomic_inc(&r10_bio->remaining);
1288 static void wait_blocked_dev(struct mddev *mddev, struct r10bio *r10_bio)
1301 sector_t dev_sector = r10_bio->devs[i].addr;
1308 r10_bio->sectors &&
1310 r10_bio->sectors) < 0)
1345 struct r10bio *r10_bio)
1373 sectors = r10_bio->sectors;
1375 free_r10bio(r10_bio);
1410 * gets its own r10_bio with a set of bios attached.
1413 r10_bio->read_slot = -1; /* make sure repl_bio gets freed */
1414 raid10_find_phys(conf, r10_bio);
1416 wait_blocked_dev(mddev, r10_bio);
1418 max_sectors = r10_bio->sectors;
1421 int d = r10_bio->devs[i].devnum;
1431 r10_bio->devs[i].bio = NULL;
1432 r10_bio->devs[i].repl_bio = NULL;
1438 sector_t dev_sector = r10_bio->devs[i].addr;
1473 r10_bio->devs[i].bio = bio;
1477 r10_bio->devs[i].repl_bio = bio;
1482 if (max_sectors < r10_bio->sectors)
1483 r10_bio->sectors = max_sectors;
1485 if (r10_bio->sectors < bio_sectors(bio)) {
1487 bio = bio_submit_split_bioset(bio, r10_bio->sectors,
1491 set_bit(R10BIO_Returned, &r10_bio->state);
1495 r10_bio->master_bio = bio;
1499 r10_bio->master_bio = bio;
1500 atomic_set(&r10_bio->remaining, 1);
1503 if (r10_bio->devs[i].bio)
1504 raid10_write_one_disk(mddev, r10_bio, bio, false, i);
1505 if (r10_bio->devs[i].repl_bio)
1506 raid10_write_one_disk(mddev, r10_bio, bio, true, i);
1508 one_write_done(r10_bio);
1512 int d = r10_bio->devs[k].devnum;
1516 if (r10_bio->devs[k].bio) {
1518 r10_bio->devs[k].bio = NULL;
1520 if (r10_bio->devs[k].repl_bio) {
1522 r10_bio->devs[k].repl_bio = NULL;
1526 raid_end_bio_io(r10_bio);
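Across the write path above, r10_bio->remaining implements a biased reference count: it starts at 1 (source line 1500), each per-disk submission adds 1 (source line 1278), and every completion drops 1 via one_write_done (source lines 439-449), with the final one_write_done call dropping the initial bias. The master bio completes only when the count reaches zero. A compilable C11 model of the pattern, using stdatomic in place of the kernel's atomic_t; names ending in _model are invented:

    #include <stdatomic.h>
    #include <stdio.h>

    struct r10bio_model { atomic_int remaining; };

    static void one_write_done(struct r10bio_model *r10_bio)
    {
        /* previous value 1 means we just hit zero: last reference */
        if (atomic_fetch_sub(&r10_bio->remaining, 1) == 1)
            puts("all copies done: end master bio");  /* raid_end_bio_io() */
    }

    int main(void)
    {
        struct r10bio_model r10_bio;
        atomic_init(&r10_bio.remaining, 1);      /* bias, dropped at the end */

        for (int copy = 0; copy < 2; copy++) {   /* raid10_write_one_disk() */
            atomic_fetch_add(&r10_bio.remaining, 1);
            one_write_done(&r10_bio);            /* completion, possibly async */
        }
        one_write_done(&r10_bio);                /* drop the initial bias */
        return 0;
    }

The bias guarantees the request cannot complete while submissions are still in flight, even if every per-disk completion arrives immediately.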
1532 struct r10bio *r10_bio;
1534 r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
1536 r10_bio->master_bio = bio;
1537 r10_bio->sectors = sectors;
1539 r10_bio->mddev = mddev;
1540 r10_bio->sector = bio->bi_iter.bi_sector;
1541 r10_bio->state = 0;
1542 r10_bio->read_slot = -1;
1543 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) *
1547 raid10_read_request(mddev, bio, r10_bio, true);
1549 raid10_write_request(mddev, bio, r10_bio);
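Source lines 1532-1549 show the common setup every request gets before being dispatched to the read or write path: a fresh r10bio is drawn from the pool, stamped with the master bio, sector range and mddev, and its per-copy slots are zeroed. A minimal userspace model of that initialization; the types are invented stand-ins and malloc replaces mempool_alloc:

    #include <stdlib.h>
    #include <string.h>

    struct dev_info { void *bio, *repl_bio; int devnum; long addr; };
    struct r10bio_m {
        void *master_bio;
        long sector;
        int sectors;
        unsigned long state;
        int read_slot;
        struct dev_info devs[];   /* sized by the geometry's copy count */
    };

    static struct r10bio_m *init_r10bio(void *bio, long sector, int sectors,
                                        int copies)
    {
        struct r10bio_m *r10 = malloc(sizeof(*r10) +
                                      copies * sizeof(r10->devs[0]));
        if (!r10)
            return NULL;
        r10->master_bio = bio;
        r10->sector = sector;     /* logical start on the array */
        r10->sectors = sectors;
        r10->state = 0;
        r10->read_slot = -1;      /* no read device chosen yet */
        memset(r10->devs, 0, copies * sizeof(r10->devs[0]));
        return r10;
    }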
1576 struct r10bio *r10_bio = bio->bi_private;
1577 struct r10conf *conf = r10_bio->mddev->private;
1585 if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
1586 set_bit(R10BIO_Uptodate, &r10_bio->state);
1588 dev = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
1592 raid_end_discard_bio(r10_bio);
1608 struct r10bio *r10_bio, *first_r10bio;
1726 r10_bio = mempool_alloc(&conf->r10bio_pool, GFP_NOIO);
1727 r10_bio->mddev = mddev;
1728 r10_bio->state = 0;
1729 r10_bio->sectors = 0;
1730 memset(r10_bio->devs, 0, sizeof(r10_bio->devs[0]) * geo->raid_disks);
1731 wait_blocked_dev(mddev, r10_bio);
1742 r10_bio->master_bio = bio;
1743 set_bit(R10BIO_Discard, &r10_bio->state);
1745 first_r10bio = r10_bio;
1747 r10_bio->master_bio = (struct bio *)first_r10bio;
1759 r10_bio->devs[disk].bio = NULL;
1760 r10_bio->devs[disk].repl_bio = NULL;
1770 r10_bio->devs[disk].bio = bio;
1774 r10_bio->devs[disk].repl_bio = bio;
1779 atomic_set(&r10_bio->remaining, 1);
1816 if (r10_bio->devs[disk].bio) {
1821 mbio->bi_private = r10_bio;
1822 r10_bio->devs[disk].bio = mbio;
1823 r10_bio->devs[disk].devnum = disk;
1824 atomic_inc(&r10_bio->remaining);
1826 dev_start + choose_data_offset(r10_bio, rdev),
1830 if (r10_bio->devs[disk].repl_bio) {
1835 rbio->bi_private = r10_bio;
1836 r10_bio->devs[disk].repl_bio = rbio;
1837 r10_bio->devs[disk].devnum = disk;
1838 atomic_inc(&r10_bio->remaining);
1840 dev_start + choose_data_offset(r10_bio, rrdev),
1852 raid_end_discard_bio(r10_bio);
1857 raid_end_discard_bio(r10_bio);
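The discard hits (source lines 1742-1747) show an unusual convention: the first r10bio of a large discard keeps the real master bio and sets R10BIO_Discard, while every later r10bio stores a pointer to that first r10bio, cast through its master_bio field. A plausible reading of how completion resolves the real master bio, sketched with invented types; this is an inference from the listing, not the kernel's raid_end_discard_bio:

    #include <stdbool.h>

    struct d_r10bio {
        void *master_bio;  /* struct bio * on the head, d_r10bio * elsewhere */
        bool  is_head;     /* stands in for R10BIO_Discard */
    };

    static void *real_master_bio(struct d_r10bio *r10_bio)
    {
        if (r10_bio->is_head)
            return r10_bio->master_bio;
        /* otherwise master_bio actually points at the head r10bio */
        return ((struct d_r10bio *)r10_bio->master_bio)->master_bio;
    }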
2219 static void __end_sync_read(struct r10bio *r10_bio, struct bio *bio, int d)
2221 struct r10conf *conf = r10_bio->mddev->private;
2224 set_bit(R10BIO_Uptodate, &r10_bio->state);
2229 atomic_add(r10_bio->sectors,
2236 if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
2237 atomic_dec_and_test(&r10_bio->remaining)) {
2241 reschedule_retry(r10_bio);
2247 struct r10bio *r10_bio = get_resync_r10bio(bio);
2248 struct r10conf *conf = r10_bio->mddev->private;
2249 int d = find_bio_disk(conf, r10_bio, bio, NULL, NULL);
2251 __end_sync_read(r10_bio, bio, d);
2257 struct r10bio *r10_bio = bio->bi_private;
2259 __end_sync_read(r10_bio, bio, r10_bio->read_slot);
2262 static void end_sync_request(struct r10bio *r10_bio)
2264 struct mddev *mddev = r10_bio->mddev;
2266 while (atomic_dec_and_test(&r10_bio->remaining)) {
2267 if (r10_bio->master_bio == NULL) {
2269 sector_t s = r10_bio->sectors;
2270 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2271 test_bit(R10BIO_WriteError, &r10_bio->state))
2272 reschedule_retry(r10_bio);
2274 put_buf(r10_bio);
2278 struct r10bio *r10_bio2 = (struct r10bio *)r10_bio->master_bio;
2279 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
2280 test_bit(R10BIO_WriteError, &r10_bio->state))
2281 reschedule_retry(r10_bio);
2283 put_buf(r10_bio);
2284 r10_bio = r10_bio2;
2291 struct r10bio *r10_bio = get_resync_r10bio(bio);
2292 struct mddev *mddev = r10_bio->mddev;
2299 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
2313 set_bit(R10BIO_WriteError, &r10_bio->state);
2315 } else if (rdev_has_badblock(rdev, r10_bio->devs[slot].addr,
2316 r10_bio->sectors)) {
2317 set_bit(R10BIO_MadeGood, &r10_bio->state);
2322 end_sync_request(r10_bio);
2341 static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2349 atomic_set(&r10_bio->remaining, 1);
2353 if (!r10_bio->devs[i].bio->bi_status)
2360 fbio = r10_bio->devs[i].bio;
2361 fbio->bi_iter.bi_size = r10_bio->sectors << 9;
2365 vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
2372 tbio = r10_bio->devs[i].bio;
2380 d = r10_bio->devs[i].devnum;
2382 if (!r10_bio->devs[i].bio->bi_status) {
2387 int sectors = r10_bio->sectors;
2400 atomic64_add(r10_bio->sectors, &mddev->resync_mismatches);
2419 rp->raid_bio = r10_bio;
2421 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
2427 atomic_inc(&r10_bio->remaining);
2439 tbio = r10_bio->devs[i].repl_bio;
2442 if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
2443 && r10_bio->devs[i].bio != fbio)
2445 atomic_inc(&r10_bio->remaining);
2450 if (atomic_dec_and_test(&r10_bio->remaining)) {
2451 md_done_sync(mddev, r10_bio->sectors);
2452 put_buf(r10_bio);
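sync_request_write (source lines 2341-2452) picks the first copy that read without error as the reference (fbio), compares every other copy against it, bumps resync_mismatches when they differ, and queues rewrites from the reference for any copy that differs or failed. A simplified model of that comparison pass; flat buffers stand in for the per-bio resync pages, and the names are invented:

    #include <string.h>

    /* Returns the number of copies to rewrite from the reference,
     * or -1 if no copy read cleanly. */
    static int copies_to_rewrite(unsigned char **copy, const int *read_ok,
                                 int ncopies, size_t len, int *rewrite)
    {
        int first = -1, n = 0;
        for (int i = 0; i < ncopies; i++)
            if (read_ok[i]) { first = i; break; }
        if (first < 0)
            return -1;                   /* nothing usable to sync from */
        for (int i = 0; i < ncopies; i++) {
            if (i == first)
                continue;
            if (!read_ok[i] || memcmp(copy[first], copy[i], len) != 0)
                rewrite[n++] = i;        /* schedule write from 'first' */
        }
        return n;
    }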
2461 * There is a separate r10_bio for each non-in_sync drive.
2466 static void fix_recovery_read_error(struct r10bio *r10_bio)
2475 struct mddev *mddev = r10_bio->mddev;
2477 struct bio *bio = r10_bio->devs[0].bio;
2479 int sectors = r10_bio->sectors;
2481 int dr = r10_bio->devs[0].devnum;
2482 int dw = r10_bio->devs[1].devnum;
2495 addr = r10_bio->devs[0].addr + sect;
2503 addr = r10_bio->devs[1].addr + sect;
2527 addr = r10_bio->devs[1].addr + sect;
2547 static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2551 struct bio *wbio = r10_bio->devs[1].bio;
2552 struct bio *wbio2 = r10_bio->devs[1].repl_bio;
2561 if (!test_bit(R10BIO_Uptodate, &r10_bio->state)) {
2562 fix_recovery_read_error(r10_bio);
2564 end_sync_request(r10_bio);
2566 end_sync_request(r10_bio);
2574 d = r10_bio->devs[1].devnum;
2613 static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10bio *r10_bio)
2615 int sect = 0; /* Offset from r10_bio->sector */
2616 int sectors = r10_bio->sectors, slot = r10_bio->read_slot;
2618 int d = r10_bio->devs[slot].devnum;
2631 r10_bio->devs[slot].bio = IO_BLOCKED;
2645 d = r10_bio->devs[sl].devnum;
2651 r10_bio->devs[sl].addr + sect,
2655 r10_bio->devs[sl].addr +
2674 int dn = r10_bio->devs[slot].devnum;
2679 r10_bio->devs[slot].addr
2682 r10_bio->devs[slot].bio
2694 d = r10_bio->devs[sl].devnum;
2703 r10_bio->devs[sl].addr +
2712 choose_data_offset(r10_bio,
2726 d = r10_bio->devs[sl].devnum;
2735 r10_bio->devs[sl].addr +
2744 choose_data_offset(r10_bio, rdev)),
2755 choose_data_offset(r10_bio, rdev)),
2768 static void narrow_write_error(struct r10bio *r10_bio, int i)
2770 struct bio *bio = r10_bio->master_bio;
2771 struct mddev *mddev = r10_bio->mddev;
2773 struct md_rdev *rdev = conf->mirrors[r10_bio->devs[i].devnum].rdev;
2788 int sect_to_write = r10_bio->sectors;
2795 sector = r10_bio->sector;
2796 sectors = ((r10_bio->sector + block_sectors)
2809 wsector = r10_bio->devs[i].addr + (sector - r10_bio->sector);
2811 choose_data_offset(r10_bio, rdev);
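narrow_write_error (source lines 2768-2811) retries a failed write in badblock-granularity pieces so that one bad sector does not condemn the whole range: the first chunk runs to the next block boundary, then full blocks follow. A sketch of that chunking loop, assuming a power-of-two badblock granularity as the kernel's alignment masking implies; write_chunk is a hypothetical callback standing in for the sync rewrite plus bad-block recording:

    static int rewrite_in_chunks(long sector, int sect_to_write,
                                 int block_sectors,
                                 int (*write_chunk)(long sector, int sectors))
    {
        int ok = 1;
        while (sect_to_write) {
            /* distance to the next block boundary, at most one block */
            int sectors = block_sectors - (int)(sector % block_sectors);
            if (sectors > sect_to_write)
                sectors = sect_to_write;
            if (!write_chunk(sector, sectors))
                ok = 0;              /* kernel records a bad block here */
            sector += sectors;
            sect_to_write -= sectors;
        }
        return ok;
    }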
2831 static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
2833 int slot = r10_bio->read_slot;
2836 struct md_rdev *rdev = r10_bio->devs[slot].rdev;
2846 bio = r10_bio->devs[slot].bio;
2848 r10_bio->devs[slot].bio = NULL;
2851 r10_bio->devs[slot].bio = IO_BLOCKED;
2854 fix_read_error(conf, mddev, r10_bio);
2860 r10_bio->state = 0;
2861 raid10_read_request(mddev, r10_bio->master_bio, r10_bio, false);
2869 static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
2880 if (test_bit(R10BIO_IsSync, &r10_bio->state) ||
2881 test_bit(R10BIO_IsRecover, &r10_bio->state)) {
2883 int dev = r10_bio->devs[m].devnum;
2885 if (r10_bio->devs[m].bio == NULL ||
2886 r10_bio->devs[m].bio->bi_end_io == NULL)
2888 if (!r10_bio->devs[m].bio->bi_status)
2891 r10_bio->devs[m].addr,
2892 r10_bio->sectors, 0);
2895 r10_bio->devs[m].addr,
2896 r10_bio->sectors, 0);
2898 if (r10_bio->devs[m].repl_bio == NULL ||
2899 r10_bio->devs[m].repl_bio->bi_end_io == NULL)
2902 if (!r10_bio->devs[m].repl_bio->bi_status)
2905 r10_bio->devs[m].addr,
2906 r10_bio->sectors, 0);
2909 r10_bio->devs[m].addr,
2910 r10_bio->sectors, 0);
2912 put_buf(r10_bio);
2916 int dev = r10_bio->devs[m].devnum;
2917 struct bio *bio = r10_bio->devs[m].bio;
2922 r10_bio->devs[m].addr,
2923 r10_bio->sectors, 0);
2927 narrow_write_error(r10_bio, m);
2930 bio = r10_bio->devs[m].repl_bio;
2935 r10_bio->devs[m].addr,
2936 r10_bio->sectors, 0);
2942 list_add(&r10_bio->retry_list, &conf->bio_end_io_list);
2953 &r10_bio->state))
2954 close_write(r10_bio);
2955 raid_end_bio_io(r10_bio);
2963 struct r10bio *r10_bio;
2983 r10_bio = list_first_entry(&tmp, struct r10bio,
2985 list_del(&r10_bio->retry_list);
2988 &r10_bio->state))
2989 close_write(r10_bio);
2990 raid_end_bio_io(r10_bio);
3004 r10_bio = list_entry(head->prev, struct r10bio, retry_list);
3009 mddev = r10_bio->mddev;
3011 if (test_bit(R10BIO_MadeGood, &r10_bio->state) ||
3012 test_bit(R10BIO_WriteError, &r10_bio->state))
3013 handle_write_completed(conf, r10_bio);
3014 else if (test_bit(R10BIO_IsReshape, &r10_bio->state))
3015 reshape_request_write(mddev, r10_bio);
3016 else if (test_bit(R10BIO_IsSync, &r10_bio->state))
3017 sync_request_write(mddev, r10_bio);
3018 else if (test_bit(R10BIO_IsRecover, &r10_bio->state))
3019 recovery_request_write(mddev, r10_bio);
3020 else if (test_bit(R10BIO_ReadError, &r10_bio->state))
3021 handle_read_error(mddev, r10_bio);
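Source lines 3004-3021 are the raid10d retry dispatcher: each r10bio pulled off the retry list is routed to a handler purely by its state bits, tested in a fixed priority order. A sketch of that routing; the bit values and names are stand-ins, not the kernel's R10BIO_* definitions:

    #include <stdio.h>

    enum { MADE_GOOD = 1, WRITE_ERR = 2, IS_RESHAPE = 4,
           IS_SYNC = 8, IS_RECOVER = 16, READ_ERR = 32 };

    static void dispatch(unsigned long state)
    {
        if (state & (MADE_GOOD | WRITE_ERR))
            puts("handle_write_completed");
        else if (state & IS_RESHAPE)
            puts("reshape_request_write");
        else if (state & IS_SYNC)
            puts("sync_request_write");
        else if (state & IS_RECOVER)
            puts("recovery_request_write");
        else if (state & READ_ERR)
            puts("handle_read_error");
    }

Write-completion bookkeeping is checked first because MadeGood/WriteError can be set on sync and recovery r10bios as well as normal writes.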
3136 * a number of r10_bio structures, one for each out-of-sync device.
3141 * The r10_bio structures are linked using a borrowed master_bio pointer.
3142 * This link is counted in ->remaining. When the r10_bio that points to NULL
3152 struct r10bio *r10_bio;
3273 r10_bio = NULL;
3298 rb2 = r10_bio;
3326 r10_bio = raid10_alloc_init_r10buf(conf);
3327 r10_bio->state = 0;
3329 atomic_set(&r10_bio->remaining, 0);
3331 r10_bio->master_bio = (struct bio*)rb2;
3334 r10_bio->mddev = mddev;
3335 set_bit(R10BIO_IsRecover, &r10_bio->state);
3336 r10_bio->sector = sect;
3338 raid10_find_phys(conf, r10_bio);
3357 int d = r10_bio->devs[j].devnum;
3366 sector = r10_bio->devs[j].addr;
3381 bio = r10_bio->devs[0].bio;
3388 from_addr = r10_bio->devs[j].addr;
3396 if (r10_bio->devs[k].devnum == i)
3399 to_addr = r10_bio->devs[k].addr;
3400 r10_bio->devs[0].devnum = d;
3401 r10_bio->devs[0].addr = from_addr;
3402 r10_bio->devs[1].devnum = i;
3403 r10_bio->devs[1].addr = to_addr;
3406 bio = r10_bio->devs[1].bio;
3414 atomic_inc(&r10_bio->remaining);
3416 r10_bio->devs[1].bio->bi_end_io = NULL;
3419 bio = r10_bio->devs[1].repl_bio;
3435 atomic_inc(&r10_bio->remaining);
3447 if (r10_bio->devs[k].devnum == i)
3453 r10_bio->devs[k].addr,
3458 r10_bio->devs[k].addr,
3461 mdname(mddev), r10_bio->devs[k].addr, max_sync);
3463 put_buf(r10_bio);
3466 r10_bio = rb2;
3477 if (r10_bio->devs[0].bio->bi_opf & MD_FAILFAST) {
3484 int d = r10_bio->devs[j].devnum;
3491 r10_bio->devs[0].bio->bi_opf
3496 while (r10_bio) {
3497 struct r10bio *rb2 = r10_bio;
3498 r10_bio = (struct r10bio*) rb2->master_bio;
3531 r10_bio = raid10_alloc_init_r10buf(conf);
3532 r10_bio->state = 0;
3534 r10_bio->mddev = mddev;
3535 atomic_set(&r10_bio->remaining, 0);
3539 r10_bio->master_bio = NULL;
3540 r10_bio->sector = sector_nr;
3541 set_bit(R10BIO_IsSync, &r10_bio->state);
3542 raid10_find_phys(conf, r10_bio);
3543 r10_bio->sectors = (sector_nr | chunk_mask) - sector_nr + 1;
3546 int d = r10_bio->devs[i].devnum;
3551 if (r10_bio->devs[i].repl_bio)
3552 r10_bio->devs[i].repl_bio->bi_end_io = NULL;
3554 bio = r10_bio->devs[i].bio;
3560 sector = r10_bio->devs[i].addr;
3573 atomic_inc(&r10_bio->remaining);
3591 bio = r10_bio->devs[i].repl_bio;
3594 sector = r10_bio->devs[i].addr;
3608 int d = r10_bio->devs[i].devnum;
3609 if (r10_bio->devs[i].bio->bi_end_io)
3612 if (r10_bio->devs[i].repl_bio &&
3613 r10_bio->devs[i].repl_bio->bi_end_io)
3618 put_buf(r10_bio);
3647 r10_bio->sectors = nr_sectors;
3700 r10_bio = get_resync_r10bio(bio);
3701 r10_bio->sectors = nr_sectors;
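The comment at source lines 3136-3142 and the hits that follow describe the recovery chaining trick: each out-of-sync device gets its own r10bio, and the r10bios are linked through a borrowed master_bio pointer (cast to and from struct bio *, source lines 3331 and 3498), with the chain terminated by NULL. A minimal model of building and then unwinding such a chain; struct r10bio_chain is an invented stand-in and free replaces put_buf:

    #include <stdlib.h>

    struct r10bio_chain {
        struct r10bio_chain *master;  /* stands in for (struct bio *)master_bio */
    };

    static void free_chain(struct r10bio_chain *r10_bio)
    {
        while (r10_bio) {                     /* cf. the loop at 3496-3498 */
            struct r10bio_chain *rb2 = r10_bio;
            r10_bio = rb2->master;            /* follow the borrowed pointer */
            free(rb2);                        /* put_buf() in the kernel */
        }
    }

Borrowing master_bio works because these r10bios never carry a real master bio; the field is repurposed as a next pointer until the whole batch is issued or torn down.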
4599 struct r10bio *r10_bio;
4704 r10_bio = raid10_alloc_init_r10buf(conf);
4705 r10_bio->state = 0;
4707 atomic_set(&r10_bio->remaining, 0);
4708 r10_bio->mddev = mddev;
4709 r10_bio->sector = sector_nr;
4710 set_bit(R10BIO_IsReshape, &r10_bio->state);
4711 r10_bio->sectors = last - sector_nr + 1;
4712 rdev = read_balance(conf, r10_bio, &max_sectors);
4713 BUG_ON(!test_bit(R10BIO_Previous, &r10_bio->state));
4720 mempool_free(r10_bio, &conf->r10buf_pool);
4727 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
4729 read_bio->bi_private = r10_bio;
4731 r10_bio->master_bio = read_bio;
4732 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
4761 __raid10_find_phys(&conf->geo, r10_bio);
4768 int d = r10_bio->devs[s/2].devnum;
4772 b = r10_bio->devs[s/2].repl_bio;
4775 b = r10_bio->devs[s/2].bio;
4781 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
4792 pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
4808 r10_bio->sectors = nr_sectors;
4811 atomic_inc(&r10_bio->remaining);
4831 static void end_reshape_request(struct r10bio *r10_bio);
4833 struct r10bio *r10_bio);
4834 static void reshape_request_write(struct mddev *mddev, struct r10bio *r10_bio)
4844 if (!test_bit(R10BIO_Uptodate, &r10_bio->state))
4845 if (handle_reshape_read_error(mddev, r10_bio) < 0) {
4847 md_done_sync(mddev, r10_bio->sectors);
4855 atomic_set(&r10_bio->remaining, 1);
4858 int d = r10_bio->devs[s/2].devnum;
4862 b = r10_bio->devs[s/2].repl_bio;
4865 b = r10_bio->devs[s/2].bio;
4871 atomic_inc(&r10_bio->remaining);
4875 end_reshape_request(r10_bio);
4909 struct r10bio *r10_bio)
4912 int sectors = r10_bio->sectors;
4926 pages = get_resync_pages(r10_bio->devs[0].bio)->pages;
4928 r10b->sector = r10_bio->sector;
4981 struct r10bio *r10_bio = get_resync_r10bio(bio);
4982 struct mddev *mddev = r10_bio->mddev;
4989 d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
4999 end_reshape_request(r10_bio);
5002 static void end_reshape_request(struct r10bio *r10_bio)
5004 if (!atomic_dec_and_test(&r10_bio->remaining))
5006 md_done_sync(r10_bio->mddev, r10_bio->sectors);
5007 bio_put(r10_bio->master_bio);
5008 put_buf(r10_bio);