
Searched refs:bio_end_sector (Results 1 – 22 of 22) sorted by relevance

/linux/drivers/md/
dm-ebs-target.c
160 if (blocks && __block_mod(bio_end_sector(bio), ec->u_bs)) in __ebs_discard_bio()
199 block2 = __sector_to_block(ec, bio_end_sector(bio)); in __ebs_process_bios()
202 if (__block_mod(bio_end_sector(bio), ec->u_bs) && block2 != block1) in __ebs_process_bios()
376 __block_mod(bio_end_sector(bio), ec->u_bs) || in ebs_map()
md-linear.c
257 if (unlikely(bio_end_sector(bio) > end_sector)) { in linear_make_request()
raid0.c
465 if (bio_end_sector(bio) > zone->zone_end) { in raid0_handle_discard()
480 end = bio_end_sector(bio); in raid0_handle_discard()
dm-stripe.c
257 stripe_map_range_sector(sc, bio_end_sector(bio), in stripe_map_range()
raid1.c
320 (unsigned long long) bio_end_sector(bio) - 1); in raid_end_bio_io()
541 (unsigned long long) bio_end_sector(mbio) - 1); in raid1_end_write_request()
1469 bio->bi_iter.bi_sector, bio_end_sector(bio))) { in raid1_write_request()
1481 bio_end_sector(bio))) in raid1_write_request()
dm-writecache.c
1307 bio_end_sector(bio)); in writecache_flush_thread()
1532 writecache_discard(wc, bio->bi_iter.bi_sector, bio_end_sector(bio)); in writecache_map_discard()
1804 if (unlikely(bio_end_sector(&wb->bio) >= wc->data_device_sectors)) in wc_add_block()
raid10.c
1359 bio_end_sector(bio)))) { in raid10_write_request()
1370 bio->bi_iter.bi_sector, bio_end_sector(bio))) in raid10_write_request()
1657 bio_end = bio_end_sector(bio); in raid10_handle_discard()
1706 bio_end = bio_end_sector(bio); in raid10_handle_discard()
raid5.c
3451 if (bio_end_sector(*bip) > bi->bi_iter.bi_sector) in stripe_bio_overlaps()
3456 if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi)) in stripe_bio_overlaps()
3527 if (bio_end_sector(bi) >= sector) in __add_stripe_bio()
3528 sector = bio_end_sector(bi); in __add_stripe_bio()
5682 last_sector = bio_end_sector(bi); in make_discard_request()
6110 ctx.last_sector = bio_end_sector(bi); in raid5_make_request()
6594 last_sector = bio_end_sector(raid_bio); in retry_aligned_read()
raid5-ppl.c
498 bio->bi_iter.bi_sector = bio_end_sector(prev); in ppl_submit_iounit()
dm-verity-target.c
728 if (bio_end_sector(bio) >> in verity_map()
dm-cache-target.c
1018 sector_t se = bio_end_sector(bio); in calc_discard_block_range()
dm.c
530 sector = bio_end_sector(bio) - io->sector_offset; in dm_io_acct()
dm-raid.c
3352 if (unlikely(bio_has_data(bio) && bio_end_sector(bio) > mddev->array_sectors)) in raid_map()
dm-thin.c
1983 if (bio_end_sector(bio) <= tc->origin_size) in process_cell()
/linux/fs/bcachefs/
fs-io-buffered.c
108 pgoff_t folio_offset = bio_end_sector(bio) >> PAGE_SECTORS_SHIFT; in readpage_bio_extend()
146 BUG_ON(folio_sector(folio) != bio_end_sector(bio)); in readpage_bio_extend()
632 bio_end_sector(&w->io->op.wbio.bio) != sector)) in __bch2_writepage()
/linux/fs/iomap/
ioend.c
128 iomap_sector(&wpc->iomap, pos) != bio_end_sector(&ioend->io_bio)) in iomap_can_add_to_ioend()
buffered-io.c
400 bio_end_sector(ctx->bio) != sector || in iomap_readpage_iter()
/linux/include/linux/
bio.h
41 #define bio_end_sector(bio) bvec_iter_end_sector((bio)->bi_iter) macro
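
For reference, bvec_iter_end_sector() adds the iterator's remaining size, converted to 512-byte sectors, to its starting sector, so bio_end_sector() evaluates to the first sector past the end of the bio's data. Below is a minimal userspace sketch of that arithmetic, assuming a simplified stand-in struct; the fake_* names are illustrative only and are not kernel code.

#include <stdio.h>

typedef unsigned long long sector_t;

/* Hypothetical stand-in for the bi_sector/bi_size fields of struct bvec_iter. */
struct fake_bvec_iter {
	sector_t     bi_sector;   /* start sector, in 512-byte units */
	unsigned int bi_size;     /* remaining I/O size, in bytes */
};

/* Sketch of what bio_end_sector() effectively computes: bi_sector + (bi_size >> 9). */
static sector_t fake_bio_end_sector(const struct fake_bvec_iter *it)
{
	return it->bi_sector + (it->bi_size >> 9);
}

int main(void)
{
	/* A 4 KiB bio starting at sector 2048 ends just before sector 2056. */
	struct fake_bvec_iter it = { .bi_sector = 2048, .bi_size = 4096 };

	printf("end sector: %llu\n", fake_bio_end_sector(&it));
	return 0;
}

Because the macro returns the first sector past the bio, the hits above typically compare bio_end_sector(bio) against a device, zone, or stripe boundary, or against a neighbouring bio's start sector when checking contiguity.
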
/linux/block/
mq-deadline.c
604 sector_t sector = bio_end_sector(bio); in dd_request_merge()
blk-iocost.c
2631 iocg->cursor = bio_end_sector(bio); in ioc_rqos_throttle()
2747 sector_t bio_end = bio_end_sector(bio); in ioc_rqos_merge()
bfq-iosched.c
1804 end = bio_end_sector(bio) - 1; in bfq_actuator_index()
2369 return elv_rb_find(&bfqq->sort_list, bio_end_sector(bio)); in bfq_find_rq_fmerge()
/linux/fs/btrfs/
extent_io.c
655 bio_end_sector(bio) == sector; in btrfs_bio_is_contig()