/linux/block/
blk-merge.c
    155   if (bio_sectors(bio) <= max_discard_sectors)   in bio_split_discard()
    419   if (bio_sectors(bio) <= max_sectors)   in bio_split_write_zeroes()
    649   if (blk_rq_sectors(req) + bio_sectors(bio) >   in ll_back_merge_fn()
    668   if (blk_rq_sectors(req) + bio_sectors(bio) >   in ll_front_merge_fn()
    684   if (blk_rq_sectors(req) + bio_sectors(next->bio) >   in req_attempt_discard_merge()
    970   else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)   in blk_try_merge()
   1056   if (blk_rq_sectors(req) + bio_sectors(bio) >   in bio_attempt_discard_merge()

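The merge paths above all apply the same guard: the request's current size plus the incoming bio's size, compared against a per-request sector limit. A minimal sketch of that check, using hypothetical stand-in types (the real ll_back_merge_fn() and friends also consult blk_rq_get_max_sectors() and segment limits):

    #include <stdbool.h>

    /* Hypothetical stand-in types; blk_rq_sectors(req) is modelled as a field. */
    struct bvec_iter { unsigned int bi_size; };
    struct bio       { struct bvec_iter bi_iter; };
    struct request   { unsigned int nr_sectors; };

    #define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9)

    /* Refuse the merge when the combined request would exceed the limit. */
    static bool merge_fits(const struct request *req, const struct bio *bio,
                           unsigned int max_sectors)
    {
        return req->nr_sectors + bio_sectors(bio) <= max_sectors;
    }
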
blk-zoned.c
    857   zwplug->wp_offset += bio_sectors(bio);   in blk_zone_write_plug_bio_merged()
    915   zwplug->wp_offset += bio_sectors(bio);   in blk_zone_write_plug_init_request()
    917   req_back_sector += bio_sectors(bio);   in blk_zone_write_plug_init_request()
    978   zwplug->wp_offset += bio_sectors(bio);   in blk_zone_wplug_prepare_bio()
   1134   if (op_is_flush(bio->bi_opf) && !bio_sectors(bio))   in blk_zone_plug_bio()

bounce.c
    229   if (sectors < bio_sectors(bio_orig)) {   in __blk_queue_bounce()

bio.c
   1573   if (WARN_ON_ONCE(sectors >= bio_sectors(bio)))   in bio_split()
   1618   offset + size > bio_sectors(bio)))   in bio_trim()

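bio_split() and bio_trim() both bound their arguments against bio_sectors() before touching the bio: a split point must leave both halves non-empty, and a trim window must fit inside the bio. A sketch of the two checks with trimmed stand-in types (the kernel's bio_split() also rejects sectors <= 0):

    #include <stdbool.h>

    struct bvec_iter { unsigned int bi_size; };
    struct bio       { struct bvec_iter bi_iter; };

    #define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9)

    /* bio_split(): the split point must leave both halves non-empty. */
    static bool split_point_valid(const struct bio *bio, unsigned int sectors)
    {
        return sectors > 0 && sectors < bio_sectors(bio);
    }

    /* bio_trim(): the trimmed window must lie entirely within the bio. */
    static bool trim_window_valid(const struct bio *bio,
                                  unsigned int offset, unsigned int size)
    {
        return offset + size <= bio_sectors(bio);
    }
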
/linux/fs/bcachefs/
io_write.c
    467   bio_sectors(&n->bio));   in bch2_submit_wbio_replicas()
    852   BUG_ON(bio_sectors(bio) != op->crc.compressed_size);   in bch2_write_prep_encoded_data()
    929   BUG_ON(!bio_sectors(src));   in bch2_write_extent()
   1038   bio_sectors(src) - (src_len >> 9),   in bch2_write_extent()
   1053   bio_sectors(src) - (src_len >> 9),   in bch2_write_extent()
   1321   bch2_cut_back(POS(op->pos.inode, op->pos.offset + bio_sectors(bio)), op->insert_keys.top);   in bch2_nocow_write()
   1339   if (k.k->p.offset < op->pos.offset + bio_sectors(bio)) {   in bch2_nocow_write()
   1348   op->pos.offset += bio_sectors(bio);   in bch2_nocow_write()
   1349   op->written += bio_sectors(bio);   in bch2_nocow_write()
   1568   sectors = bio_sectors(bio);   in bch2_write_data_inline()
   [all …]

fs-io-direct.c
    291   dio->op.pos.offset, bio_sectors(bio),   in bch2_dio_write_check_allocated()
    518   bio_sectors(bio), true);   in bch2_dio_write_loop()
    522   ret = bch2_disk_reservation_get(c, &dio->op.res, bio_sectors(bio),   in bch2_dio_write_loop()

checksum.c
    424   { NULL, bio_sectors(bio) - len_a - len_b, new_csum_type, { 0 } },   in bch2_rechecksum_bio()
    430   BUG_ON(len_a + len_b > bio_sectors(bio));   in bch2_rechecksum_bio()
    431   BUG_ON(crc_old.uncompressed_size != bio_sectors(bio));   in bch2_rechecksum_bio()

io_read.c
   1057   EBUG_ON(bio_sectors(&rbio->bio) != pick.crc.compressed_size);   in __bch2_read_extent()
   1094   this_cpu_add(c->counters[BCH_COUNTER_io_read], bio_sectors(&rbio->bio));   in __bch2_read_extent()
   1095   bch2_increment_clock(c, bio_sectors(&rbio->bio), READ);   in __bch2_read_extent()
   1134   bio_sectors(&rbio->bio));   in __bch2_read_extent()

data_update.c
    470   while (bio_sectors(bio)) {   in bch2_update_unwritten_extent()
    471   unsigned sectors = bio_sectors(bio);   in bch2_update_unwritten_extent()

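bch2_update_unwritten_extent() consumes its bio chunk by chunk until bio_sectors() hits zero. A stand-in model of that consume loop; the kernel advances the iterator with bio_advance(), which also walks the bio's vector array, while here only the byte count is tracked:

    struct bvec_iter { unsigned int bi_size; };
    struct bio       { struct bvec_iter bi_iter; };

    #define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9)

    /* Stand-in for bio_advance(): only the remaining byte count is tracked. */
    static void advance(struct bio *bio, unsigned int bytes)
    {
        bio->bi_iter.bi_size -= bytes;
    }

    static void consume_bio(struct bio *bio, unsigned int max_chunk_sectors)
    {
        while (bio_sectors(bio)) {
            unsigned int sectors = bio_sectors(bio);

            if (sectors > max_chunk_sectors)    /* clamp each chunk */
                sectors = max_chunk_sectors;

            /* ... issue I/O covering 'sectors' here ... */
            advance(bio, sectors << 9);
        }
    }
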
fs-io-buffered.c
    100   while (bio_sectors(bio) < sectors_this_extent &&   in readpage_bio_extend()
    210   bytes = min(sectors, bio_sectors(&rbio->bio)) << 9;   in bchfs_read()

super-io.c
    949   this_cpu_add(ca->io_done->sectors[READ][BCH_DATA_sb], bio_sectors(bio));   in read_back_super()
    975   bio_sectors(bio));   in write_one_super()

/linux/fs/btrfs/
raid56.h
    119   struct sector_ptr *bio_sectors;   member

/linux/drivers/md/bcache/
writeback.h
    117   bio_sectors(bio)))   in should_writeback()

/linux/include/linux/
bio.h
     39   #define bio_sectors(bio) bvec_iter_sectors((bio)->bi_iter)   macro
    340   if (sectors >= bio_sectors(bio))   in bio_next_split()

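Line 39 is the definition: bio_sectors() is simply the bio's remaining iterator length converted to 512-byte sectors. A userspace model, assuming bvec_iter_sectors() expands to ((iter).bi_size >> 9) as defined in include/linux/bvec.h:

    #include <assert.h>

    /* Trimmed stand-in types; the real struct bio carries much more. */
    struct bvec_iter { unsigned int bi_size; };      /* bytes left to process */
    struct bio       { struct bvec_iter bi_iter; };

    #define bvec_iter_sectors(iter) ((iter).bi_size >> 9)   /* assumed, per bvec.h */
    #define bio_sectors(bio)        bvec_iter_sectors((bio)->bi_iter)

    int main(void)
    {
        struct bio b = { .bi_iter = { .bi_size = 4096 } };

        /* 4096 bytes == eight 512-byte sectors, independent of device block size */
        assert(bio_sectors(&b) == 8);
        return 0;
    }
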
blkdev.h
    920   return bio_sectors(bio) &&   in bio_straddles_zones()

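bio_straddles_zones() requires a non-empty bio and then, in the elided remainder of line 920, compares the zone of the first sector with the zone of the last. A sketch under the assumption of a power-of-two zone size supplied as a shift; the helper name and parameter here are hypothetical:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    struct bvec_iter { unsigned int bi_size; sector_t bi_sector; };
    struct bio       { struct bvec_iter bi_iter; };

    #define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9)

    static bool straddles_zones(const struct bio *bio,
                                unsigned int zone_sectors_shift)
    {
        sector_t start = bio->bi_iter.bi_sector;

        if (!bio_sectors(bio))      /* the quoted non-empty precondition */
            return false;
        return (start >> zone_sectors_shift) !=
               ((start + bio_sectors(bio) - 1) >> zone_sectors_shift);
    }
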
/linux/drivers/md/
dm-integrity.c
   1612   if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) {   in dec_in_flight()
   1727   alignment = dio->range.logical_sector | bio_sectors(bio) | (PAGE_SIZE >> SECTOR_SHIFT);   in integrity_recheck()
   1914   if (unlikely(logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) {   in dm_integrity_check_limits()
   1916   logical_sector, bio_sectors(bio),   in dm_integrity_check_limits()
   1920   if (unlikely((logical_sector | bio_sectors(bio)) & (unsigned int)(ic->sectors_per_block - 1))) {   in dm_integrity_check_limits()
   1923   logical_sector, bio_sectors(bio));   in dm_integrity_check_limits()
   1966   sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len;   in dm_integrity_map()
   1996   unsigned int wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block;   in dm_integrity_map()
   2208   dio->range.n_sectors = bio_sectors(bio);   in dm_integrity_map_continue()
   2430   dio->payload_len = ic->tuple_size * (bio_sectors(bio) >> ic->sb->log2_sectors_per_block);   in dm_integrity_map_inline()
   [all …]

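Line 1920 packs two alignment tests into one expression: OR-ing the start sector with the length and masking against (sectors_per_block - 1) is nonzero unless both values are multiples of the power-of-two block size. A small model of that check:

    #include <stdbool.h>
    #include <stdint.h>

    typedef uint64_t sector_t;

    static bool block_aligned(sector_t logical_sector, unsigned int nr_sectors,
                              unsigned int sectors_per_block /* power of two */)
    {
        return ((logical_sector | nr_sectors) &
                (sector_t)(sectors_per_block - 1)) == 0;
    }
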
dm-crypt.c
   1182   if (!bio_sectors(bio) || !io->cc->tuple_size)   in dm_crypt_integrity_io_alloc()
   1189   tag_len = io->cc->tuple_size * (bio_sectors(bio) >> io->cc->sector_shift);   in dm_crypt_integrity_io_alloc()
   3508   if (bio_sectors(bio))   in crypt_map()
   3518   if (unlikely(bio_sectors(bio) > max_sectors))   in crypt_map()
   3535   unsigned int tag_len = cc->tuple_size * (bio_sectors(bio) >> cc->sector_shift);   in crypt_map()
   3543   if (bio_sectors(bio) > cc->tag_pool_max_sectors)   in crypt_map()

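Lines 1189 and 3535 size the integrity tag buffer as one metadata tuple per crypto sector, with sector_shift converting 512-byte sectors into crypto-sector units. A stand-in sketch; crypt_config_model is hypothetical, not dm-crypt's real crypt_config:

    /* Stand-in for the relevant crypt_config fields (hypothetical). */
    struct crypt_config_model {
        unsigned int tuple_size;    /* integrity bytes per crypto sector */
        unsigned int sector_shift;  /* log2(crypto sector size / 512)    */
    };

    static unsigned int tag_len_for(const struct crypt_config_model *cc,
                                    unsigned int nr_512_sectors)
    {
        return cc->tuple_size * (nr_512_sectors >> cc->sector_shift);
    }
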
raid10.c
   1202   if (max_sectors < bio_sectors(bio)) {   in raid10_read_request()
   1489   if (r10_bio->sectors < bio_sectors(bio)) {   in raid10_write_request()
   1666   if (bio_sectors(bio) < stripe_size*2)   in raid10_handle_discard()
   1689   split_size = bio_sectors(bio) - remainder;   in raid10_handle_discard()
   1874   int sectors = bio_sectors(bio);   in raid10_make_request()
   2436   md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));   in sync_request_write()
   2459   bio_sectors(tbio));   in sync_request_write()
   2593   md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));   in recovery_request_write()
   2599   bio_sectors(wbio2));   in recovery_request_write()

raid1.c
   1293   r1_bio->sectors = bio_sectors(bio);   in init_r1bio()
   1379   if (max_sectors < bio_sectors(bio)) {   in raid1_read_request()
   1439   bio_sectors(bio)) < 0)   in wait_blocked_rdev()
   1588   if (max_sectors < bio_sectors(bio)) {   in raid1_write_request()
   1710   bio->bi_iter.bi_sector, bio_sectors(bio));   in raid1_make_request()
   2382   md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));   in sync_request_write()

dm-ebs-target.c
     50   sector_t end_sector = __block_mod(bio->bi_iter.bi_sector, ec->u_bs) + bio_sectors(bio);   in __nr_blocks()

raid0.c
    613   if (sectors < bio_sectors(bio)) {   in raid0_make_request()

dm-zoned-target.c
    631   unsigned int nr_sectors = bio_sectors(bio);   in dmz_map()

dm-raid1.c
    477   io->count = bio_sectors(bio);   in map_region()

/linux/drivers/nvme/target/
io-cmd-bdev.c
    217   resid = bio_integrity_bytes(bi, bio_sectors(bio));   in nvmet_bdev_alloc_bip()

/linux/drivers/target/
target_core_iblock.c
    705   resid = bio_integrity_bytes(bi, bio_sectors(bio));   in iblock_alloc_bip()