Searched refs:bio_sectors (Results 1 – 23 of 23) sorted by relevance
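
Every hit below expands the same helper from include/linux/bio.h: bio_sectors(bio) converts the residual byte count in the bio's iterator into 512-byte sectors. A minimal userspace sketch of the macro and the only fields it reads (mock types; the real struct bio carries far more):

```c
/* Userspace sketch of bio_sectors(): in current kernels this is
 * (bio)->bi_iter.bi_size >> 9, i.e. residual bytes converted to
 * 512-byte sectors. The mock types mirror only what the macro reads. */
#include <stdio.h>

typedef unsigned long long sector_t;

struct bvec_iter {
	sector_t bi_sector;	/* device address, in 512-byte sectors */
	unsigned int bi_size;	/* residual I/O count, in bytes */
};

struct bio {
	struct bvec_iter bi_iter;
};

#define bvec_iter_sectors(iter) ((iter).bi_size >> 9)
#define bio_sectors(bio)        bvec_iter_sectors((bio)->bi_iter)

int main(void)
{
	struct bio bio = {
		.bi_iter = { .bi_sector = 2048, .bi_size = 64 * 1024 },
	};

	/* 64 KiB of residual payload is 128 sectors */
	printf("bio_sectors = %u\n", bio_sectors(&bio));
	return 0;
}
```

Because the macro reads bi_iter, its value shrinks as the bio is advanced, which is why several hits below use it as a loop condition or re-check it after splitting.
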

/linux/drivers/md/bcache/
request.c
114 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector); in bch_data_invalidate()
116 while (bio_sectors(bio)) { in bch_data_invalidate()
117 unsigned int sectors = min(bio_sectors(bio), in bch_data_invalidate()
195 if (atomic_sub_return(bio_sectors(bio), &op->c->sectors_to_gc) < 0) in CLOSURE_CALLBACK()
222 if (!bch_alloc_sectors(op->c, k, bio_sectors(bio), in CLOSURE_CALLBACK()
412 bio_sectors(bio) & (c->cache->sb.block_size - 1)) { in check_should_bypass()
468 bch_rescale_priorities(c, bio_sectors(bio)); in check_should_bypass()
471 bch_mark_sectors_bypassed(c, dc, bio_sectors(bio)); in check_should_bypass()
539 unsigned int bio_sectors = bio_sectors(bio); in cache_lookup_fn() local
550 BUG_ON(bio_sectors <= sectors); in cache_lookup_fn()
[all …]
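
The request.c hits at lines 116–117 are the consume-in-chunks idiom: loop while the bio still has sectors, clamp each chunk to what one btree key can cover, and advance the iterator by hand. A sketch of that shape, with a hypothetical 1 << 14 sector cap standing in for bcache's real per-key limit:

```c
/* Sketch of the bch_data_invalidate() loop shape: consume the bio in
 * bounded chunks, advancing bi_sector/bi_size directly as the kernel
 * code does at those lines. The 1 << 14 cap is illustrative only. */
#include <stdio.h>

typedef unsigned long long sector_t;

struct bio {
	struct { sector_t bi_sector; unsigned int bi_size; } bi_iter;
};

#define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9)
#define MIN(a, b) ((a) < (b) ? (a) : (b))

static void invalidate(struct bio *bio)
{
	while (bio_sectors(bio)) {
		unsigned int sectors = MIN(bio_sectors(bio), 1u << 14);

		/* one "key" covers [bi_sector, bi_sector + sectors) */
		printf("invalidate %llu..%llu\n",
		       bio->bi_iter.bi_sector,
		       bio->bi_iter.bi_sector + sectors);

		bio->bi_iter.bi_sector += sectors;
		bio->bi_iter.bi_size -= sectors << 9;
	}
}

int main(void)
{
	struct bio bio = { { 0, 40 << 20 } };	/* 40 MiB starting at LBA 0 */
	invalidate(&bio);
	return 0;
}
```
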
writeback.h
117 bio_sectors(bio))) in should_writeback()
/linux/include/trace/events/
block.h
324 __entry->nr_sector = bio_sectors(bio);
352 __entry->nr_sector = bio_sectors(bio);
544 __entry->nr_sector = bio_sectors(bio);
627 __entry->nr_sectors = bio_sectors(bio);
/linux/block/
blk-zoned.c
1243 bio->bi_iter.bi_sector, bio_sectors(bio)); in disk_zone_wplug_add_bio()
1278 zwplug->wp_offset += bio_sectors(bio); in blk_zone_write_plug_bio_merged()
1337 zwplug->wp_offset += bio_sectors(bio); in blk_zone_write_plug_init_request()
1340 req_back_sector += bio_sectors(bio); in blk_zone_write_plug_init_request()
1401 zwplug->wp_offset += bio_sectors(bio); in blk_zone_wplug_prepare_bio()
1753 bio->bi_iter.bi_sector, bio_sectors(bio)); in blk_zone_wplug_bio_work()
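
The blk-zoned.c hits at 1278, 1337, and 1401 all do the same bookkeeping: after a write bio is issued to a zone, the plug's cached write-pointer offset advances by bio_sectors(bio), so the next bio can be validated without querying the device. A simplified sketch (the real struct blk_zone_wplug adds locking, flags, and a bio list):

```c
/* Sketch of the zone write-plug bookkeeping seen above: each write
 * must land exactly at the cached write pointer, which then moves
 * forward by bio_sectors(bio). Types are simplified mocks. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long sector_t;

struct bio {
	struct { sector_t bi_sector; unsigned int bi_size; } bi_iter;
};

#define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9)

struct zwplug {
	sector_t zone_start;	/* first sector of the zone */
	unsigned int wp_offset;	/* write pointer, relative to zone_start */
};

static bool submit_zoned_write(struct zwplug *zwplug, struct bio *bio)
{
	/* a zoned write must land exactly at the write pointer */
	if (bio->bi_iter.bi_sector != zwplug->zone_start + zwplug->wp_offset)
		return false;	/* would be an unaligned-write error */

	zwplug->wp_offset += bio_sectors(bio);
	return true;
}

int main(void)
{
	struct zwplug z = { .zone_start = 524288, .wp_offset = 0 };
	struct bio a = { { 524288, 8 << 9 } };	/* 8 sectors at the WP */
	struct bio b = { { 524296, 8 << 9 } };	/* next 8, back-to-back */

	bool ok_a = submit_zoned_write(&z, &a);
	bool ok_b = submit_zoned_write(&z, &b);

	printf("a: %s, b: %s, wp_offset=%u\n",
	       ok_a ? "ok" : "unaligned", ok_b ? "ok" : "unaligned",
	       z.wp_offset);
	return 0;
}
```
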
bio.c
1700 if (WARN_ON_ONCE(sectors >= bio_sectors(bio))) in bio_split()
1745 offset + size > bio_sectors(bio))) in bio_trim()
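
bio.c uses bio_sectors() as the bounds authority for its two reshaping helpers: bio_split() warns on a split at or past the end (1700), and bio_trim() warns when the trim window overruns the bio (1745). The guards alone, sketched:

```c
/* Sketch of the bounds checks at bio.c:1700 and bio.c:1745. Mock bio;
 * the real helpers also clone bios and advance iterators, omitted here. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long sector_t;

struct bio {
	struct { sector_t bi_sector; unsigned int bi_size; } bi_iter;
};

#define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9)

/* split must leave a non-empty tail: 0 < sectors < bio_sectors(bio) */
static bool split_ok(struct bio *bio, unsigned int sectors)
{
	return sectors && sectors < bio_sectors(bio);
}

/* trim window [offset, offset + size) must lie inside the bio */
static bool trim_ok(struct bio *bio, unsigned int offset, unsigned int size)
{
	return offset + size <= bio_sectors(bio);
}

int main(void)
{
	struct bio bio = { { 0, 128 << 9 } };	/* 128 sectors */

	printf("split at 64: %d, split at 128: %d\n",
	       split_ok(&bio, 64), split_ok(&bio, 128));
	printf("trim 32+96: %d, trim 64+96: %d\n",
	       trim_ok(&bio, 32, 96), trim_ok(&bio, 64, 96));
	return 0;
}
```
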
/linux/drivers/md/
dm.c
503 return bio_sectors(bio); in dm_io_sectors()
1319 unsigned int bio_sectors = bio_sectors(bio); in dm_accept_partial_bio() local
1322 BUG_ON(bio_sectors > *tio->len_ptr); in dm_accept_partial_bio()
1323 BUG_ON(n_sectors > bio_sectors); in dm_accept_partial_bio()
1336 *tio->len_ptr -= bio_sectors - n_sectors; in dm_accept_partial_bio()
1345 io->sector_offset = bio_sectors(io->orig_bio); in dm_accept_partial_bio()
1463 io->sector_offset = bio_sectors(ci->bio); in setup_split_accounting()
1775 ci->sector_count = bio_sectors(bio); in init_clone_info()
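
The dm.c hits at 1319–1336 are dm_accept_partial_bio(): a target that can only process the first n_sectors of a clone shrinks *len_ptr by the part it hands back, and dm core resubmits the remainder. A sketch of just that arithmetic (mock types; the kernel function also rejects flush and zone-management bios):

```c
/* Sketch of dm_accept_partial_bio()'s arithmetic (dm.c:1319-1336):
 * give back the tail of the range and shrink the clone to the part
 * the target accepted. Hypothetical simplified signature. */
#include <assert.h>
#include <stdio.h>

struct bio { struct { unsigned int bi_size; } bi_iter; };
#define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9)

static void accept_partial_bio(struct bio *clone, unsigned int *len_ptr,
			       unsigned int n_sectors)
{
	unsigned int bi_sectors = bio_sectors(clone);

	assert(bi_sectors <= *len_ptr);   /* clone is part of this range */
	assert(n_sectors <= bi_sectors);  /* can't accept more than given */

	*len_ptr -= bi_sectors - n_sectors;	 /* give back the tail */
	clone->bi_iter.bi_size = n_sectors << 9; /* shrink the clone */
}

int main(void)
{
	unsigned int range_len = 1024;		/* sectors mapped so far */
	struct bio clone = { { 1024 << 9 } };	/* whole range, 1024 sectors */

	accept_partial_bio(&clone, &range_len, 256); /* take first 256 only */
	printf("range now %u sectors, clone now %u sectors\n",
	       range_len, bio_sectors(&clone));
	return 0;
}
```
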
dm-log-writes.c
671 if (!bio_sectors(bio) && !flush_bio) in log_writes_map()
705 block->nr_sectors = bio_to_dev_sectors(lc, bio_sectors(bio)); in log_writes_map()
717 if (flush_bio && !bio_sectors(bio)) { in log_writes_map()
dm-zone.c
157 return !op_is_flush(bio->bi_opf) && bio_sectors(bio); in dm_is_zone_write()
dm-integrity.c
1621 if (likely(!bio->bi_status) && unlikely(bio_sectors(bio) != dio->range.n_sectors)) { in dec_in_flight()
1835 alignment = dio->range.logical_sector | bio_sectors(bio) | (PAGE_SIZE >> SECTOR_SHIFT); in integrity_recheck()
2022 if (unlikely(logical_sector + bio_sectors(bio) > ic->provided_data_sectors)) { in dm_integrity_check_limits()
2024 logical_sector, bio_sectors(bio), in dm_integrity_check_limits()
2028 if (unlikely((logical_sector | bio_sectors(bio)) & (unsigned int)(ic->sectors_per_block - 1))) { in dm_integrity_check_limits()
2031 logical_sector, bio_sectors(bio)); in dm_integrity_check_limits()
2075 sector_t end_boundary = (sec + bio_sectors(bio) - 1) >> log2_max_io_len; in dm_integrity_map()
2105 unsigned int wanted_tag_size = bio_sectors(bio) >> ic->sb->log2_sectors_per_block; in dm_integrity_map()
2306 dio->range.n_sectors = bio_sectors(bio); in dm_integrity_map_continue()
2528 dio->payload_len = ic->tuple_size * (bio_sectors(bio) >> ic->sb->log2_sectors_per_block); in dm_integrity_map_inline()
[all …]
dm-ebs-target.c
50 sector_t end_sector = __block_mod(bio->bi_iter.bi_sector, ec->u_bs) + bio_sectors(bio); in __nr_blocks()
dm-crypt.c
1141 if (!bio_sectors(bio) || !io->cc->tuple_size) in dm_crypt_integrity_io_alloc()
1148 tag_len = io->cc->tuple_size * (bio_sectors(bio) >> io->cc->sector_shift); in dm_crypt_integrity_io_alloc()
3468 if (bio_sectors(bio)) in crypt_map()
3485 if (unlikely(bio_sectors(bio) > max_sectors)) { in crypt_map()
3505 unsigned int tag_len = cc->tuple_size * (bio_sectors(bio) >> cc->sector_shift); in crypt_map()
3513 if (bio_sectors(bio) > cc->tag_pool_max_sectors) in crypt_map()
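
dm-crypt.c:1148 and 3505 (and dm-integrity.c:2528 with its own tuple_size) size the integrity metadata the same way: one tuple per crypto sector, so the payload is tuple_size * (bio_sectors(bio) >> sector_shift). A worked sketch with illustrative sizes:

```c
/* Sketch of the tag-length math at dm-crypt.c:1148/3505: sector_shift
 * converts 512-byte sectors to crypto sectors, each of which carries
 * one tuple_size-byte integrity tuple. All sizes below are examples. */
#include <stdio.h>

struct bio { struct { unsigned int bi_size; } bi_iter; };
#define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9)

int main(void)
{
	struct bio bio = { { 64 * 1024 } };	/* 64 KiB -> 128 sectors */
	unsigned int tuple_size = 16;		/* e.g. 16-byte auth tag */
	unsigned int sector_shift = 3;		/* 4096-byte crypto sectors */

	unsigned int tag_len =
		tuple_size * (bio_sectors(&bio) >> sector_shift);

	/* 128 >> 3 = 16 crypto sectors -> 256 bytes of tags */
	printf("tag_len = %u bytes\n", tag_len);
	return 0;
}
```
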
dm-zoned-target.c
631 unsigned int nr_sectors = bio_sectors(bio); in dmz_map()
dm-raid1.c
476 io->count = bio_sectors(bio); in map_region()
raid5.c
5337 unsigned int bio_sectors = bio_sectors(bio); in in_chunk_boundary() local
5341 ((sector & (chunk_sectors - 1)) + bio_sectors); in in_chunk_boundary()
5428 end_sector = sector + bio_sectors(raid_bio); in raid5_read_one_chunk()
5447 if (rdev_has_badblock(rdev, sector, bio_sectors(raid_bio))) { in raid5_read_one_chunk()
5494 if (sectors < bio_sectors(raid_bio)) { in chunk_aligned_read()
6066 if (sectors_per_chunk - chunk_offset >= bio_sectors(bi)) in raid5_bio_lowest_chunk_sector()
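
raid5.c:5337–5341 gate the aligned-read fast path on the bio staying inside one stripe chunk: the offset within the chunk plus the bio's length must not exceed chunk_sectors. A sketch of the predicate, assuming a power-of-two chunk size as the mask requires:

```c
/* Sketch of in_chunk_boundary() (raid5.c:5337-5341): a bio may take
 * the aligned-read fast path only if it does not cross a stripe chunk.
 * chunk_sectors must be a power of two for the mask trick to work. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long sector_t;

struct bio {
	struct { sector_t bi_sector; unsigned int bi_size; } bi_iter;
};

#define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9)

static bool in_chunk_boundary(struct bio *bio, unsigned int chunk_sectors)
{
	sector_t sector = bio->bi_iter.bi_sector;

	return chunk_sectors >=
	       ((sector & (chunk_sectors - 1)) + bio_sectors(bio));
}

int main(void)
{
	unsigned int chunk = 1024;		/* 512 KiB chunks */
	struct bio ok  = { { 1000, 16 << 9 } };	/* 1000+16 <= 1024 */
	struct bio bad = { { 1020, 16 << 9 } };	/* crosses into next chunk */

	printf("ok: %d, bad: %d\n",
	       in_chunk_boundary(&ok, chunk),
	       in_chunk_boundary(&bad, chunk));
	return 0;
}
```
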
dm-writecache.c
1556 if (unlikely((((unsigned int)bio->bi_iter.bi_sector | bio_sectors(bio)) & in writecache_map()
1882 } else if (unlikely(!bio_sectors(bio))) { in __writecache_writeback_pmem()
dm-verity-target.c
795 if (((unsigned int)bio->bi_iter.bi_sector | bio_sectors(bio)) & in verity_map()
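
dm-writecache.c:1556 and dm-verity-target.c:795 (and dm-integrity.c:2028) share an idiom: OR the start sector with the sector count and mask once, which rejects a bio if either its position or its length is misaligned to the block size. Sketched:

```c
/* Sketch of the alignment idiom above: (start | length) masked by
 * (block_sectors - 1) is nonzero iff either the start sector or the
 * sector count is not a block multiple. Needs power-of-two blocks. */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned long long sector_t;

struct bio {
	struct { sector_t bi_sector; unsigned int bi_size; } bi_iter;
};

#define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9)

static bool block_aligned(struct bio *bio, unsigned int block_sectors)
{
	return !(((unsigned int)bio->bi_iter.bi_sector | bio_sectors(bio)) &
		 (block_sectors - 1));
}

int main(void)
{
	unsigned int blk = 8;			/* 4 KiB blocks */
	struct bio a = { { 16, 32 << 9 } };	/* aligned start and length */
	struct bio b = { { 16, 20 << 9 } };	/* length not a block multiple */
	struct bio c = { { 13, 32 << 9 } };	/* misaligned start */

	printf("a: %d, b: %d, c: %d\n",
	       block_aligned(&a, blk), block_aligned(&b, blk),
	       block_aligned(&c, blk));
	return 0;
}
```
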
dm-snap.c
2678 if (bio_sectors(bio) > available_sectors) in origin_map()
dm-cache-target.c
804 pb->len = bio_sectors(bio); in accounted_begin()
md.c
450 if (bio_sectors(bio) != 0) in md_submit_bio()
612 if (bio_sectors(bio) == 0) { in md_flush_request()
9244 md_io_clone->sectors = bio_sectors(*bio); in md_clone_bio()
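
md.c:450 and 612 key off bio_sectors(bio) == 0 to recognize a pure flush, a bio that carries ordering semantics but no data payload; dm-log-writes.c:671 and 717 make the same distinction. A sketch, with an illustrative flag bit standing in for the kernel's REQ_PREFLUSH value:

```c
/* Sketch of the empty-flush test at md.c:450/612: a flush request may
 * carry no data, so a zero sector count marks a "pure flush" as
 * opposed to a write that also requests a preflush. The flag value
 * below is illustrative, not the kernel's actual bit. */
#include <stdbool.h>
#include <stdio.h>

#define REQ_PREFLUSH (1u << 0)	/* illustrative flag bit */

struct bio {
	unsigned int bi_opf;	/* op + flags */
	struct { unsigned int bi_size; } bi_iter;
};

#define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9)

static bool is_pure_flush(struct bio *bio)
{
	return (bio->bi_opf & REQ_PREFLUSH) && bio_sectors(bio) == 0;
}

int main(void)
{
	struct bio flush = { REQ_PREFLUSH, { 0 } };
	struct bio write = { REQ_PREFLUSH, { 8 << 9 } };

	printf("flush: %d, write-with-flush: %d\n",
	       is_pure_flush(&flush), is_pure_flush(&write));
	return 0;
}
```
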
/linux/drivers/nvme/target/
io-cmd-bdev.c
214 resid = bio_integrity_bytes(bi, bio_sectors(bio)); in nvmet_bdev_alloc_bip()
/linux/fs/ext4/
page-io.c
357 (unsigned) bio_sectors(bio), in ext4_end_bio()
/linux/drivers/scsi/
sr.c
325 block_sectors = bio_sectors(rq->bio); in sr_done()
/linux/drivers/target/
target_core_iblock.c
718 resid = bio_integrity_bytes(bi, bio_sectors(bio)); in iblock_alloc_bip()
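
Both target-stack hits size the protection-information buffer with bio_integrity_bytes(bi, bio_sectors(bio)). A sketch of the arithmetic that helper performs, assuming its usual definition (sectors are first converted to integrity intervals, then scaled by the per-interval tuple size):

```c
/* Sketch of bio_integrity_bytes() as used at io-cmd-bdev.c:214 and
 * target_core_iblock.c:718: one tuple of PI metadata per protection
 * interval. Field names follow struct blk_integrity; the exact kernel
 * definition may differ, the arithmetic is the point. */
#include <stdio.h>

struct blk_integrity {
	unsigned char tuple_size;	/* bytes of PI per interval */
	unsigned char interval_exp;	/* log2(protection interval bytes) */
};

static unsigned int bio_integrity_intervals(struct blk_integrity *bi,
					    unsigned int sectors)
{
	/* intervals are interval_exp-sized; sectors are 512 bytes */
	return sectors >> (bi->interval_exp - 9);
}

static unsigned int bio_integrity_bytes(struct blk_integrity *bi,
					unsigned int sectors)
{
	return bio_integrity_intervals(bi, sectors) * bi->tuple_size;
}

int main(void)
{
	struct blk_integrity bi = { .tuple_size = 8, .interval_exp = 12 };

	/* 128 sectors = 16 4-KiB intervals -> 128 bytes of PI */
	printf("%u bytes\n", bio_integrity_bytes(&bi, 128));
	return 0;
}
```
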