Searched refs:bv_len (Results 1 – 25 of 62) sorted by relevance
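bv_len is the byte length of a single bio_vec segment: how much of bv_page, starting at bv_offset, belongs to the I/O. For reference, a minimal sketch of the structure as it is defined in include/linux/bvec.h in recent kernels (field comments added here):

struct bio_vec {
	struct page	*bv_page;	/* page backing this segment */
	unsigned int	bv_len;		/* length of the segment in bytes */
	unsigned int	bv_offset;	/* byte offset within bv_page */
};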

/linux/drivers/block/
n64cart.c
69 (bv->bv_len & (MIN_ALIGNMENT - 1))); in n64cart_do_bvec()
79 n64cart_write_reg(PI_WRITE_REG, bv->bv_len - 1); in n64cart_do_bvec()
83 dma_unmap_page(dev, dma_addr, bv->bv_len, DMA_FROM_DEVICE); in n64cart_do_bvec()
99 pos += bvec.bv_len; in n64cart_submit_bio()
brd.c
147 bv.bv_len = min_t(u32, bv.bv_len, PAGE_SIZE - offset); in brd_rw_bvec()
158 memcpy_to_page(page, offset, kaddr, bv.bv_len); in brd_rw_bvec()
161 memcpy_from_page(kaddr, page, offset, bv.bv_len); in brd_rw_bvec()
163 memset(kaddr, 0, bv.bv_len); in brd_rw_bvec()
167 bio_advance_iter_single(bio, &bio->bi_iter, bv.bv_len); in brd_rw_bvec()
/linux/drivers/md/
dm-ebs-target.c
71 unsigned int bv_len = bv->bv_len; in __ebs_rw_bvec() local
76 if (unlikely(!bv->bv_page || !bv_len)) in __ebs_rw_bvec()
82 while (bv_len) { in __ebs_rw_bvec()
83 cur_len = min(dm_bufio_get_block_size(ec->bufio) - buf_off, bv_len); in __ebs_rw_bvec()
86 if (op == REQ_OP_READ || buf_off || bv_len < dm_bufio_get_block_size(ec->bufio)) in __ebs_rw_bvec()
113 bv_len -= cur_len; in __ebs_rw_bvec()
dm-io-rewind.c
27 while (idx >= 0 && bytes && bytes > bv[idx].bv_len) { in dm_bvec_iter_rewind()
28 bytes -= bv[idx].bv_len; in dm_bvec_iter_rewind()
41 iter->bi_bvec_done = bv[idx].bv_len - bytes; in dm_bvec_iter_rewind()
dm-log-writes.c
367 block->vecs[i].bv_len, 0); in log_one_block()
368 if (ret != block->vecs[i].bv_len) { in log_one_block()
380 block->vecs[i].bv_len, 0); in log_one_block()
381 if (ret != block->vecs[i].bv_len) { in log_one_block()
387 sector += block->vecs[i].bv_len >> SECTOR_SHIFT; in log_one_block()
751 block->vecs[i].bv_len = bv.bv_len; in log_writes_map()
/linux/block/
blk-crypto-fallback.c
281 if (!IS_ALIGNED(src_bv.bv_len | src_bv.bv_offset, in __blk_crypto_fallback_encrypt_bio()
287 __bio_add_page(enc_bio, enc_page, src_bv.bv_len, in __blk_crypto_fallback_encrypt_bio()
303 for (i = 0; i < src_bv.bv_len; i += data_unit_size) { in __blk_crypto_fallback_encrypt_bio()
315 src_bv.bv_len); in __blk_crypto_fallback_encrypt_bio()
398 if (!IS_ALIGNED(bv.bv_len | bv.bv_offset, data_unit_size)) in __blk_crypto_fallback_decrypt_bio()
404 for (i = 0; i < bv.bv_len; i += data_unit_size) { in __blk_crypto_fallback_decrypt_bio()
blk-map.c
77 bvec->bv_len, in bio_copy_from_iter()
83 if (ret < bvec->bv_len) in bio_copy_from_iter()
108 bvec->bv_len, in bio_copy_to_iter()
114 if (ret < bvec->bv_len) in bio_copy_to_iter()
299 len += bio->bi_io_vec[i].bv_len; in bio_invalidate_vmalloc_pages()
348 p += bvec->bv_len; in bio_copy_kern_endio_read()
bio.c
316 bio->bi_iter.bi_size += bio->bi_io_vec[i].bv_len; in bio_reuse()
672 if (done + bv.bv_len > new_size) { in bio_truncate()
680 bv.bv_len - offset); in bio_truncate()
683 done += bv.bv_len; in bio_truncate()
946 size_t bv_end = bv->bv_offset + bv->bv_len; in bvec_try_merge_page()
962 bv->bv_len += len; in bvec_try_merge_page()
983 if (len > queue_max_segment_size(q) - bv->bv_len) in bvec_try_merge_hw_page()
1206 if (nbytes < bv->bv_len) { in bio_iov_iter_align_down()
1207 bv->bv_len -= nbytes; in bio_iov_iter_align_down()
1215 nbytes -= bv->bv_len; in bio_iov_iter_align_down()
[all …]
blk-integrity.c
41 if (seg_size + iv.bv_len > queue_max_segment_size(q)) in blk_rq_count_integrity_sg()
44 seg_size += iv.bv_len; in blk_rq_count_integrity_sg()
48 seg_size = iv.bv_len; in blk_rq_count_integrity_sg()
blk.h
128 if (addr1 + vec1->bv_len != addr2) in biovec_phys_mergeable()
134 if ((addr1 | mask) != ((addr2 + vec2->bv_len - 1) | mask)) in biovec_phys_mergeable()
162 ((bprv->bv_offset + bprv->bv_len) & lim->virt_boundary_mask); in __bvec_gap_to_prev()
403 if (bio->bi_iter.bi_size > bv->bv_len - bio->bi_iter.bi_bvec_done) in bio_may_need_split()
405 return bv->bv_len + bv->bv_offset > lim->max_fast_segment_size; in bio_may_need_split()
/linux/drivers/md/bcache/
util.c
244 start: bv->bv_len = min_t(size_t, PAGE_SIZE - bv->bv_offset, in bch_bio_map()
251 base += bv->bv_len; in bch_bio_map()
254 size -= bv->bv_len; in bch_bio_map()
debug.c
136 cache_set_err_on(memcmp(p1, p2, bv.bv_len), in bch_data_verify()
144 bio_advance_iter(check, &citer, bv.bv_len); in bch_data_verify()
/linux/drivers/s390/block/
dasd_fba.c
457 if (bv.bv_len & (blksize - 1)) in dasd_fba_build_cp_regular()
460 count += bv.bv_len >> (block->s2b_shift + 9); in dasd_fba_build_cp_regular()
461 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len)) in dasd_fba_build_cp_regular()
462 cidaw += bv.bv_len / blksize; in dasd_fba_build_cp_regular()
504 memcpy(copy + bv.bv_offset, dst, bv.bv_len); in dasd_fba_build_cp_regular()
508 for (off = 0; off < bv.bv_len; off += blksize) { in dasd_fba_build_cp_regular()
582 for (off = 0; off < bv.bv_len; off += blksize) { in dasd_fba_free_cp()
593 memcpy(dst, cda, bv.bv_len); in dasd_fba_free_cp()
dasd_diag.c
537 if (bv.bv_len & (blksize - 1)) in dasd_diag_build_cp()
540 count += bv.bv_len >> (block->s2b_shift + 9); in dasd_diag_build_cp()
557 for (off = 0; off < bv.bv_len; off += blksize) { in dasd_diag_build_cp()
dcssblk.c
918 !IS_ALIGNED(bvec.bv_len, PAGE_SIZE))) in dcssblk_submit_bio()
922 memcpy(page_addr, __va(source_addr), bvec.bv_len); in dcssblk_submit_bio()
924 memcpy(__va(source_addr), page_addr, bvec.bv_len); in dcssblk_submit_bio()
925 bytes_done += bvec.bv_len; in dcssblk_submit_bio()
/linux/net/ceph/
messenger_v2.c
155 it->bvec->bv_len - it->iov_offset), in do_try_sendpage()
172 iov_iter_bvec(&msg.msg_iter, ITER_SOURCE, &bv, 1, bv.bv_len); in do_try_sendpage()
236 iov_iter_bvec(&con->v2.in_iter, ITER_DEST, &con->v2.in_bvec, 1, bv->bv_len); in set_in_bvec()
281 con->v2.out_bvec.bv_len); in set_out_bvec()
293 con->v2.out_bvec.bv_len); in set_out_bvec_zero()
880 ceph_msg_data_advance(cursor, bv.bv_len); in calc_sg_cnt_cursor()
928 sg_set_page(*sg, bv.bv_page, bv.bv_len, bv.bv_offset); in init_sgs_cursor()
931 ceph_msg_data_advance(cursor, bv.bv_len); in init_sgs_cursor()
1090 len = min_t(int, len, bv.bv_len); in process_v2_sparse_read()
1865 con->v2.in_bvec.bv_len); in prepare_read_data_cont()
[all …]
/linux/fs/squashfs/
block.c
50 int bytes_to_copy = min_t(int, bvec->bv_len - offset, in copy_bio_to_actor()
69 if (offset >= bvec->bv_len) { in copy_bio_to_actor()
330 if (offset < bvec->bv_len - 1) { in squashfs_read_data()
lzo_wrapper.c
77 int avail = min(bytes, ((int)bvec->bv_len) - offset); in lzo_uncompress()
zlib_wrapper.c
83 avail = min(length, ((int)bvec->bv_len) - offset); in zlib_uncompress()
lz4_wrapper.c
102 int avail = min(bytes, ((int)bvec->bv_len) - offset); in lz4_uncompress()
zstd_wrapper.c
100 avail = min(length, ((int)bvec->bv_len) - offset); in zstd_uncompress()
/linux/drivers/xen/
biomerge.c
15 return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2; in xen_biovec_phys_mergeable()
/linux/drivers/infiniband/core/
rw.c
238 sg_set_page(sg, bv.bv_page, bv.bv_len, bv.bv_offset); in rdma_rw_init_mr_wrs_bvec()
239 bvec_iter_advance(bvecs, iter, bv.bv_len); in rdma_rw_init_mr_wrs_bvec()
387 ctx->single.sge.length = bv.bv_len; in rdma_rw_init_single_wr_bvec()
453 sge->length = bv.bv_len; in rdma_rw_init_map_wrs_bvec()
456 total_len += bv.bv_len; in rdma_rw_init_map_wrs_bvec()
459 bvec_iter_advance_single(bvecs, iter, bv.bv_len); in rdma_rw_init_map_wrs_bvec()
514 mapped_len, bv.bv_len, dir, 0); in rdma_rw_init_iova_wrs_bvec()
518 mapped_len += bv.bv_len; in rdma_rw_init_iova_wrs_bvec()
519 bvec_iter_advance(bvec, iter, bv.bv_len); in rdma_rw_init_iova_wrs_bvec()
/linux/fs/netfs/
iterator.c
123 len = bvecs[ix].bv_len; in netfs_limit_bvec()
132 len = min3(n, bvecs[ix].bv_len - skip, max_size); in netfs_limit_bvec()
/linux/fs/erofs/
zdata.c
1500 bvec->bv_len = PAGE_SIZE; in z_erofs_fill_bio_vec()
1526 bvec->bv_len = round_up(zbv.end, bs) - bvec->bv_offset; in z_erofs_fill_bio_vec()
1732 if (cur + bvec.bv_len > end) in z_erofs_submit_queue()
1733 bvec.bv_len = end - cur; in z_erofs_submit_queue()
1734 DBG_BUGON(bvec.bv_len < sb->s_blocksize); in z_erofs_submit_queue()
1760 if (!bio_add_page(bio, bvec.bv_page, bvec.bv_len, in z_erofs_submit_queue()
1763 last_pa = cur + bvec.bv_len; in z_erofs_submit_queue()
1765 } while ((cur += bvec.bv_len) < end); in z_erofs_submit_queue()
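The matches above share one pattern: code walks a bio one bio_vec at a time and uses bv_len as the byte count of the current segment, whether it is copying bv_len bytes, advancing an iterator by bv_len, or checking bv_len against an alignment or segment-size limit. A minimal, self-contained sketch of that pattern; the helper name is made up for illustration and is not taken from any of the files listed:

#include <linux/bio.h>

/* Hypothetical helper: sum the bytes covered by a bio, segment by segment. */
static unsigned int example_bio_byte_count(struct bio *bio)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned int bytes = 0;

	bio_for_each_segment(bv, bio, iter)
		bytes += bv.bv_len;	/* length of this segment in bytes */

	return bytes;
}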
