/linux/include/rdma/
  ib_umem.h
     75  static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
     79          __rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
     81          biter->__sg_advance = ib_umem_offset(umem) & ~(pgsz - 1);
     82          biter->__sg_numblocks = ib_umem_num_dma_blocks(umem, pgsz);
     85  static inline bool __rdma_umem_block_iter_next(struct ib_block_iter *biter)
     87          return __rdma_block_iter_next(biter) && biter->__sg_numblocks--;
    101  #define rdma_umem_for_each_dma_block(umem, biter, pgsz) \
    102          for (__rdma_umem_block_iter_start(biter, umem, pgsz); \
    103               __rdma_umem_block_iter_next(biter);)
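The macro above is the usual way a driver walks a registered umem in device-page-sized DMA blocks. A minimal sketch of that pattern, assuming the umem was already pinned with ib_umem_get() and dma_list has room for ib_umem_num_dma_blocks() entries; fill_dma_list() itself is a hypothetical helper, not a kernel API:

    #include <rdma/ib_umem.h>
    #include <rdma/ib_verbs.h>

    /* Hypothetical helper: record the bus address of every page_size block. */
    static void fill_dma_list(struct ib_umem *umem, unsigned long page_size,
                              u64 *dma_list)
    {
            struct ib_block_iter biter;
            size_t i = 0;

            rdma_umem_for_each_dma_block(umem, &biter, page_size)
                    dma_list[i++] = rdma_block_iter_dma_address(&biter);
    }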
  ib_verbs.h
    2894  void __rdma_block_iter_start(struct ib_block_iter *biter,
    2898  bool __rdma_block_iter_next(struct ib_block_iter *biter);
    2903   * @biter: block iterator holding the memory block
    2906  rdma_block_iter_dma_address(struct ib_block_iter *biter)
    2908          return biter->__dma_addr & ~(BIT_ULL(biter->__pg_bit) - 1);
    2914   * @biter: block iterator holding the memory block
    2921  #define rdma_for_each_block(sglist, biter, nents, pgsz) \
    2922          for (__rdma_block_iter_start(biter, sglist, nents, \
    2924               __rdma_block_iter_next(biter);)
    [all...]
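rdma_for_each_block() is the lower-level form that walks any DMA-mapped scatterlist instead of a umem. A hedged sketch, assuming sgt has already been mapped with dma_map_sgtable() and pgsz is a power-of-two block size the device supports; dump_blocks() is illustrative only:

    #include <linux/printk.h>
    #include <linux/scatterlist.h>
    #include <rdma/ib_verbs.h>

    /* Illustrative only: print the aligned start address of each pgsz block. */
    static void dump_blocks(struct sg_table *sgt, unsigned long pgsz)
    {
            struct ib_block_iter biter;
            dma_addr_t addr;

            rdma_for_each_block(sgt->sgl, &biter, sgt->nents, pgsz) {
                    addr = rdma_block_iter_dma_address(&biter);
                    pr_debug("block at %pad\n", &addr);
            }
    }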
/linux/drivers/infiniband/hw/mlx5/
  mem.c, in mlx5_ib_populate_pas():
     43  struct ib_block_iter biter;
     45  rdma_umem_for_each_dma_block (umem, &biter, page_size) {
     46          *pas = cpu_to_be64(rdma_block_iter_dma_address(&biter) |
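Before a walk like the one in mlx5_ib_populate_pas(), a driver normally asks ib_umem_find_best_pgsz() for the largest block size the hardware supports and sizes its PAS array with ib_umem_num_dma_blocks(). A minimal sketch, assuming pgsz_bitmap encodes the device's supported page sizes and iova is the address the MR will be mapped at; the helper name is made up:

    #include <rdma/ib_umem.h>

    /* Hypothetical helper: pick a block size and report how many PAS entries
     * the translation table will need.  Returns 0 if no supported size fits. */
    static unsigned long size_pas_table(struct ib_umem *umem,
                                        unsigned long pgsz_bitmap, u64 iova,
                                        size_t *nblocks)
    {
            unsigned long page_size;

            page_size = ib_umem_find_best_pgsz(umem, pgsz_bitmap, iova);
            if (!page_size)
                    return 0;

            *nblocks = ib_umem_num_dma_blocks(umem, page_size);
            return page_size;
    }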
  umr.c, in _mlx5r_umr_update_mr_pas():
    645  struct ib_block_iter biter;
    677  rdma_umem_for_each_dma_block(mr->umem, &biter, BIT(mr->page_shift)) {
    697  cur_ksm->va = cpu_to_be64(rdma_block_iter_dma_address(&biter));
    703          cpu_to_be64(rdma_block_iter_dma_address(&biter) |
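Every address the iterator hands back is aligned to the chosen block size, so its low bits are guaranteed to be zero; mlx5 uses that to OR control bits into the 64-bit entry before byte-swapping it for the device, as in line 703 above. A hedged sketch of the same packing; MTT_ENTRY_PRESENT is a made-up flag standing in for the driver-specific bit:

    #include <linux/bits.h>
    #include <rdma/ib_umem.h>

    #define MTT_ENTRY_PRESENT BIT_ULL(0)    /* hypothetical low-bit flag */

    /* Pack one big-endian translation entry per DMA block. */
    static void pack_mtt(struct ib_umem *umem, unsigned int page_shift,
                         __be64 *mtt)
    {
            struct ib_block_iter biter;

            rdma_umem_for_each_dma_block(umem, &biter, BIT(page_shift))
                    *mtt++ = cpu_to_be64(rdma_block_iter_dma_address(&biter) |
                                         MTT_ENTRY_PRESENT);
    }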
/linux/drivers/infiniband/hw/hns/
  hns_roce_alloc.c, in hns_roce_get_umem_bufs():
    159  struct ib_block_iter biter;
    163  rdma_umem_for_each_dma_block(umem, &biter, 1 << page_shift) {
    164          bufs[total++] = rdma_block_iter_dma_address(&biter);
/linux/drivers/infiniband/core/
  verbs.c
    3096  void __rdma_block_iter_start(struct ib_block_iter *biter,
    3100          memset(biter, 0, sizeof(struct ib_block_iter));
    3101          biter->__sg = sglist;
    3102          biter->__sg_nents = nents;
    3105          biter->__pg_bit = __fls(pgsz);
    3109  bool __rdma_block_iter_next(struct ib_block_iter *biter)
    3114          if (!biter->__sg_nents || !biter->__sg)
    3117          biter->__dma_addr = sg_dma_address(biter->__sg) + biter->__sg_advance;
    3118          block_offset = biter->__dma_addr & (BIT_ULL(biter->__pg_bit) - 1);
    3119          sg_delta = BIT_ULL(biter->__pg_bit) - block_offset;
    [all …]
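The fragment above shows the interesting part of __rdma_block_iter_next(): each scatterlist entry is split into pgsz-sized pieces while preserving block alignment. A self-contained userspace model of that arithmetic (plain C, no kernel headers; emit_blocks() is illustrative, not the kernel function):

    #include <stdint.h>
    #include <stdio.h>

    /* Toy model of the block iterator's splitting arithmetic: walk one
     * (addr, len) DMA segment and emit pgsz-aligned block addresses. */
    static void emit_blocks(uint64_t dma_addr, uint64_t len, uint64_t pgsz)
    {
            uint64_t advance = 0;

            while (advance < len) {
                    uint64_t cur = dma_addr + advance;
                    uint64_t block_offset = cur & (pgsz - 1);
                    uint64_t sg_delta = pgsz - block_offset; /* bytes left in this block */

                    printf("block at 0x%llx\n",
                           (unsigned long long)(cur & ~(pgsz - 1)));
                    advance += sg_delta;
            }
    }

    int main(void)
    {
            /* A 16 KiB segment starting 1 KiB into a 4 KiB block spans five blocks. */
            emit_blocks(0x1000400, 0x4000, 0x1000);
            return 0;
    }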
/linux/drivers/infiniband/hw/vmw_pvrdma/
  pvrdma_misc.c, in pvrdma_page_dir_insert_umem():
    185  struct ib_block_iter biter;
    192  rdma_umem_for_each_dma_block (umem, &biter, PAGE_SIZE) {
    194          pdir, i, rdma_block_iter_dma_address(&biter));
/linux/drivers/dma/
  fsl-edma-common.c
    355  len += nbytes * fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, biter);  in fsl_edma_desc_residue()
    378  size = nbytes * fsl_edma_get_tcd_to_cpu(fsl_chan, edesc->tcd[i].vtcd, biter);  in fsl_edma_desc_residue()
    448  edma_cp_tcd_to_reg(fsl_chan, tcd, biter);  in fsl_edma_set_tcd_regs()
    479  u16 biter, u16 doff, dma_addr_t dlast_sga, bool major_int,  in fsl_edma_fill_tcd()
    522  fsl_edma_set_tcd_to_le(fsl_chan, tcd, EDMA_TCD_BITER_BITER(biter), biter);  in fsl_edma_fill_tcd()

  fsl-edma-common.h
     88  __le16 biter;
    102  __le16 biter;
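In the eDMA transfer control descriptor, biter is the beginning major-loop iteration count: each major iteration moves nbytes, so a descriptor's total length is nbytes * biter, which is how fsl_edma_desc_residue() sums up the untransferred part above. A standalone model of that accounting (plain C; the field names and the residue rule here are assumptions for illustration, not copied from the driver):

    #include <stdint.h>
    #include <stdio.h>

    /* Toy model of eDMA major-loop accounting: BITER is the programmed number
     * of major iterations, CITER counts down as iterations complete, and each
     * major iteration transfers nbytes. */
    struct tcd_model {
            uint32_t nbytes;   /* bytes per major-loop iteration */
            uint16_t biter;    /* beginning iteration count */
            uint16_t citer;    /* current (remaining) iteration count */
    };

    static uint64_t tcd_total_len(const struct tcd_model *tcd)
    {
            return (uint64_t)tcd->nbytes * tcd->biter;
    }

    static uint64_t tcd_residue(const struct tcd_model *tcd)
    {
            return (uint64_t)tcd->nbytes * tcd->citer;
    }

    int main(void)
    {
            struct tcd_model tcd = { .nbytes = 64, .biter = 1024, .citer = 300 };

            printf("total %llu, remaining %llu\n",
                   (unsigned long long)tcd_total_len(&tcd),
                   (unsigned long long)tcd_residue(&tcd));
            return 0;
    }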
/linux/fs/erofs/
  zdata.c
     492  struct z_erofs_bvec_iter biter;
     707  ret = z_erofs_bvec_enqueue(&fe->biter, bvec, &fe->candidate_bvpage,  in z_erofs_attach_page()
     853  z_erofs_bvec_iter_begin(&fe->biter, &fe->pcl->bvset,  in z_erofs_pcluster_begin()
     977  z_erofs_bvec_iter_end(&fe->biter);  in z_erofs_pcluster_end()
    1221  struct z_erofs_bvec_iter biter;  in z_erofs_parse_out_bvecs()
    1225  z_erofs_bvec_iter_begin(&biter, &pcl->bvset, Z_EROFS_INLINE_BVECS, 0);
    1229  z_erofs_bvec_dequeue(&biter, &bvec, &old_bvpage);
    1238  old_bvpage = z_erofs_bvec_iter_end(&biter);
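This biter is unrelated to the RDMA one: it is erofs's cursor over a pcluster's bvec set, begun with z_erofs_bvec_iter_begin(), filled via _enqueue() while pages are attached, drained via _dequeue() during decompression, and closed with _iter_end(), which hands back the last backing page (line 1238). A toy model of that produce-then-consume lifecycle (illustrative names, not the erofs API):

    #include <stdio.h>

    #define NR_SLOTS 4      /* stand-in for Z_EROFS_INLINE_BVECS */

    struct bvec_model { int page_id; unsigned int len; };

    struct bvec_iter_model {
            struct bvec_model *set;
            unsigned int cur, nr;
    };

    static void iter_begin(struct bvec_iter_model *it,
                           struct bvec_model *set, unsigned int nr)
    {
            it->set = set;
            it->nr = nr;
            it->cur = 0;
    }

    /* The real enqueue spills into freshly allocated pages when the inline
     * slots run out; this toy just fails instead. */
    static int enqueue(struct bvec_iter_model *it, struct bvec_model bv)
    {
            if (it->cur >= it->nr)
                    return -1;
            it->set[it->cur++] = bv;
            return 0;
    }

    static struct bvec_model dequeue(struct bvec_iter_model *it)
    {
            return it->set[it->cur++];
    }

    int main(void)
    {
            struct bvec_model slots[NR_SLOTS];
            struct bvec_iter_model in, out;

            iter_begin(&in, slots, NR_SLOTS);
            enqueue(&in, (struct bvec_model){ .page_id = 1, .len = 4096 });
            enqueue(&in, (struct bvec_model){ .page_id = 2, .len = 2048 });

            iter_begin(&out, slots, NR_SLOTS);
            for (unsigned int i = 0; i < 2; i++) {
                    struct bvec_model bv = dequeue(&out);

                    printf("page %d, %u bytes\n", bv.page_id, bv.len);
            }
            return 0;
    }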
/linux/drivers/infiniband/hw/cxgb4/
  mem.c, in c4iw_reg_user_mr():
    497  struct ib_block_iter biter;
    548  rdma_umem_for_each_dma_block(mhp->umem, &biter, 1 << shift) {
    549          pages[i++] = cpu_to_be64(rdma_block_iter_dma_address(&biter));
/linux/drivers/infiniband/hw/bnxt_re/
  qplib_res.c, in bnxt_qplib_fill_user_dma_pages():
     94  struct ib_block_iter biter;
     97  rdma_umem_for_each_dma_block(sginfo->umem, &biter, sginfo->pgsize) {
     98          pbl->pg_map_arr[i] = rdma_block_iter_dma_address(&biter);
/linux/drivers/infiniband/hw/mana/
  main.c, in mana_ib_gd_create_dma_region():
    353  struct ib_block_iter biter;
    399  rdma_umem_for_each_dma_block(umem, &biter, page_sz) {
    402          page_addr_list[tail++] = rdma_block_iter_dma_address(&biter);
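The tail index above accumulates block addresses into a list that is handed to the device in chunks rather than in one message. A hedged sketch of that batching pattern; send_batch() and BATCH_ENTRIES are stand-ins, not the mana API:

    #include <rdma/ib_umem.h>

    #define BATCH_ENTRIES 512       /* hypothetical per-message capacity */

    /* Placeholder: a real driver would fill and post a firmware request here. */
    static int send_batch(const u64 *addrs, unsigned int n)
    {
            return 0;
    }

    static int register_region(struct ib_umem *umem, unsigned long page_sz,
                               u64 *scratch)
    {
            struct ib_block_iter biter;
            unsigned int tail = 0;
            int err;

            rdma_umem_for_each_dma_block(umem, &biter, page_sz) {
                    scratch[tail++] = rdma_block_iter_dma_address(&biter);
                    if (tail == BATCH_ENTRIES) {
                            err = send_batch(scratch, tail);
                            if (err)
                                    return err;
                            tail = 0;
                    }
            }
            /* Flush the final partial batch, if any. */
            return tail ? send_batch(scratch, tail) : 0;
    }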
/linux/drivers/infiniband/hw/mthca/
  mthca_provider.c, in mthca_reg_user_mr():
    831  struct ib_block_iter biter;
    880  rdma_umem_for_each_dma_block(mr->umem, &biter, PAGE_SIZE) {
    881          pages[i++] = rdma_block_iter_dma_address(&biter);
/linux/drivers/infiniband/hw/erdma/
  erdma_verbs.c, in erdma_fill_bottom_mtt():
    556  struct ib_block_iter biter;
    562  rdma_umem_for_each_dma_block(mem->umem, &biter, mem->page_size)
    563          mtt->buf[idx++] = rdma_block_iter_dma_address(&biter);
/linux/drivers/infiniband/hw/ocrdma/
  ocrdma_verbs.c, in build_user_pbes():
    819  struct ib_block_iter biter;
    830  rdma_umem_for_each_dma_block (mr->umem, &biter, PAGE_SIZE) {
    832          pg_addr = rdma_block_iter_dma_address(&biter);
/linux/drivers/infiniband/hw/qedr/
  verbs.c, in qedr_populate_pbls():
    627  struct ib_block_iter biter;
    649  rdma_umem_for_each_dma_block (umem, &biter, BIT(pg_shift)) {
    650          u64 pg_addr = rdma_block_iter_dma_address(&biter);
/linux/drivers/infiniband/hw/irdma/
  verbs.c, in irdma_copy_user_pgaddrs():
    2340  struct ib_block_iter biter;
    2349  rdma_umem_for_each_dma_block(region, &biter, iwmr->page_size) {
    2350          *pbl = rdma_block_iter_dma_address(&biter);