/freebsd/sys/ofed/drivers/infiniband/core/
ib_umem.c
    51  static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)  in __ib_umem_release() argument
    57  if (umem->nmap > 0)  in __ib_umem_release()
    58  ib_dma_unmap_sg(dev, umem->sg_head.sgl,  in __ib_umem_release()
    59  umem->nmap,  in __ib_umem_release()
    62  for_each_sg(umem->sg_head.sgl, sg, umem->npages, i) {  in __ib_umem_release()
    68  sg_free_table(&umem->sg_head);  in __ib_umem_release()
    88  struct ib_umem *umem;  in ib_umem_get() local
   118  umem = kzalloc(sizeof *umem, GFP_KERNEL);  in ib_umem_get()
   119  if (!umem)  in ib_umem_get()
   122  umem->context = context;  in ib_umem_get()
   [all …]
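The __ib_umem_release() hits above outline a fixed teardown order: unmap the scatter-gather list from the device, release each pinned page (dirtying it if the device may have written to it), then free the sg table. A minimal userspace sketch of that ordering only, with hypothetical stubs standing in for the kernel DMA and page APIs:

#include <stdio.h>
#include <stdbool.h>

/* Hypothetical stand-ins for ib_dma_unmap_sg(), page release, and
 * sg_free_table(); only the call order mirrors __ib_umem_release(). */
static void unmap_sg(int nmap)          { printf("unmap %d DMA segments\n", nmap); }
static void put_page(int n, bool dirty) { printf("release page %d%s\n", n, dirty ? " (dirty)" : ""); }
static void free_sg_table(void)         { printf("free sg table\n"); }

static void umem_release_sketch(int nmap, int npages, bool dirty)
{
    if (nmap > 0)               /* 1. undo the device mapping first */
        unmap_sg(nmap);
    for (int i = 0; i < npages; i++)
        put_page(i, dirty);     /* 2. unpin pages, marking writable ones dirty */
    free_sg_table();            /* 3. only then free the bookkeeping */
}

int main(void)
{
    umem_release_sketch(2, 3, true);
    return 0;
}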
ib_umem_odp.c
   243  int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem)  in ib_umem_odp_get() argument
   262  umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);  in ib_umem_odp_get()
   263  if (!umem->odp_data) {  in ib_umem_odp_get()
   267  umem->odp_data->umem = umem;  in ib_umem_odp_get()
   269  mutex_init(&umem->odp_data->umem_mutex);  in ib_umem_odp_get()
   271  init_completion(&umem->odp_data->notifier_completion);  in ib_umem_odp_get()
   273  umem->odp_data->page_list = vzalloc(ib_umem_num_pages(umem) *  in ib_umem_odp_get()
   274  sizeof(*umem->odp_data->page_list));  in ib_umem_odp_get()
   275  if (!umem->odp_data->page_list) {  in ib_umem_odp_get()
   280  umem->odp_data->dma_list = vzalloc(ib_umem_num_pages(umem) *  in ib_umem_odp_get()
   [all …]
ib_umem_rbtree.c
    55  return ib_umem_start(umem_odp->umem);  in node_start()
    68  return ib_umem_end(umem_odp->umem) - 1;  in node_last()
    84  struct ib_umem_odp *umem;  in rbt_ib_umem_for_each_in_range() local
    91  umem = container_of(node, struct ib_umem_odp, interval_tree);  in rbt_ib_umem_for_each_in_range()
    92  ret_val = cb(umem->umem, start, last, cookie) || ret_val;  in rbt_ib_umem_for_each_in_range()
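rbt_ib_umem_for_each_in_range() visits every ODP umem whose byte interval overlaps [start, last] and ORs the callback results together; note that node_last() returns ib_umem_end() - 1, so intervals are inclusive on both ends. A sketch of the same visit-and-accumulate pattern, with a plain array as a stand-in for the interval tree:

#include <stdbool.h>
#include <stdio.h>

typedef bool (*range_cb)(int id, unsigned long start, unsigned long last, void *cookie);

struct ival { int id; unsigned long start, last; }; /* last is inclusive, like node_last() */

static bool for_each_in_range(const struct ival *v, int n,
                              unsigned long start, unsigned long last,
                              range_cb cb, void *cookie)
{
    bool ret = false;

    for (int i = 0; i < n; ++i)
        if (v[i].start <= last && v[i].last >= start)  /* intervals overlap */
            ret = cb(v[i].id, start, last, cookie) || ret;
    return ret;
}

static bool dump(int id, unsigned long start, unsigned long last, void *cookie)
{
    (void)cookie;
    printf("umem %d overlaps [%#lx, %#lx]\n", id, start, last);
    return true;
}

int main(void)
{
    const struct ival tree[] = {
        { 1, 0x1000, 0x1fff }, { 2, 0x3000, 0x4fff }, { 3, 0x8000, 0x8fff },
    };
    return !for_each_in_range(tree, 3, 0x1800, 0x3800, dump, NULL);
}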
/freebsd/sys/ofed/include/rdma/
ib_umem.h
    62  static inline int ib_umem_offset(struct ib_umem *umem)  in ib_umem_offset() argument
    64  return umem->address & ((unsigned long)umem->page_size - 1);  in ib_umem_offset()
    68  static inline unsigned long ib_umem_start(struct ib_umem *umem)  in ib_umem_start() argument
    70  return umem->address - ib_umem_offset(umem);  in ib_umem_start()
    74  static inline unsigned long ib_umem_end(struct ib_umem *umem)  in ib_umem_end() argument
    76  return PAGE_ALIGN(umem->address + umem->length);  in ib_umem_end()
    79  static inline size_t ib_umem_num_pages(struct ib_umem *umem)  in ib_umem_num_pages() argument
    81  return (ib_umem_end(umem) - ib_umem_start(umem)) >> PAGE_SHIFT;  in ib_umem_num_pages()
    88  void ib_umem_release(struct ib_umem *umem);
    89  int ib_umem_page_count(struct ib_umem *umem);
    [all …]
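These four inline helpers define all of the page-range arithmetic used by the drivers below: the byte offset of the mapping within its first page, the page-aligned start and end, and the page count. A standalone sketch of the same math, assuming 4 KiB pages and a hypothetical struct that mirrors only the ib_umem fields the helpers touch (address, length, page_size):

#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

struct ib_umem_sketch {
    unsigned long address;
    unsigned long length;
    unsigned long page_size;
};

static unsigned long umem_offset(const struct ib_umem_sketch *u)
{
    return u->address & (u->page_size - 1);    /* offset into first page */
}

static unsigned long umem_start(const struct ib_umem_sketch *u)
{
    return u->address - umem_offset(u);        /* first page boundary */
}

static unsigned long umem_end(const struct ib_umem_sketch *u)
{
    return PAGE_ALIGN(u->address + u->length); /* one past last byte, rounded up */
}

int main(void)
{
    /* A region starting 0x234 bytes into a page and spanning 3 pages. */
    struct ib_umem_sketch u = { 0x10000234UL, 0x2400UL, PAGE_SIZE };

    printf("offset=%#lx start=%#lx end=%#lx npages=%lu\n",
           umem_offset(&u), umem_start(&u), umem_end(&u),
           (umem_end(&u) - umem_start(&u)) >> PAGE_SHIFT);
    return 0;
}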
ib_umem_odp.h
    78  struct ib_umem *umem;  member
    89  int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem);
    91  void ib_umem_odp_release(struct ib_umem *umem);
   106  int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 start_offset, u64 bcnt,
   109  void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 start_offset,
   154  struct ib_umem *umem)  in ib_umem_odp_get() argument
   159  static inline void ib_umem_odp_release(struct ib_umem *umem) {}  in ib_umem_odp_release() argument
/freebsd/sys/dev/mlx5/mlx5_ib/
mlx5_ib_mem.c
    42  void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,  in mlx5_ib_cont_pages() argument
    61  for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {  in mlx5_ib_cont_pages()
   126  void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,  in __mlx5_ib_populate_pas() argument
   130  unsigned long umem_page_shift = ilog2(umem->page_size);  in __mlx5_ib_populate_pas()
   140  const bool odp = umem->odp_data != NULL;  in __mlx5_ib_populate_pas()
   147  dma_addr_t pa = umem->odp_data->dma_list[offset + i];  in __mlx5_ib_populate_pas()
   156  for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {  in __mlx5_ib_populate_pas()
   175  void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,  in mlx5_ib_populate_pas() argument
   178  return __mlx5_ib_populate_pas(dev, umem, page_shift, 0,  in mlx5_ib_populate_pas()
   179  ib_umem_num_pages(umem), pas,  in mlx5_ib_populate_pas()
mlx5_ib_doorbell.c
    37  struct ib_umem *umem;  member
    62  page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,  in mlx5_ib_db_map_user()
    64  if (IS_ERR(page->umem)) {  in mlx5_ib_db_map_user()
    65  err = PTR_ERR(page->umem);  in mlx5_ib_db_map_user()
    73  db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);  in mlx5_ib_db_map_user()
    89  ib_umem_release(db->u.user_page->umem);  in mlx5_ib_db_unmap_user()
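The doorbell hits above (and the identical mlx4_ib_doorbell.c code further down) use the same trick: pin only the single page containing the user doorbell, ib_umem_get(..., virt & PAGE_MASK, ...), then recover the doorbell's bus address by adding the sub-page offset (virt & ~PAGE_MASK) to the page's DMA address. A sketch of just that arithmetic, with a made-up bus address standing in for sg_dma_address():

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
    unsigned long virt = 0x7fffd3a41c68UL;  /* user doorbell address */
    unsigned long page = virt & PAGE_MASK;  /* what ib_umem_get() pins */
    unsigned long off  = virt & ~PAGE_MASK; /* sub-page offset, re-added later */
    unsigned long bus  = 0xfd000000UL;      /* pretend sg_dma_address() result */

    printf("pin page %#lx, db->dma = %#lx + %#lx = %#lx\n",
           page, bus, off, bus + off);
    return 0;
}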
mlx5_ib_mr.c
    83  if (mr->umem->odp_data) {  in update_odp_mr()
    92  mr->umem->odp_data->private = mr;  in update_odp_mr()
   518  mr->umem = NULL;  in mlx5_ib_get_dma_mr()
   546  static int dma_map_mr_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,  in dma_map_mr_pas() argument
   564  mlx5_ib_populate_pas(dev, umem, page_shift, pas, MLX5_IB_MTT_PRESENT);  in dma_map_mr_pas()
   629  struct ib_umem *umem = ib_umem_get(pd->uobject->context, start, length,  in mr_umem_get() local
   631  if (IS_ERR(umem)) {  in mr_umem_get()
   632  mlx5_ib_err(dev, "umem get failed (%ld)\n", PTR_ERR(umem));  in mr_umem_get()
   633  return (void *)umem;  in mr_umem_get()
   636  mlx5_ib_cont_pages(umem, start, MLX5_MKEY_PAGE_SHIFT_MASK, npages, page_shift, ncont, order);  in mr_umem_get()
   [all …]
mlx5_ib_srq.c
   108  srq->umem = ib_umem_get(&ucontext->ibucontext, ucmd.buf_addr, buf_size, 0, 0);  in create_srq_user()
   109  if (IS_ERR(srq->umem)) {  in create_srq_user()
   111  err = PTR_ERR(srq->umem);  in create_srq_user()
   115  mlx5_ib_cont_pages(srq->umem, ucmd.buf_addr, 0, &npages,  in create_srq_user()
   130  mlx5_ib_populate_pas(dev, srq->umem, page_shift, in->pas, 0);  in create_srq_user()
   151  ib_umem_release(srq->umem);  in create_srq_user()
   227  ib_umem_release(srq->umem);  in destroy_srq_user()
   409  ib_umem_release(msrq->umem);  in mlx5_ib_destroy_srq()
mlx5_ib_cq.c
   775  cq->buf.umem = ib_umem_get(&context->ibucontext, ucmd.buf_addr,  in create_cq_user()
   778  if (IS_ERR(cq->buf.umem)) {  in create_cq_user()
   779  err = PTR_ERR(cq->buf.umem);  in create_cq_user()
   788  mlx5_ib_cont_pages(cq->buf.umem, ucmd.buf_addr, 0, &npages, &page_shift,  in create_cq_user()
   802  mlx5_ib_populate_pas(dev, cq->buf.umem, page_shift, pas, 0);  in create_cq_user()
   827  ib_umem_release(cq->buf.umem);  in create_cq_user()
   837  ib_umem_release(cq->buf.umem);  in destroy_cq_user()
  1117  struct ib_umem *umem;  in resize_user() local
  1120  struct ib_ucontext *context = cq->buf.umem->context;  in resize_user()
  1133  umem  in resize_user()
   [all …]
mlx5_ib.h
   253  struct ib_umem *umem;  member
   305  struct ib_umem *umem;  member
   423  struct ib_umem *umem;  member
   463  struct ib_umem *umem;  member
   507  struct ib_umem *umem;  member
   542  struct ib_umem *umem;  member
   986  void mlx5_ib_cont_pages(struct ib_umem *umem, u64 addr,
   990  void __mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
   993  void mlx5_ib_populate_pas(struct mlx5_ib_dev *dev, struct ib_umem *umem,
   999  int mlx5_mr_ib_cont_pages(struct ib_umem *umem, u6
   [all …]
mlx5_ib_devx.c
   139  struct ib_umem *umem;  member
  2111  obj->umem = ib_umem_get(ucontext, addr, size, access, 0);  in devx_umem_get()
  2112  if (IS_ERR(obj->umem))  in devx_umem_get()
  2113  return PTR_ERR(obj->umem);  in devx_umem_get()
  2115  mlx5_ib_cont_pages(obj->umem, obj->umem->address,  in devx_umem_get()
  2120  ib_umem_release(obj->umem);  in devx_umem_get()
  2125  obj->page_offset = obj->umem->address & page_mask;  in devx_umem_get()
  2144  void *umem;  in devx_umem_reg_cmd_build() local
  2147  umem = MLX5_ADDR_OF(create_umem_in, cmd->in, umem);  in devx_umem_reg_cmd_build()
  2148  mtt = (__be64 *)MLX5_ADDR_OF(umem, umem, mtt);  in devx_umem_reg_cmd_build()
   [all …]
/freebsd/sys/dev/mlx4/mlx4_ib/
mlx4_ib_mr.c
    80  mr->umem = NULL;  in mlx4_ib_get_dma_mr()
    94  struct ib_umem *umem)  in mlx4_ib_umem_write_mtt() argument
   109  for_each_sg(umem->sg_head.sgl, sg, umem->nmap, entry) {  in mlx4_ib_umem_write_mtt()
   113  umem->page_size * k;  in mlx4_ib_umem_write_mtt()
   153  mr->umem = ib_umem_get(pd->uobject->context, start, length,  in mlx4_ib_reg_user_mr()
   155  if (IS_ERR(mr->umem)) {  in mlx4_ib_reg_user_mr()
   156  err = PTR_ERR(mr->umem);  in mlx4_ib_reg_user_mr()
   160  n = ib_umem_page_count(mr->umem);  in mlx4_ib_reg_user_mr()
   161  shift = ilog2(mr->umem->page_size);  in mlx4_ib_reg_user_mr()
   168  err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);  in mlx4_ib_reg_user_mr()
   [all …]
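mlx4_ib_umem_write_mtt() shows the MTT-population loop that also appears in the mthca_provider.c and iw_cxgbe mem.c hits below: each mapped DMA segment is expanded into page_size-spaced addresses, pages[i++] = sg_dma_address(sg) + umem->page_size * k. A userspace sketch of that expansion, with a hypothetical segment list in place of a real scatterlist:

#include <stdio.h>

struct seg { unsigned long long dma; unsigned int len; };

int main(void)
{
    const unsigned long page_size = 4096;
    /* Two DMA segments after mapping: 2 pages + 1 page. */
    const struct seg sgl[] = { { 0xfd000000ULL, 8192 }, { 0xfe000000ULL, 4096 } };
    unsigned long long pages[8];
    int i = 0;

    for (unsigned int e = 0; e < sizeof(sgl) / sizeof(sgl[0]); ++e) {
        unsigned int npages = sgl[e].len / page_size;  /* i.e. len >> shift */
        for (unsigned int k = 0; k < npages; ++k)
            pages[i++] = sgl[e].dma + page_size * k;   /* one MTT entry per page */
    }

    for (int j = 0; j < i; ++j)
        printf("mtt[%d] = %#llx\n", j, pages[j]);
    return 0;
}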
mlx4_ib_doorbell.c
    39  struct ib_umem *umem;  member
    64  page->umem = ib_umem_get(&context->ibucontext, virt & PAGE_MASK,  in mlx4_ib_db_map_user()
    66  if (IS_ERR(page->umem)) {  in mlx4_ib_db_map_user()
    67  err = PTR_ERR(page->umem);  in mlx4_ib_db_map_user()
    75  db->dma = sg_dma_address(page->umem->sg_head.sgl) + (virt & ~PAGE_MASK);  in mlx4_ib_db_map_user()
    91  ib_umem_release(db->u.user_page->umem);  in mlx4_ib_db_unmap_user()
mlx4_ib_srq.c
   113  srq->umem =  in mlx4_ib_create_srq()
   115  if (IS_ERR(srq->umem))  in mlx4_ib_create_srq()
   116  return PTR_ERR(srq->umem);  in mlx4_ib_create_srq()
   118  err = mlx4_mtt_init(dev->dev, ib_umem_page_count(srq->umem),  in mlx4_ib_create_srq()
   119  ilog2(srq->umem->page_size), &srq->mtt);  in mlx4_ib_create_srq()
   123  err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem);  in mlx4_ib_create_srq()
   213  if (!srq->umem)  in mlx4_ib_create_srq()
   215  ib_umem_release(srq->umem);  in mlx4_ib_create_srq()
   289  ib_umem_release(msrq->umem);  in mlx4_ib_destroy_srq()
mlx4_ib_cq.c
   141  struct ib_umem **umem, u64 buf_addr, int cqe)  in mlx4_ib_get_cq_umem() argument
   148  *umem = ib_umem_get(&context->ibucontext, buf_addr, cqe * cqe_size,  in mlx4_ib_get_cq_umem()
   150  if (IS_ERR(*umem))  in mlx4_ib_get_cq_umem()
   151  return PTR_ERR(*umem);  in mlx4_ib_get_cq_umem()
   153  err = mlx4_mtt_init(dev->dev, ib_umem_page_count(*umem),  in mlx4_ib_get_cq_umem()
   154  ilog2((*umem)->page_size), &buf->mtt);  in mlx4_ib_get_cq_umem()
   158  err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);  in mlx4_ib_get_cq_umem()
   168  ib_umem_release(*umem);  in mlx4_ib_get_cq_umem()
   211  err = mlx4_ib_get_cq_umem(dev, udata, &cq->buf, &cq->umem,  in mlx4_ib_create_cq()
   268  ib_umem_release(cq->umem);  in mlx4_ib_create_cq()
   [all …]
/freebsd/sys/ofed/drivers/infiniband/ulp/sdp/
sdp_zcopy.c
    64  BUG_ON(!tx_sa->umem);  in sdp_post_srcavail()
    65  BUG_ON(!tx_sa->umem->chunk_list.next);  in sdp_post_srcavail()
    67  chunk = list_entry(tx_sa->umem->chunk_list.next, struct ib_umem_chunk, list);  in sdp_post_srcavail()
    70  off = tx_sa->umem->offset;  in sdp_post_srcavail()
    71  len = tx_sa->umem->length;  in sdp_post_srcavail()
    86  payload_len = MIN(tx_sa->umem->page_size - off, len);  in sdp_post_srcavail()
   108  tx_sa->bytes_sent = tx_sa->umem->length;  in sdp_post_srcavail()
   423  struct ib_umem *umem;  in sdp_alloc_fmr() local
   447  umem = ib_umem_get(&sdp_sk(sk)->context, (unsigned long)uaddr, len,  in sdp_alloc_fmr()
   450  if (IS_ERR(umem)) {  in sdp_alloc_fmr()
   [all …]
/freebsd/sys/dev/bnxt/bnxt_re/
ib_verbs.c
    39  struct scatterlist *get_ib_umem_sgl(struct ib_umem *umem, u32 *nmap)  in get_ib_umem_sgl() argument
    42  *nmap = umem->nmap;  in get_ib_umem_sgl()
    43  return umem->sg_head.sgl;  in get_ib_umem_sgl()
    46  static inline void bnxt_re_peer_mem_release(struct ib_umem *umem)  in bnxt_re_peer_mem_release() argument
    49  ib_umem_release(umem);  in bnxt_re_peer_mem_release()
  1107  if (srq->umem && !IS_ERR(srq->umem))  in bnxt_re_destroy_srq()
  1108  ib_umem_release(srq->umem);  in bnxt_re_destroy_srq()
  1150  static inline size_t ib_umem_num_pages_compat(struct ib_umem *umem)  in ib_umem_num_pages_compat() argument
  1152  return ib_umem_num_pages(umem);  in ib_umem_num_pages_compat()
  1165  struct ib_umem *umem;  in bnxt_re_init_user_srq() local
   [all …]
ib_verbs.h
   120  struct scatterlist *get_ib_umem_sgl(struct ib_umem *umem, u32 *nmap);
   155  struct ib_umem *umem;  member
   205  struct ib_umem *umem;  member
   379  static inline size_t ib_umem_num_pages_compat(struct ib_umem *umem);
   380  static inline void bnxt_re_peer_mem_release(struct ib_umem *umem);
/freebsd/cddl/lib/libumem/
Makefile
     4  LIB= umem
     5  SRCS= umem.c
/freebsd/sys/dev/cxgbe/iw_cxgbe/
mem.c
   482  mhp->umem = ib_umem_get(pd->uobject->context, start, length, acc, 0);  in c4iw_reg_user_mr()
   483  if (IS_ERR(mhp->umem)) {  in c4iw_reg_user_mr()
   484  err = PTR_ERR(mhp->umem);  in c4iw_reg_user_mr()
   489  shift = ffs(mhp->umem->page_size) - 1;  in c4iw_reg_user_mr()
   491  n = mhp->umem->nmap;  in c4iw_reg_user_mr()
   503  for_each_sg(mhp->umem->sg_head.sgl, sg, mhp->umem->nmap, entry) {  in c4iw_reg_user_mr()
   507  mhp->umem->page_size * k);  in c4iw_reg_user_mr()
   548  ib_umem_release(mhp->umem);  in c4iw_reg_user_mr()
   723  if (mhp->umem)  in c4iw_dereg_mr()
   724  ib_umem_release(mhp->umem);  in c4iw_dereg_mr()
/freebsd/sys/netsmb/
smb_subr.c
   128  smb_memdupin(void *umem, size_t len)  in smb_memdupin() argument
   135  if (copyin(umem, p, len) == 0)  in smb_memdupin()
   145  smb_memdup(const void *umem, int len)  in smb_memdup() argument
   152  bcopy(umem, p, len);  in smb_memdup()
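This pair of hits contrasts smb_memdupin(), which duplicates a user-space buffer via copyin() and so must tolerate a fault, with smb_memdup(), which bcopy()s memory already in the kernel. A userspace sketch of the memdupin shape, with malloc and a stub standing in for the kernel allocator and copyin():

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stub for copyin(): returns 0 on success, nonzero if the user address faults. */
static int copyin_stub(const void *uaddr, void *kaddr, size_t len)
{
    memcpy(kaddr, uaddr, len);
    return 0;
}

/* Mirrors the smb_memdupin() shape: allocate, copy in, free on failure. */
static void *memdupin_sketch(void *umem, size_t len)
{
    void *p = malloc(len);

    if (p == NULL)
        return NULL;
    if (copyin_stub(umem, p, len) == 0)
        return p;
    free(p);         /* copyin faulted: release the buffer, report failure */
    return NULL;
}

int main(void)
{
    char user_buf[] = "doorbell";
    char *dup = memdupin_sketch(user_buf, sizeof(user_buf));

    if (dup != NULL) {
        printf("duplicated: %s\n", dup);
        free(dup);
    }
    return 0;
}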
smb_subr.h
   102  void *smb_memdup(const void *umem, int len);
   104  void *smb_memdupin(void *umem, size_t len);
/freebsd/sys/dev/mthca/
mthca_provider.c
   844  mr->umem = NULL;  in mthca_get_dma_mr()
   877  mr->umem = ib_umem_get(pd->uobject->context, start, length, acc,  in mthca_reg_user_mr()
   880  if (IS_ERR(mr->umem)) {  in mthca_reg_user_mr()
   881  err = PTR_ERR(mr->umem);  in mthca_reg_user_mr()
   885  shift = ffs(mr->umem->page_size) - 1;  in mthca_reg_user_mr()
   886  n = mr->umem->nmap;  in mthca_reg_user_mr()
   904  for_each_sg(mr->umem->sg_head.sgl, sg, mr->umem->nmap, entry) {  in mthca_reg_user_mr()
   908  mr->umem->page_size * k;  in mthca_reg_user_mr()
   942  ib_umem_release(mr->umem);  in mthca_reg_user_mr()
   954  ib_umem_release(mmr->umem);  in mthca_dereg_mr()
/freebsd/sys/contrib/device-tree/Bindings/remoteproc/
wkup_m3_rproc.txt
    26  regions. These should be named "umem" & "dmem".
    45  reg-names = "umem", "dmem";