
Searched refs:umem (Results 1 – 25 of 72) sorted by relevance


/linux/net/xdp/
xdp_umem.c
24 static void xdp_umem_unpin_pages(struct xdp_umem *umem) in xdp_umem_unpin_pages() argument
26 unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true); in xdp_umem_unpin_pages()
28 kvfree(umem->pgs); in xdp_umem_unpin_pages()
29 umem->pgs = NULL; in xdp_umem_unpin_pages()
32 static void xdp_umem_unaccount_pages(struct xdp_umem *umem) in xdp_umem_unaccount_pages() argument
34 if (umem->user) { in xdp_umem_unaccount_pages()
35 atomic_long_sub(umem->npgs, &umem->user->locked_vm); in xdp_umem_unaccount_pages()
36 free_uid(umem->user); in xdp_umem_unaccount_pages()
40 static void xdp_umem_addr_unmap(struct xdp_umem *umem) in xdp_umem_addr_unmap() argument
42 vunmap(umem->addrs); in xdp_umem_addr_unmap()
[all …]
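
The unpin helper above is the teardown half of a long-term page pin. A minimal sketch of the pairing it undoes, assuming the usual pin_user_pages_fast() + FOLL_LONGTERM sequence (the pinning side, xdp_umem_pin_pages(), is not shown in this excerpt):

    #include <linux/mm.h>
    #include <linux/slab.h>

    /* Sketch: long-term pin of a user buffer, paired with the unpin above. */
    static int example_pin_pages(unsigned long address, u32 npgs, struct page ***pgs_out)
    {
        struct page **pgs;
        int got;

        pgs = kvcalloc(npgs, sizeof(*pgs), GFP_KERNEL);
        if (!pgs)
            return -ENOMEM;

        got = pin_user_pages_fast(address, npgs, FOLL_WRITE | FOLL_LONGTERM, pgs);
        if (got != (int)npgs) {
            if (got > 0)
                unpin_user_pages(pgs, got);
            kvfree(pgs);
            return got < 0 ? got : -EFAULT;
        }

        *pgs_out = pgs;
        return 0;
    }

    static void example_unpin_pages(struct page **pgs, u32 npgs)
    {
        /* Same sequence as xdp_umem_unpin_pages(): dirty, unpin, free the array. */
        unpin_user_pages_dirty_lock(pgs, npgs, true);
        kvfree(pgs);
    }
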
xsk_buff_pool.c
54 struct xdp_umem *umem) in xp_alloc_tx_descs()
56 bool unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
61 entries = unaligned ? umem->chunks : 0; in xp_create_and_assign_umem()
66 pool->heads = kvcalloc(umem->chunks, sizeof(*pool->heads), GFP_KERNEL); in xp_create_and_assign_umem()
74 pool->chunk_mask = ~((u64)umem->chunk_size - 1); in xp_create_and_assign_umem()
75 pool->addrs_cnt = umem->size; in xp_create_and_assign_umem()
76 pool->heads_cnt = umem->chunks; in xp_create_and_assign_umem()
77 pool->free_heads_cnt = umem->chunks; in xp_create_and_assign_umem()
78 pool->headroom = umem->headroom; in xp_create_and_assign_umem()
79 pool->chunk_size = umem in xp_create_and_assign_umem()
58 xp_create_and_assign_umem(struct xdp_sock *xs, struct xdp_umem *umem) in xp_create_and_assign_umem() argument
255 struct xdp_umem *umem = umem_xs->umem; in xp_assign_dev_shared() local
338 xp_create_dma_map(struct device *dev, struct net_device *netdev, u32 nr_pages, struct xdp_umem *umem) in xp_create_dma_map() argument
[all …]
xsk_diag.c
50 struct xdp_umem *umem = xs->umem; in xsk_diag_put_umem() local
54 if (!umem) in xsk_diag_put_umem()
57 du.id = umem->id; in xsk_diag_put_umem()
58 du.size = umem->size; in xsk_diag_put_umem()
59 du.num_pages = umem->npgs; in xsk_diag_put_umem()
60 du.chunk_size = umem->chunk_size; in xsk_diag_put_umem()
61 du.headroom = umem->headroom; in xsk_diag_put_umem()
65 if (umem->zc) in xsk_diag_put_umem()
67 du.refs = refcount_read(&umem->users); in xsk_diag_put_umem()
xdp_umem.h
11 void xdp_get_umem(struct xdp_umem *umem);
12 void xdp_put_umem(struct xdp_umem *umem, bool defer_cleanup);
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
umem.c
40 struct nvkm_umem *umem; in nvkm_umem_search() local
46 list_for_each_entry(umem, &master->umem, head) { in nvkm_umem_search()
47 if (umem->object.object == handle) { in nvkm_umem_search()
48 memory = nvkm_memory_ref(umem->memory); in nvkm_umem_search()
55 umem = nvkm_umem(object); in nvkm_umem_search()
56 memory = nvkm_memory_ref(umem->memory); in nvkm_umem_search()
65 struct nvkm_umem *umem = nvkm_umem(object); in nvkm_umem_unmap() local
67 if (!umem->map) in nvkm_umem_unmap()
70 if (umem->io) { in nvkm_umem_unmap()
71 if (!IS_ERR(umem->bar)) { in nvkm_umem_unmap()
[all …]
/linux/drivers/infiniband/core/
umem.c
50 static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty) in __ib_umem_release() argument
52 bool make_dirty = umem->writable && dirty; in __ib_umem_release()
57 ib_dma_unmap_sgtable_attrs(dev, &umem->sgt_append.sgt, in __ib_umem_release()
60 for_each_sgtable_sg(&umem->sgt_append.sgt, sg, i) { in __ib_umem_release()
68 sg_free_append_table(&umem->sgt_append); in __ib_umem_release()
85 unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, in ib_umem_find_best_pgsz() argument
97 umem->iova = va = virt; in ib_umem_find_best_pgsz()
99 if (umem->is_odp) { in ib_umem_find_best_pgsz()
100 unsigned int page_size = BIT(to_ib_umem_odp(umem)->page_shift); in ib_umem_find_best_pgsz()
114 bits_per((umem->length - 1 + virt) ^ virt)); in ib_umem_find_best_pgsz()
[all …]
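
Taken together with the mlx5 and pvrdma hits further down, the typical consumer pattern for this core API is: pin with ib_umem_get(), pick a device-supported page size with ib_umem_find_best_pgsz(), walk the DMA blocks, then ib_umem_release(). A hedged sketch of that flow (the page-size bitmap and the page-table write are illustrative placeholders, not any driver's actual code):

    #include <linux/sizes.h>
    #include <rdma/ib_umem.h>
    #include <rdma/ib_verbs.h>

    /* Sketch: typical user-memory registration flow in a verbs driver. */
    static int example_reg_user_mr(struct ib_device *ibdev, u64 start, u64 length,
                                   u64 virt_addr, int access)
    {
        struct ib_block_iter biter;
        struct ib_umem *umem;
        unsigned long pgsz;

        umem = ib_umem_get(ibdev, start, length, access);
        if (IS_ERR(umem))
            return PTR_ERR(umem);

        /* Illustrative bitmap; real drivers pass what their hardware supports. */
        pgsz = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_2M | SZ_1G, virt_addr);
        if (!pgsz) {
            ib_umem_release(umem);
            return -EINVAL;
        }

        rdma_umem_for_each_dma_block(umem, &biter, pgsz) {
            dma_addr_t dma = rdma_block_iter_dma_address(&biter);

            /* Program one device page-table entry per DMA block here. */
            (void)dma;
        }

        ib_umem_release(umem);
        return 0;
    }
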
umem_odp.c
54 umem_odp->umem.is_odp = 1; in ib_init_umem_implicit_odp()
61 struct ib_device *dev = umem_odp->umem.ibdev; in ib_init_umem_odp()
69 umem_odp->umem.is_odp = 1; in ib_init_umem_odp()
72 start = ALIGN_DOWN(umem_odp->umem.address, page_size); in ib_init_umem_odp()
73 if (check_add_overflow(umem_odp->umem.address, in ib_init_umem_odp()
74 (unsigned long)umem_odp->umem.length, &end)) in ib_init_umem_odp()
109 umem_odp->umem.owning_mm, start, in ib_init_umem_odp()
137 struct ib_umem *umem; in ib_umem_odp_alloc_implicit() local
146 umem = &umem_odp->umem; in ib_umem_odp_alloc_implicit()
147 umem->ibdev = device; in ib_umem_odp_alloc_implicit()
[all …]
umem_dmabuf.c
39 start = ALIGN_DOWN(umem_dmabuf->umem.address, PAGE_SIZE); in ib_umem_dmabuf_map_pages()
40 end = ALIGN(umem_dmabuf->umem.address + umem_dmabuf->umem.length, in ib_umem_dmabuf_map_pages()
65 umem_dmabuf->umem.sgt_append.sgt.sgl = umem_dmabuf->first_sg; in ib_umem_dmabuf_map_pages()
66 umem_dmabuf->umem.sgt_append.sgt.nents = nmap; in ib_umem_dmabuf_map_pages()
125 struct ib_umem *umem; in ib_umem_dmabuf_get_with_dma_device() local
148 umem = &umem_dmabuf->umem; in ib_umem_dmabuf_get_with_dma_device()
149 umem->ibdev = device; in ib_umem_dmabuf_get_with_dma_device()
150 umem->length = size; in ib_umem_dmabuf_get_with_dma_device()
151 umem->address = offset; in ib_umem_dmabuf_get_with_dma_device()
152 umem->writable = ib_access_writable(access); in ib_umem_dmabuf_get_with_dma_device()
[all …]
uverbs_std_types_cq.c
72 struct ib_umem *umem = NULL; in UVERBS_HANDLER() local
138 umem = ib_umem_get(ib_dev, buffer_va, buffer_length, IB_ACCESS_LOCAL_WRITE); in UVERBS_HANDLER()
139 if (IS_ERR(umem)) { in UVERBS_HANDLER()
140 ret = PTR_ERR(umem); in UVERBS_HANDLER()
169 umem = &umem_dmabuf->umem; in UVERBS_HANDLER()
180 ib_umem_release(umem); in UVERBS_HANDLER()
194 ret = umem ? ib_dev->ops.create_cq_umem(cq, &attr, umem, attrs) : in UVERBS_HANDLER()
209 ib_umem_release(umem); in UVERBS_HANDLER()
/linux/tools/testing/selftests/bpf/
xsk.c
72 struct xsk_umem *umem; member
92 int xsk_umem__fd(const struct xsk_umem *umem) in xsk_umem__fd() argument
94 return umem ? umem->fd : -EINVAL; in xsk_umem__fd()
163 static int xsk_create_umem_rings(struct xsk_umem *umem, int fd, in xsk_create_umem_rings() argument
172 &umem->config.fill_size, in xsk_create_umem_rings()
173 sizeof(umem->config.fill_size)); in xsk_create_umem_rings()
178 &umem->config.comp_size, in xsk_create_umem_rings()
179 sizeof(umem->config.comp_size)); in xsk_create_umem_rings()
187 map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64), in xsk_create_umem_rings()
193 fill->mask = umem->config.fill_size - 1; in xsk_create_umem_rings()
[all …]
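
xsk_create_umem_rings() above wraps the raw AF_XDP uAPI from <linux/if_xdp.h>. A user-space sketch of the same fill-ring sequence (register the umem, set the ring size, query the mmap offsets, map the ring); the completion ring uses XDP_UMEM_COMPLETION_RING and XDP_UMEM_PGOFF_COMPLETION_RING the same way, and the chunk size below is only illustrative:

    #include <linux/if_xdp.h>
    #include <stdint.h>
    #include <sys/mman.h>
    #include <sys/socket.h>

    #ifndef SOL_XDP
    #define SOL_XDP 283
    #endif

    /* Sketch: register umem_area on AF_XDP socket fd and mmap its fill ring. */
    static void *map_fill_ring(int fd, void *umem_area, __u64 umem_len, __u32 fill_size)
    {
        struct xdp_umem_reg reg = {
            .addr = (uintptr_t)umem_area,
            .len = umem_len,
            .chunk_size = 4096,    /* illustrative frame size */
        };
        struct xdp_mmap_offsets off;
        socklen_t optlen = sizeof(off);

        if (setsockopt(fd, SOL_XDP, XDP_UMEM_REG, &reg, sizeof(reg)) ||
            setsockopt(fd, SOL_XDP, XDP_UMEM_FILL_RING, &fill_size, sizeof(fill_size)) ||
            getsockopt(fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen))
            return MAP_FAILED;

        /* Ring = producer/consumer header plus fill_size 64-bit address slots. */
        return mmap(NULL, off.fr.desc + fill_size * sizeof(__u64),
                    PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE, fd,
                    XDP_UMEM_PGOFF_FILL_RING);
    }
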
xskxceiver.c
124 struct xsk_umem_info *umem; in ifobj_zc_avail() local
133 umem = calloc(1, sizeof(struct xsk_umem_info)); in ifobj_zc_avail()
134 if (!umem) { in ifobj_zc_avail()
138 umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; in ifobj_zc_avail()
139 ret = xsk_configure_umem(ifobject, umem, bufs, umem_sz); in ifobj_zc_avail()
149 ret = xsk_configure_socket(xsk, umem, ifobject, false); in ifobj_zc_avail()
156 munmap(umem->buffer, umem_sz); in ifobj_zc_avail()
157 xsk_umem__delete(umem->umem); in ifobj_zc_avail()
158 free(umem); in ifobj_zc_avail()
xsk.h
187 int xsk_umem__fd(const struct xsk_umem *umem);
219 int xsk_umem__create(struct xsk_umem **umem,
226 struct xsk_umem *umem,
232 __u32 queue_id, struct xsk_umem *umem,
239 /* Returns 0 for success and -EBUSY if the umem is still in use. */
240 int xsk_umem__delete(struct xsk_umem *umem);
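
A short user-space sketch of the helper API declared above, assuming the libbpf-style signature of xsk_umem__create() carried by this header (ring structs are owned by the caller, a NULL config selects the default ring sizes, and the frame count is illustrative):

    #include <stddef.h>
    #include <sys/mman.h>
    #include "xsk.h"    /* the selftests helper header shown above */

    #define NUM_FRAMES 4096

    /* Sketch: back a umem with anonymous, page-aligned memory. */
    static struct xsk_umem *create_example_umem(struct xsk_ring_prod *fill,
                                                struct xsk_ring_cons *comp)
    {
        size_t len = (size_t)NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE;
        struct xsk_umem *umem = NULL;
        void *bufs;

        bufs = mmap(NULL, len, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (bufs == MAP_FAILED)
            return NULL;

        if (xsk_umem__create(&umem, bufs, len, fill, comp, NULL)) {
            munmap(bufs, len);
            return NULL;
        }
        return umem;
    }

Teardown is xsk_umem__delete() once no socket still uses the umem (it returns -EBUSY while in use, per the comment above), followed by munmap() of the buffer.
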
/linux/drivers/infiniband/sw/siw/
siw_mem.c
42 void siw_umem_release(struct siw_umem *umem) in siw_umem_release() argument
44 int i, num_pages = umem->num_pages; in siw_umem_release()
46 if (umem->base_mem) in siw_umem_release()
47 ib_umem_release(umem->base_mem); in siw_umem_release()
50 kfree(umem->page_chunk[i].plist); in siw_umem_release()
53 kfree(umem->page_chunk); in siw_umem_release()
54 kfree(umem); in siw_umem_release()
116 siw_umem_release(mem->umem); in siw_free_mem()
336 struct siw_umem *umem; in siw_umem_get() local
350 umem = kzalloc(sizeof(*umem), GFP_KERNEL); in siw_umem_get()
[all …]
siw_mem.h
11 void siw_umem_release(struct siw_umem *umem);
58 static inline struct page *siw_get_upage(struct siw_umem *umem, u64 addr) in siw_get_upage() argument
60 unsigned int page_idx = (addr - umem->fp_addr) >> PAGE_SHIFT, in siw_get_upage()
64 if (likely(page_idx < umem->num_pages)) in siw_get_upage()
65 return umem->page_chunk[chunk_idx].plist[page_in_chunk]; in siw_get_upage()
/linux/drivers/infiniband/hw/mlx5/
mem.c
40 void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas, in mlx5_ib_populate_pas() argument
45 rdma_umem_for_each_dma_block (umem, &biter, page_size) { in mlx5_ib_populate_pas()
58 struct ib_umem *umem, unsigned long pgsz_bitmap, in __mlx5_umem_find_best_quantized_pgoff() argument
66 page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, pgoff_bitmask); in __mlx5_umem_find_best_quantized_pgoff()
77 page_offset = ib_umem_dma_offset(umem, page_size); in __mlx5_umem_find_best_quantized_pgoff()
80 page_offset = ib_umem_dma_offset(umem, page_size); in __mlx5_umem_find_best_quantized_pgoff()
doorbell.c
42 struct ib_umem *umem; member
69 page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK, in mlx5_ib_db_map_user()
71 if (IS_ERR(page->umem)) { in mlx5_ib_db_map_user()
72 err = PTR_ERR(page->umem); in mlx5_ib_db_map_user()
82 db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + in mlx5_ib_db_map_user()
100 ib_umem_release(db->u.user_page->umem); in mlx5_ib_db_unmap_user()
srq_cmd.c
103 ib_umem_num_dma_blocks(in->umem, page_size) * sizeof(u64))) in __set_srq_page_size()
110 (in)->umem, typ, log_pgsz_fld, \
125 if (in->umem) { in create_srq_cmd()
142 if (in->umem) in create_srq_cmd()
144 in->umem, in create_srq_cmd()
228 if (in->umem) { in create_xrc_srq_cmd()
247 if (in->umem) in create_xrc_srq_cmd()
249 in->umem, in create_xrc_srq_cmd()
341 if (in->umem) { in create_rmp_cmd()
365 if (in->umem) in create_rmp_cmd()
[all …]
/linux/drivers/infiniband/sw/rxe/
rxe_odp.c
46 struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem); in rxe_odp_do_pagefault_and_lock()
51 if (umem_odp->umem.writable && !(flags & RXE_PAGEFAULT_RDONLY)) in rxe_odp_do_pagefault_and_lock()
66 struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem); in rxe_odp_init_pages()
69 ret = rxe_odp_do_pagefault_and_lock(mr, mr->umem->address, in rxe_odp_init_pages()
70 mr->umem->length, in rxe_odp_init_pages()
109 mr->umem = &umem_odp->umem; in rxe_odp_mr_init_user()
113 mr->page_offset = ib_umem_offset(&umem_odp->umem); in rxe_odp_mr_init_user()
162 struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem); in rxe_odp_map_range_and_lock()
194 struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem); in __rxe_odp_mr_copy()
230 struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem); in rxe_odp_mr_copy()
[all …]
/linux/drivers/infiniband/hw/mlx4/
doorbell.c
40 struct ib_umem *umem; member
67 page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK, in mlx4_ib_db_map_user()
69 if (IS_ERR(page->umem)) { in mlx4_ib_db_map_user()
70 err = PTR_ERR(page->umem); in mlx4_ib_db_map_user()
78 db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + in mlx4_ib_db_map_user()
95 ib_umem_release(db->u.user_page->umem); in mlx4_ib_db_unmap_user()
srq.c
117 srq->umem = in mlx4_ib_create_srq()
119 if (IS_ERR(srq->umem)) in mlx4_ib_create_srq()
120 return PTR_ERR(srq->umem); in mlx4_ib_create_srq()
123 dev->dev, ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE), in mlx4_ib_create_srq()
128 err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem); in mlx4_ib_create_srq()
213 if (!srq->umem) in mlx4_ib_create_srq()
215 ib_umem_release(srq->umem); in mlx4_ib_create_srq()
289 ib_umem_release(msrq->umem); in mlx4_ib_destroy_srq()
/linux/include/rdma/
ib_umem_odp.h
14 struct ib_umem umem; member
41 static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem) in to_ib_umem_odp() argument
43 return container_of(umem, struct ib_umem_odp, umem); in to_ib_umem_odp()
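
to_ib_umem_odp() above is the standard container_of() embedding idiom: struct ib_umem_odp embeds a struct ib_umem, so a pointer to the embedded member can be converted back to its enclosing structure. A self-contained user-space illustration with toy types (not the kernel definitions):

    #include <stddef.h>
    #include <stdio.h>

    /* Toy stand-ins for the kernel structs, just to show the idiom. */
    struct umem { unsigned long length; };
    struct umem_odp {
        struct umem umem;   /* embedded member, as in struct ib_umem_odp */
        int page_shift;
    };

    /* Same shape as the kernel's container_of(): recover the outer struct. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static struct umem_odp *to_umem_odp(struct umem *u)
    {
        return container_of(u, struct umem_odp, umem);
    }

    int main(void)
    {
        struct umem_odp odp = { .umem = { .length = 4096 }, .page_shift = 12 };
        struct umem *inner = &odp.umem;

        /* Prints 12: the enclosing umem_odp was recovered from the inner pointer. */
        printf("%d\n", to_umem_odp(inner)->page_shift);
        return 0;
    }
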
/linux/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_srq.c
149 srq->umem = ib_umem_get(ibsrq->device, ucmd.buf_addr, ucmd.buf_size, 0); in pvrdma_create_srq()
150 if (IS_ERR(srq->umem)) { in pvrdma_create_srq()
151 ret = PTR_ERR(srq->umem); in pvrdma_create_srq()
155 srq->npages = ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE); in pvrdma_create_srq()
171 pvrdma_page_dir_insert_umem(&srq->pdir, srq->umem, 0); in pvrdma_create_srq()
209 ib_umem_release(srq->umem); in pvrdma_create_srq()
229 ib_umem_release(srq->umem); in pvrdma_free_srq()
pvrdma_cq.c
142 cq->umem = ib_umem_get(ibdev, ucmd.buf_addr, ucmd.buf_size, in pvrdma_create_cq()
144 if (IS_ERR(cq->umem)) { in pvrdma_create_cq()
145 ret = PTR_ERR(cq->umem); in pvrdma_create_cq()
149 npages = ib_umem_num_dma_blocks(cq->umem, PAGE_SIZE); in pvrdma_create_cq()
177 pvrdma_page_dir_insert_umem(&cq->pdir, cq->umem, 0); in pvrdma_create_cq()
220 ib_umem_release(cq->umem); in pvrdma_create_cq()
232 ib_umem_release(cq->umem); in pvrdma_free_cq()
/linux/drivers/infiniband/hw/hns/
hns_roce_db.c
32 page->umem = ib_umem_get(context->ibucontext.device, page_addr, in hns_roce_db_map_user()
34 if (IS_ERR(page->umem)) { in hns_roce_db_map_user()
35 ret = PTR_ERR(page->umem); in hns_roce_db_map_user()
44 db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + offset; in hns_roce_db_map_user()
45 db->virt_addr = sg_virt(page->umem->sgt_append.sgt.sgl) + offset; in hns_roce_db_map_user()
63 ib_umem_release(db->u.user_page->umem); in hns_roce_db_unmap_user()
/linux/drivers/vdpa/vdpa_user/
vduse_dev.c
117 struct vduse_umem *umem; member
1046 if (!dev->umem) in vduse_dev_dereg_umem()
1053 if (dev->umem->iova != iova || size != dev->domain->bounce_size) in vduse_dev_dereg_umem()
1057 unpin_user_pages_dirty_lock(dev->umem->pages, in vduse_dev_dereg_umem()
1058 dev->umem->npages, true); in vduse_dev_dereg_umem()
1059 atomic64_sub(dev->umem->npages, &dev->umem->mm->pinned_vm); in vduse_dev_dereg_umem()
1060 mmdrop(dev->umem->mm); in vduse_dev_dereg_umem()
1061 vfree(dev->umem->pages); in vduse_dev_dereg_umem()
1062 kfree(dev->umem); in vduse_dev_dereg_umem()
1063 dev->umem = NULL; in vduse_dev_dereg_umem()
[all …]
