Home
last modified time | relevance | path

Searched refs:umem (Results 1 – 25 of 52) sorted by relevance

123

/linux/net/xdp/
H A Dxdp_umem.c24 static void xdp_umem_unpin_pages(struct xdp_umem *umem) in xdp_umem_unpin_pages() argument
26 unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true); in xdp_umem_unpin_pages()
28 kvfree(umem->pgs); in xdp_umem_unpin_pages()
29 umem->pgs = NULL; in xdp_umem_unpin_pages()
32 static void xdp_umem_unaccount_pages(struct xdp_umem *umem) in xdp_umem_unaccount_pages() argument
34 if (umem->user) { in xdp_umem_unaccount_pages()
35 atomic_long_sub(umem->npgs, &umem->user->locked_vm); in xdp_umem_unaccount_pages()
36 free_uid(umem->user); in xdp_umem_unaccount_pages()
40 static void xdp_umem_addr_unmap(struct xdp_umem *umem) in xdp_umem_addr_unmap() argument
42 vunmap(umem->addrs); in xdp_umem_addr_unmap()
[all …]
H A Dxsk_buff_pool.c55 struct xdp_umem *umem) in xp_create_and_assign_umem() argument
57 bool unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG; in xp_create_and_assign_umem()
62 entries = unaligned ? umem->chunks : 0; in xp_create_and_assign_umem()
67 pool->heads = kvzalloc_objs(*pool->heads, umem->chunks); in xp_create_and_assign_umem()
75 pool->chunk_mask = ~((u64)umem->chunk_size - 1); in xp_create_and_assign_umem()
76 pool->addrs_cnt = umem->size; in xp_create_and_assign_umem()
77 pool->heads_cnt = umem->chunks; in xp_create_and_assign_umem()
78 pool->free_heads_cnt = umem->chunks; in xp_create_and_assign_umem()
79 pool->headroom = umem->headroom; in xp_create_and_assign_umem()
80 pool->chunk_size = umem->chunk_size; in xp_create_and_assign_umem()
[all …]
H A Dxsk_diag.c50 struct xdp_umem *umem = xs->umem; in xsk_diag_put_umem() local
54 if (!umem) in xsk_diag_put_umem()
57 du.id = umem->id; in xsk_diag_put_umem()
58 du.size = umem->size; in xsk_diag_put_umem()
59 du.num_pages = umem->npgs; in xsk_diag_put_umem()
60 du.chunk_size = umem->chunk_size; in xsk_diag_put_umem()
61 du.headroom = umem->headroom; in xsk_diag_put_umem()
65 if (umem->zc) in xsk_diag_put_umem()
67 du.refs = refcount_read(&umem->users); in xsk_diag_put_umem()
[all …]
H A Dxdp_umem.h11 void xdp_get_umem(struct xdp_umem *umem);
12 void xdp_put_umem(struct xdp_umem *umem, bool defer_cleanup);
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
H A Dumem.c40 struct nvkm_umem *umem; in nvkm_umem_search() local
46 list_for_each_entry(umem, &master->umem, head) { in nvkm_umem_search()
47 if (umem->object.object == handle) { in nvkm_umem_search()
48 memory = nvkm_memory_ref(umem->memory); in nvkm_umem_search()
55 umem = nvkm_umem(object); in nvkm_umem_search()
56 memory = nvkm_memory_ref(umem->memory); in nvkm_umem_search()
65 struct nvkm_umem *umem = nvkm_umem(object); in nvkm_umem_unmap() local
67 if (!umem->map) in nvkm_umem_unmap()
70 if (umem->io) { in nvkm_umem_unmap()
71 if (!IS_ERR(umem->bar)) { in nvkm_umem_unmap()
[all …]
/linux/drivers/infiniband/core/
H A Dumem.c50 static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty) in __ib_umem_release() argument
52 bool make_dirty = umem->writable && dirty; in __ib_umem_release()
57 ib_dma_unmap_sgtable_attrs(dev, &umem->sgt_append.sgt, in __ib_umem_release()
58 DMA_BIDIRECTIONAL, umem->dma_attrs); in __ib_umem_release()
60 for_each_sgtable_sg(&umem->sgt_append.sgt, sg, i) { in __ib_umem_release()
68 sg_free_append_table(&umem->sgt_append); in __ib_umem_release()
85 unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, in ib_umem_find_best_pgsz() argument
97 umem->iova = va = virt; in ib_umem_find_best_pgsz()
99 if (umem->is_odp) { in ib_umem_find_best_pgsz()
100 unsigned int page_size = BIT(to_ib_umem_odp(umem)->page_shift); in ib_umem_find_best_pgsz()
[all …]
H A Dumem_odp.c54 umem_odp->umem.is_odp = 1; in ib_init_umem_implicit_odp()
61 struct ib_device *dev = umem_odp->umem.ibdev; in ib_init_umem_odp()
69 umem_odp->umem.is_odp = 1; in ib_init_umem_odp()
72 start = ALIGN_DOWN(umem_odp->umem.address, page_size); in ib_init_umem_odp()
73 if (check_add_overflow(umem_odp->umem.address, in ib_init_umem_odp()
74 (unsigned long)umem_odp->umem.length, &end)) in ib_init_umem_odp()
109 umem_odp->umem.owning_mm, start, in ib_init_umem_odp()
137 struct ib_umem *umem; in ib_umem_odp_alloc_implicit() local
146 umem = &umem_odp->umem; in ib_umem_odp_alloc_implicit()
147 umem->ibdev = device; in ib_umem_odp_alloc_implicit()
[all …]
H A Dumem_dmabuf.c39 start = ALIGN_DOWN(umem_dmabuf->umem.address, PAGE_SIZE); in ib_umem_dmabuf_map_pages()
40 end = ALIGN(umem_dmabuf->umem.address + umem_dmabuf->umem.length, in ib_umem_dmabuf_map_pages()
65 umem_dmabuf->umem.sgt_append.sgt.sgl = umem_dmabuf->first_sg; in ib_umem_dmabuf_map_pages()
66 umem_dmabuf->umem.sgt_append.sgt.nents = nmap; in ib_umem_dmabuf_map_pages()
125 struct ib_umem *umem; in ib_umem_dmabuf_get_with_dma_device() local
145 umem = &umem_dmabuf->umem; in ib_umem_dmabuf_get_with_dma_device()
146 umem->ibdev = device; in ib_umem_dmabuf_get_with_dma_device()
147 umem->length = size; in ib_umem_dmabuf_get_with_dma_device()
148 umem->address = offset; in ib_umem_dmabuf_get_with_dma_device()
149 umem->writable = ib_access_writable(access); in ib_umem_dmabuf_get_with_dma_device()
[all …]
/linux/tools/testing/selftests/bpf/
H A Dxsk.c72 struct xsk_umem *umem; member
92 int xsk_umem__fd(const struct xsk_umem *umem) in xsk_umem__fd() argument
94 return umem ? umem->fd : -EINVAL; in xsk_umem__fd()
163 static int xsk_create_umem_rings(struct xsk_umem *umem, int fd, in xsk_create_umem_rings() argument
172 &umem->config.fill_size, in xsk_create_umem_rings()
173 sizeof(umem->config.fill_size)); in xsk_create_umem_rings()
178 &umem->config.comp_size, in xsk_create_umem_rings()
179 sizeof(umem->config.comp_size)); in xsk_create_umem_rings()
187 map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64), in xsk_create_umem_rings()
193 fill->mask = umem->config.fill_size - 1; in xsk_create_umem_rings()
[all …]
H A Dxskxceiver.c125 struct xsk_umem_info *umem; in ifobj_zc_avail() local
134 umem = calloc(1, sizeof(struct xsk_umem_info)); in ifobj_zc_avail()
135 if (!umem) { in ifobj_zc_avail()
139 umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; in ifobj_zc_avail()
140 ret = xsk_configure_umem(ifobject, umem, bufs, umem_sz); in ifobj_zc_avail()
150 ret = xsk_configure_socket(xsk, umem, ifobject, false); in ifobj_zc_avail()
157 munmap(umem->buffer, umem_sz); in ifobj_zc_avail()
158 xsk_umem__delete(umem->umem); in ifobj_zc_avail()
159 free(umem); in ifobj_zc_avail()
H A Dxsk.h187 int xsk_umem__fd(const struct xsk_umem *umem);
219 int xsk_umem__create(struct xsk_umem **umem,
226 struct xsk_umem *umem,
232 __u32 queue_id, struct xsk_umem *umem,
239 /* Returns 0 for success and -EBUSY if the umem is still in use. */
240 int xsk_umem__delete(struct xsk_umem *umem);
H A Dxskxceiver.h75 struct xsk_umem *umem; global() member
90 struct xsk_umem_info *umem; global() member
134 struct xsk_umem_info *umem; global() member
/linux/tools/testing/selftests/bpf/prog_tests/
H A Dtest_xsk.c70 return !!ifobj->umem->umem; in is_umem_valid()
78 static u64 umem_size(struct xsk_umem_info *umem) in umem_size() argument
80 return umem->num_frames * umem->frame_size; in umem_size()
83 int xsk_configure_umem(struct ifobject *ifobj, struct xsk_umem_info *umem, void *buffer, in xsk_configure_umem() argument
89 .frame_size = umem->frame_size, in xsk_configure_umem()
90 .frame_headroom = umem->frame_headroom, in xsk_configure_umem()
95 if (umem->fill_size) in xsk_configure_umem()
96 cfg.fill_size = umem->fill_size; in xsk_configure_umem()
98 if (umem->comp_size) in xsk_configure_umem()
99 cfg.comp_size = umem->comp_size; in xsk_configure_umem()
[all …]
/linux/drivers/infiniband/sw/siw/
H A Dsiw_mem.c42 void siw_umem_release(struct siw_umem *umem) in siw_umem_release() argument
44 int i, num_pages = umem->num_pages; in siw_umem_release()
46 if (umem->base_mem) in siw_umem_release()
47 ib_umem_release(umem->base_mem); in siw_umem_release()
50 kfree(umem->page_chunk[i].plist); in siw_umem_release()
53 kfree(umem->page_chunk); in siw_umem_release()
54 kfree(umem); in siw_umem_release()
116 siw_umem_release(mem->umem); in siw_free_mem()
336 struct siw_umem *umem; in siw_umem_get() local
350 umem = kzalloc_obj(*umem); in siw_umem_get()
[all …]
H A Dsiw_mem.h11 void siw_umem_release(struct siw_umem *umem);
58 static inline struct page *siw_get_upage(struct siw_umem *umem, u64 addr) in siw_get_upage() argument
60 unsigned int page_idx = (addr - umem->fp_addr) >> PAGE_SHIFT, in siw_get_upage()
64 if (likely(page_idx < umem->num_pages)) in siw_get_upage()
65 return umem->page_chunk[chunk_idx].plist[page_in_chunk]; in siw_get_upage()
/linux/drivers/infiniband/hw/mlx4/
H A Ddoorbell.c40 struct ib_umem *umem; member
67 page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK, in mlx4_ib_db_map_user()
69 if (IS_ERR(page->umem)) { in mlx4_ib_db_map_user()
70 err = PTR_ERR(page->umem); in mlx4_ib_db_map_user()
78 db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + in mlx4_ib_db_map_user()
95 ib_umem_release(db->u.user_page->umem); in mlx4_ib_db_unmap_user()
/linux/drivers/infiniband/hw/mlx5/
H A Ddoorbell.c42 struct ib_umem *umem; member
69 page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK, in mlx5_ib_db_map_user()
71 if (IS_ERR(page->umem)) { in mlx5_ib_db_map_user()
72 err = PTR_ERR(page->umem); in mlx5_ib_db_map_user()
82 db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + in mlx5_ib_db_map_user()
100 ib_umem_release(db->u.user_page->umem); in mlx5_ib_db_unmap_user()
/linux/drivers/infiniband/hw/vmw_pvrdma/
H A Dpvrdma_mr.c119 struct ib_umem *umem; in pvrdma_reg_user_mr() local
134 umem = ib_umem_get(pd->device, start, length, access_flags); in pvrdma_reg_user_mr()
135 if (IS_ERR(umem)) { in pvrdma_reg_user_mr()
138 return ERR_CAST(umem); in pvrdma_reg_user_mr()
141 npages = ib_umem_num_dma_blocks(umem, PAGE_SIZE); in pvrdma_reg_user_mr()
157 mr->umem = umem; in pvrdma_reg_user_mr()
166 ret = pvrdma_page_dir_insert_umem(&mr->pdir, mr->umem, 0); in pvrdma_reg_user_mr()
195 ib_umem_release(umem); in pvrdma_reg_user_mr()
262 mr->umem = NULL; in pvrdma_alloc_mr()
299 ib_umem_release(mr->umem); in pvrdma_dereg_mr()
/linux/include/rdma/
H A Dib_umem_odp.h14 struct ib_umem umem; member
41 static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem) in to_ib_umem_odp() argument
43 return container_of(umem, struct ib_umem_odp, umem); in to_ib_umem_odp()
/linux/drivers/infiniband/hw/hns/
H A Dhns_roce_mr.c571 if (mtr->umem) { in mtr_free_bufs()
572 ib_umem_release(mtr->umem); in mtr_free_bufs()
573 mtr->umem = NULL; in mtr_free_bufs()
594 mtr->umem = ib_umem_get(ibdev, user_addr, total_size, in mtr_alloc_bufs()
596 if (IS_ERR(mtr->umem)) { in mtr_alloc_bufs()
598 mtr->umem); in mtr_alloc_bufs()
602 mtr->umem = NULL; in mtr_alloc_bufs()
657 if (mtr->umem) in mtr_map_bufs()
659 mtr->umem, page_shift); in mtr_map_bufs()
675 mtr->umem ? "umtr" : "kmtr", ret, npage); in mtr_map_bufs()
[all …]
H A Dhns_roce_db.c32 page->umem = ib_umem_get(context->ibucontext.device, page_addr, in hns_roce_db_map_user()
34 if (IS_ERR(page->umem)) { in hns_roce_db_map_user()
35 ret = PTR_ERR(page->umem); in hns_roce_db_map_user()
44 db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + offset; in hns_roce_db_map_user()
45 db->virt_addr = sg_virt(page->umem->sgt_append.sgt.sgl) + offset; in hns_roce_db_map_user()
63 ib_umem_release(db->u.user_page->umem); in hns_roce_db_unmap_user()
/linux/drivers/vdpa/vdpa_user/
H A Dvduse_dev.c102 struct vduse_umem *umem; member
1187 if (!dev->as[asid].umem) in vduse_dev_dereg_umem()
1194 if (dev->as[asid].umem->iova != iova || in vduse_dev_dereg_umem()
1199 unpin_user_pages_dirty_lock(dev->as[asid].umem->pages, in vduse_dev_dereg_umem()
1200 dev->as[asid].umem->npages, true); in vduse_dev_dereg_umem()
1201 atomic64_sub(dev->as[asid].umem->npages, &dev->as[asid].umem->mm->pinned_vm); in vduse_dev_dereg_umem()
1202 mmdrop(dev->as[asid].umem->mm); in vduse_dev_dereg_umem()
1203 vfree(dev->as[asid].umem->pages); in vduse_dev_dereg_umem()
1204 kfree(dev->as[asid].umem); in vduse_dev_dereg_umem()
1205 dev->as[asid].umem = NULL; in vduse_dev_dereg_umem()
[all …]
/linux/include/net/
H A Dxdp_sock_drv.h62 struct xdp_umem *umem = pool->umem; in xsk_pool_get_rx_frame_size() local
69 mbuf = pool->dev && (umem->flags & XDP_UMEM_SG_FLAG); in xsk_pool_get_rx_frame_size()
101 struct xdp_umem *umem = pool->umem; in xsk_pool_dma_map() local
103 return xp_dma_map(pool, dev, attrs, umem->pgs, umem->npgs); in xsk_pool_dma_map()
/linux/drivers/infiniband/sw/rxe/
H A Drxe_mr.c195 struct ib_umem *umem; in rxe_mr_init_user() local
200 umem = ib_umem_get(&rxe->ib_dev, start, length, access); in rxe_mr_init_user()
201 if (IS_ERR(umem)) { in rxe_mr_init_user()
203 (int)PTR_ERR(umem)); in rxe_mr_init_user()
204 return PTR_ERR(umem); in rxe_mr_init_user()
207 err = alloc_mr_page_info(mr, ib_umem_num_pages(umem)); in rxe_mr_init_user()
211 err = rxe_mr_fill_pages_from_sgt(mr, &umem->sgt_append.sgt); in rxe_mr_init_user()
215 mr->umem = umem; in rxe_mr_init_user()
223 ib_umem_release(umem); in rxe_mr_init_user()
812 ib_umem_release(mr->umem); in rxe_mr_cleanup()
/linux/drivers/infiniband/sw/rdmavt/
H A Dmr.c343 struct ib_umem *umem; in rvt_reg_user_mr() local
354 umem = ib_umem_get(pd->device, start, length, mr_access_flags); in rvt_reg_user_mr()
355 if (IS_ERR(umem)) in rvt_reg_user_mr()
356 return ERR_CAST(umem); in rvt_reg_user_mr()
358 n = ib_umem_num_pages(umem); in rvt_reg_user_mr()
369 mr->mr.offset = ib_umem_offset(umem); in rvt_reg_user_mr()
371 mr->umem = umem; in rvt_reg_user_mr()
376 for_each_sgtable_page (&umem->sgt_append.sgt, &sg_iter, 0) { in rvt_reg_user_mr()
398 ib_umem_release(umem); in rvt_reg_user_mr()
526 ib_umem_release(mr->umem); in rvt_dereg_mr()
[all …]

123