Searched refs:umem (Results 1 – 25 of 83) sorted by relevance

/linux/net/xdp/
xdp_umem.c 24 static void xdp_umem_unpin_pages(struct xdp_umem *umem) in xdp_umem_unpin_pages() argument
26 unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true); in xdp_umem_unpin_pages()
28 kvfree(umem->pgs); in xdp_umem_unpin_pages()
29 umem->pgs = NULL; in xdp_umem_unpin_pages()
32 static void xdp_umem_unaccount_pages(struct xdp_umem *umem) in xdp_umem_unaccount_pages() argument
34 if (umem->user) { in xdp_umem_unaccount_pages()
35 atomic_long_sub(umem->npgs, &umem->user->locked_vm); in xdp_umem_unaccount_pages()
36 free_uid(umem->user); in xdp_umem_unaccount_pages()
40 static void xdp_umem_addr_unmap(struct xdp_umem *umem) in xdp_umem_addr_unmap() argument
42 vunmap(umem->addrs); in xdp_umem_addr_unmap()
[all …]
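
The xdp_umem.c hits above are the teardown half of UMEM management: pages pinned at registration are released with unpin_user_pages_dirty_lock(), then subtracted from the owning user's locked_vm so the RLIMIT_MEMLOCK budget is restored. A minimal kernel-style sketch of that pairing; struct my_umem is an illustrative stand-in, not the kernel's actual struct xdp_umem layout:

/* Sketch of the unpin + unaccount pattern in xdp_umem_unpin_pages()
 * and xdp_umem_unaccount_pages().
 */
struct my_umem {
        struct page **pgs;
        u32 npgs;
        struct user_struct *user;
};

static void my_umem_teardown(struct my_umem *umem)
{
        /* Mark pages dirty and drop the pins taken at setup time. */
        unpin_user_pages_dirty_lock(umem->pgs, umem->npgs, true);
        kvfree(umem->pgs);
        umem->pgs = NULL;

        /* Return the pages to the user's RLIMIT_MEMLOCK accounting. */
        if (umem->user) {
                atomic_long_sub(umem->npgs, &umem->user->locked_vm);
                free_uid(umem->user);
        }
}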
xsk_buff_pool.c 53 struct xdp_umem *umem) in xp_create_and_assign_umem() argument
55 bool unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG; in xp_create_and_assign_umem()
60 entries = unaligned ? umem->chunks : 0; in xp_create_and_assign_umem()
65 pool->heads = kvzalloc_objs(*pool->heads, umem->chunks); in xp_create_and_assign_umem()
73 pool->chunk_mask = ~((u64)umem->chunk_size - 1); in xp_create_and_assign_umem()
74 pool->addrs_cnt = umem->size; in xp_create_and_assign_umem()
75 pool->heads_cnt = umem->chunks; in xp_create_and_assign_umem()
76 pool->free_heads_cnt = umem->chunks; in xp_create_and_assign_umem()
77 pool->headroom = umem->headroom; in xp_create_and_assign_umem()
78 pool->chunk_size = umem->chunk_size; in xp_create_and_assign_umem()
[all …]
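
The chunk_mask assignment above relies on aligned-mode chunk sizes being powers of two: ~((u64)chunk_size - 1) masks any buffer address down to the start of its chunk. A small userspace check of just that arithmetic (values are arbitrary):

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint64_t chunk_size = 2048;                   /* power of two */
        uint64_t chunk_mask = ~(chunk_size - 1);

        assert((chunk_size & (chunk_size - 1)) == 0); /* precondition */
        assert((5000 & chunk_mask) == 4096);  /* 5000 sits in the chunk at 4096 */
        return 0;
}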
xsk_diag.c 50 struct xdp_umem *umem = xs->umem; in xsk_diag_put_umem() local
54 if (!umem) in xsk_diag_put_umem()
57 du.id = umem->id; in xsk_diag_put_umem()
58 du.size = umem->size; in xsk_diag_put_umem()
59 du.num_pages = umem->npgs; in xsk_diag_put_umem()
60 du.chunk_size = umem->chunk_size; in xsk_diag_put_umem()
61 du.headroom = umem->headroom; in xsk_diag_put_umem()
65 if (umem->zc) in xsk_diag_put_umem()
67 du.refs = refcount_read(&umem->users); in xsk_diag_put_umem()
/linux/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
umem.c 40 struct nvkm_umem *umem; in nvkm_umem_search() local
46 list_for_each_entry(umem, &master->umem, head) { in nvkm_umem_search()
47 if (umem->object.object == handle) { in nvkm_umem_search()
48 memory = nvkm_memory_ref(umem->memory); in nvkm_umem_search()
55 umem = nvkm_umem(object); in nvkm_umem_search()
56 memory = nvkm_memory_ref(umem->memory); in nvkm_umem_search()
65 struct nvkm_umem *umem = nvkm_umem(object); in nvkm_umem_unmap() local
67 if (!umem->map) in nvkm_umem_unmap()
70 if (umem->io) { in nvkm_umem_unmap()
71 if (!IS_ERR(umem->bar)) { in nvkm_umem_unmap()
[all …]
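
nvkm_umem_search() is a lookup-by-handle: walk the owner's umem list and take a reference on the matching memory object before handing it back. A sketch of that loop with the original's locking and fallback path elided; struct nvkm_owner here is a hypothetical stand-in for the object that actually holds the list:

static struct nvkm_memory *
find_umem_memory(struct nvkm_owner *master, u64 handle)
{
        struct nvkm_memory *memory = NULL;
        struct nvkm_umem *umem;

        list_for_each_entry(umem, &master->umem, head) {
                if (umem->object.object == handle) {
                        /* Take a reference before the list walk ends. */
                        memory = nvkm_memory_ref(umem->memory);
                        break;
                }
        }
        return memory;
}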
/linux/drivers/infiniband/core/
umem.c 50 static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty) in __ib_umem_release() argument
52 bool make_dirty = umem->writable && dirty; in __ib_umem_release()
57 ib_dma_unmap_sgtable_attrs(dev, &umem->sgt_append.sgt, in __ib_umem_release()
60 for_each_sgtable_sg(&umem->sgt_append.sgt, sg, i) { in __ib_umem_release()
68 sg_free_append_table(&umem->sgt_append); in __ib_umem_release()
85 unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem, in ib_umem_find_best_pgsz() argument
97 umem->iova = va = virt; in ib_umem_find_best_pgsz()
99 if (umem->is_odp) { in ib_umem_find_best_pgsz()
100 unsigned int page_size = BIT(to_ib_umem_odp(umem)->page_shift); in ib_umem_find_best_pgsz()
114 bits_per((umem->length - 1 + virt) ^ virt)); in ib_umem_find_best_pgsz()
[all …]
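
The key line in ib_umem_find_best_pgsz() is the XOR bound: any bit in which the first and last virtual addresses of the range differ caps how large an aligned page size can still be considered. A userspace sketch of just that arithmetic; bits_per() is a minimal stand-in for the kernel helper of the same name:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's bits_per(): bits needed to represent v. */
static unsigned int bits_per(uint64_t v)
{
        unsigned int b = 0;

        while (v) {
                b++;
                v >>= 1;
        }
        return b;
}

int main(void)
{
        uint64_t virt = 0x201000, length = 0x5000;
        uint64_t last = virt + length - 1;

        /* Candidate page sizes above this bit get masked out. */
        printf("page sizes considered: up to 2^%u\n", bits_per(last ^ virt));
        return 0;
}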
umem_odp.c 54 umem_odp->umem.is_odp = 1; in ib_init_umem_implicit_odp()
61 struct ib_device *dev = umem_odp->umem.ibdev; in ib_init_umem_odp()
69 umem_odp->umem.is_odp = 1; in ib_init_umem_odp()
72 start = ALIGN_DOWN(umem_odp->umem.address, page_size); in ib_init_umem_odp()
73 if (check_add_overflow(umem_odp->umem.address, in ib_init_umem_odp()
74 (unsigned long)umem_odp->umem.length, &end)) in ib_init_umem_odp()
109 umem_odp->umem.owning_mm, start, in ib_init_umem_odp()
137 struct ib_umem *umem; in ib_umem_odp_alloc_implicit() local
146 umem = &umem_odp->umem; in ib_umem_odp_alloc_implicit()
147 umem->ibdev = device; in ib_umem_odp_alloc_implicit()
[all …]
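
ib_init_umem_odp() rounds the user range out to page boundaries and, before using address + length as the end of the range, checks the sum for overflow. A runnable userspace sketch of the same computation, using the GCC/Clang builtin that the kernel's check_add_overflow() wraps:

#include <stdint.h>
#include <stdio.h>

#define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))
#define ALIGN_UP(x, a)   (((x) + (a) - 1) & ~((uint64_t)(a) - 1))

int main(void)
{
        uint64_t address = 0x7f1234561234, length = 0x10000;
        uint64_t page_size = 4096, start, end;

        start = ALIGN_DOWN(address, page_size);
        if (__builtin_add_overflow(address, length, &end)) {
                fprintf(stderr, "address range overflows\n");
                return 1;
        }
        end = ALIGN_UP(end, page_size);
        printf("odp range: [0x%jx, 0x%jx)\n", (uintmax_t)start, (uintmax_t)end);
        return 0;
}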
umem_dmabuf.c 39 start = ALIGN_DOWN(umem_dmabuf->umem.address, PAGE_SIZE); in ib_umem_dmabuf_map_pages()
40 end = ALIGN(umem_dmabuf->umem.address + umem_dmabuf->umem.length, in ib_umem_dmabuf_map_pages()
65 umem_dmabuf->umem.sgt_append.sgt.sgl = umem_dmabuf->first_sg; in ib_umem_dmabuf_map_pages()
66 umem_dmabuf->umem.sgt_append.sgt.nents = nmap; in ib_umem_dmabuf_map_pages()
125 struct ib_umem *umem; in ib_umem_dmabuf_get_with_dma_device() local
145 umem = &umem_dmabuf->umem; in ib_umem_dmabuf_get_with_dma_device()
146 umem->ibdev = device; in ib_umem_dmabuf_get_with_dma_device()
147 umem->length = size; in ib_umem_dmabuf_get_with_dma_device()
148 umem->address = offset; in ib_umem_dmabuf_get_with_dma_device()
149 umem->writable = ib_access_writable(access); in ib_umem_dmabuf_get_with_dma_device()
[all …]
uverbs_std_types_cq.c 72 struct ib_umem *umem = NULL; in UVERBS_HANDLER() local
138 umem = ib_umem_get(ib_dev, buffer_va, buffer_length, IB_ACCESS_LOCAL_WRITE); in UVERBS_HANDLER()
139 if (IS_ERR(umem)) { in UVERBS_HANDLER()
140 ret = PTR_ERR(umem); in UVERBS_HANDLER()
169 umem = &umem_dmabuf->umem; in UVERBS_HANDLER()
180 ib_umem_release(umem); in UVERBS_HANDLER()
194 ret = umem ? ib_dev->ops.create_cq_umem(cq, &attr, umem, attrs) : in UVERBS_HANDLER()
209 ib_umem_release(umem); in UVERBS_HANDLER()
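
The CQ handler also shows the error-handling contract of ib_umem_get(): it returns an ERR_PTR-encoded pointer rather than NULL on failure, so callers test IS_ERR() and propagate PTR_ERR(). A minimal kernel-style sketch of that contract; the helper itself is hypothetical:

static int map_user_buffer(struct ib_device *ib_dev, u64 va, u64 len,
                           struct ib_umem **out)
{
        struct ib_umem *umem;

        umem = ib_umem_get(ib_dev, va, len, IB_ACCESS_LOCAL_WRITE);
        if (IS_ERR(umem))
                return PTR_ERR(umem); /* never NULL-check an ERR_PTR value */

        *out = umem;
        return 0;
}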
/linux/tools/testing/selftests/bpf/
xsk.c 72 struct xsk_umem *umem; member
92 int xsk_umem__fd(const struct xsk_umem *umem) in xsk_umem__fd() argument
94 return umem ? umem->fd : -EINVAL; in xsk_umem__fd()
163 static int xsk_create_umem_rings(struct xsk_umem *umem, int fd, in xsk_create_umem_rings() argument
172 &umem->config.fill_size, in xsk_create_umem_rings()
173 sizeof(umem->config.fill_size)); in xsk_create_umem_rings()
178 &umem->config.comp_size, in xsk_create_umem_rings()
179 sizeof(umem->config.comp_size)); in xsk_create_umem_rings()
187 map = mmap(NULL, off.fr.desc + umem->config.fill_size * sizeof(__u64), in xsk_create_umem_rings()
193 fill->mask = umem->config.fill_size - 1; in xsk_create_umem_rings()
[all …]
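
xsk_create_umem_rings() follows the usual AF_XDP fill-ring bring-up: size the ring via setsockopt(XDP_UMEM_FILL_RING), query mmap offsets via getsockopt(XDP_MMAP_OFFSETS), then mmap the ring at XDP_UMEM_PGOFF_FILL_RING; mask = fill_size - 1 works because ring sizes must be powers of two. A hedged userspace sketch with error handling trimmed for brevity:

#include <linux/if_xdp.h>
#include <sys/socket.h>
#include <sys/mman.h>
#include <stdint.h>

/* Sketch only: returns the fill ring's descriptor array, or NULL. */
static uint64_t *map_fill_ring(int xsk_fd, uint32_t fill_size)
{
        struct xdp_mmap_offsets off;
        socklen_t optlen = sizeof(off);
        void *map;

        setsockopt(xsk_fd, SOL_XDP, XDP_UMEM_FILL_RING,
                   &fill_size, sizeof(fill_size));
        getsockopt(xsk_fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);

        map = mmap(NULL, off.fr.desc + fill_size * sizeof(uint64_t),
                   PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
                   xsk_fd, XDP_UMEM_PGOFF_FILL_RING);
        if (map == MAP_FAILED)
                return NULL;
        return (uint64_t *)((char *)map + off.fr.desc);
}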
xskxceiver.c 124 struct xsk_umem_info *umem; in ifobj_zc_avail() local
133 umem = calloc(1, sizeof(struct xsk_umem_info)); in ifobj_zc_avail()
134 if (!umem) { in ifobj_zc_avail()
138 umem->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE; in ifobj_zc_avail()
139 ret = xsk_configure_umem(ifobject, umem, bufs, umem_sz); in ifobj_zc_avail()
149 ret = xsk_configure_socket(xsk, umem, ifobject, false); in ifobj_zc_avail()
156 munmap(umem->buffer, umem_sz); in ifobj_zc_avail()
157 xsk_umem__delete(umem->umem); in ifobj_zc_avail()
158 free(umem); in ifobj_zc_avail()
/linux/tools/testing/selftests/bpf/prog_tests/
test_xsk.c 70 return !!ifobj->umem->umem; in is_umem_valid()
78 static u64 umem_size(struct xsk_umem_info *umem) in umem_size() argument
80 return umem->num_frames * umem->frame_size; in umem_size()
83 int xsk_configure_umem(struct ifobject *ifobj, struct xsk_umem_info *umem, void *buffer, in xsk_configure_umem() argument
89 .frame_size = umem->frame_size, in xsk_configure_umem()
90 .frame_headroom = umem->frame_headroom, in xsk_configure_umem()
95 if (umem->fill_size) in xsk_configure_umem()
96 cfg.fill_size = umem->fill_size; in xsk_configure_umem()
98 if (umem->comp_size) in xsk_configure_umem()
99 cfg.comp_size = umem->comp_size; in xsk_configure_umem()
[all …]
/linux/drivers/infiniband/sw/siw/
siw_mem.c 42 void siw_umem_release(struct siw_umem *umem) in siw_umem_release() argument
44 int i, num_pages = umem->num_pages; in siw_umem_release()
46 if (umem->base_mem) in siw_umem_release()
47 ib_umem_release(umem->base_mem); in siw_umem_release()
50 kfree(umem->page_chunk[i].plist); in siw_umem_release()
53 kfree(umem->page_chunk); in siw_umem_release()
54 kfree(umem); in siw_umem_release()
116 siw_umem_release(mem->umem); in siw_free_mem()
336 struct siw_umem *umem; in siw_umem_get() local
350 umem = kzalloc_obj(*umem); in siw_umem_get()
[all …]
siw_mem.h 11 void siw_umem_release(struct siw_umem *umem);
58 static inline struct page *siw_get_upage(struct siw_umem *umem, u64 addr) in siw_get_upage() argument
60 unsigned int page_idx = (addr - umem->fp_addr) >> PAGE_SHIFT, in siw_get_upage()
64 if (likely(page_idx < umem->num_pages)) in siw_get_upage()
65 return umem->page_chunk[chunk_idx].plist[page_in_chunk]; in siw_get_upage()
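
siw_get_upage() resolves a virtual address through a two-level structure: a flat page index into the mapping, split into a chunk index and a slot within that chunk's page list. A worked userspace example of the index arithmetic; CHUNK_SHIFT is an assumed value standing in for siw's real chunking constant:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT  12
#define CHUNK_SHIFT 9   /* assumed: 512 pages per chunk */

int main(void)
{
        uint64_t fp_addr = 0x7f0000000000, addr = 0x7f0000601000;
        unsigned int page_idx = (addr - fp_addr) >> PAGE_SHIFT;            /* 1537 */
        unsigned int chunk_idx = page_idx >> CHUNK_SHIFT;                  /* 3 */
        unsigned int page_in_chunk = page_idx & ((1u << CHUNK_SHIFT) - 1); /* 1 */

        printf("page %u -> chunk %u, slot %u\n",
               page_idx, chunk_idx, page_in_chunk);
        return 0;
}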
/linux/drivers/infiniband/hw/mlx4/
mr.c 77 mr->umem = NULL; in mlx4_ib_get_dma_mr()
91 struct ib_umem *umem) in mlx4_ib_umem_write_mtt() argument
97 rdma_umem_for_each_dma_block(umem, &biter, BIT(mtt->page_shift)) { in mlx4_ib_umem_write_mtt()
158 mr->umem = mlx4_get_umem_mr(pd->device, start, length, access_flags); in mlx4_ib_reg_user_mr()
159 if (IS_ERR(mr->umem)) { in mlx4_ib_reg_user_mr()
160 err = PTR_ERR(mr->umem); in mlx4_ib_reg_user_mr()
164 shift = mlx4_ib_umem_calc_optimal_mtt_size(mr->umem, start, &n); in mlx4_ib_reg_user_mr()
175 err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem); in mlx4_ib_reg_user_mr()
192 ib_umem_release(mr->umem); in mlx4_ib_reg_user_mr()
229 !mmr->umem->writable) { in mlx4_ib_rereg_user_mr()
[all …]
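
mlx4_ib_umem_write_mtt() shows the standard way drivers walk a registered umem: rdma_umem_for_each_dma_block() yields one DMA address per device-page-sized block, which the driver copies into its translation table. A kernel-style sketch of that iteration; write_entry() is a hypothetical stand-in for the device-specific store:

static int write_translation_entries(struct ib_umem *umem, int page_shift)
{
        struct ib_block_iter biter;
        int i = 0, err;

        rdma_umem_for_each_dma_block(umem, &biter, BIT(page_shift)) {
                /* One entry per aligned DMA block of the umem. */
                err = write_entry(i++, rdma_block_iter_dma_address(&biter));
                if (err)
                        return err;
        }
        return 0;
}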
doorbell.c 40 struct ib_umem *umem; member
67 page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK, in mlx4_ib_db_map_user()
69 if (IS_ERR(page->umem)) { in mlx4_ib_db_map_user()
70 err = PTR_ERR(page->umem); in mlx4_ib_db_map_user()
78 db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + in mlx4_ib_db_map_user()
95 ib_umem_release(db->u.user_page->umem); in mlx4_ib_db_unmap_user()
/linux/drivers/infiniband/hw/mlx5/
mem.c 40 void mlx5_ib_populate_pas(struct ib_umem *umem, size_t page_size, __be64 *pas, in mlx5_ib_populate_pas() argument
45 rdma_umem_for_each_dma_block (umem, &biter, page_size) { in mlx5_ib_populate_pas()
58 struct ib_umem *umem, unsigned long pgsz_bitmap, in __mlx5_umem_find_best_quantized_pgoff() argument
66 page_size = ib_umem_find_best_pgoff(umem, pgsz_bitmap, pgoff_bitmask); in __mlx5_umem_find_best_quantized_pgoff()
77 page_offset = ib_umem_dma_offset(umem, page_size); in __mlx5_umem_find_best_quantized_pgoff()
80 page_offset = ib_umem_dma_offset(umem, page_size); in __mlx5_umem_find_best_quantized_pgoff()
doorbell.c 42 struct ib_umem *umem; member
69 page->umem = ib_umem_get(context->ibucontext.device, virt & PAGE_MASK, in mlx5_ib_db_map_user()
71 if (IS_ERR(page->umem)) { in mlx5_ib_db_map_user()
72 err = PTR_ERR(page->umem); in mlx5_ib_db_map_user()
82 db->dma = sg_dma_address(page->umem->sgt_append.sgt.sgl) + in mlx5_ib_db_map_user()
100 ib_umem_release(db->u.user_page->umem); in mlx5_ib_db_unmap_user()
mr.c 58 static struct mlx5_ib_mr *reg_create(struct ib_pd *pd, struct ib_umem *umem,
1087 mr->umem = NULL; in mlx5_ib_get_dma_mr()
1129 static unsigned int mlx5_umem_dmabuf_default_pgsz(struct ib_umem *umem, in mlx5_umem_dmabuf_default_pgsz() argument
1136 umem->iova = iova; in mlx5_umem_dmabuf_default_pgsz()
1141 struct ib_umem *umem, u64 iova, in alloc_cacheable_mr() argument
1151 if (umem->is_dmabuf) in alloc_cacheable_mr()
1152 page_size = mlx5_umem_dmabuf_default_pgsz(umem, iova); in alloc_cacheable_mr()
1154 page_size = mlx5_umem_mkc_find_best_pgsz(dev, umem, iova, in alloc_cacheable_mr()
1160 rb_key.ndescs = ib_umem_num_dma_blocks(umem, page_size); in alloc_cacheable_mr()
1161 rb_key.ats = mlx5_umem_needs_ats(dev, umem, access_flags); in alloc_cacheable_mr()
[all …]
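
alloc_cacheable_mr() keys its MR-cache lookup on ib_umem_num_dma_blocks(), the number of aligned page_size blocks the range spans. That count depends on the offset of iova within the first block, not just on length; a quick userspace check of the arithmetic, mirroring the kernel helper's definition:

#include <assert.h>
#include <stdint.h>

/* Blocks of power-of-two size pgsz spanned by [iova, iova + length). */
static uint64_t num_dma_blocks(uint64_t iova, uint64_t length, uint64_t pgsz)
{
        uint64_t first = iova & ~(pgsz - 1);
        uint64_t last = (iova + length - 1) & ~(pgsz - 1);

        return (last - first) / pgsz + 1;
}

int main(void)
{
        /* 8 KiB starting on a 4 KiB boundary spans two blocks... */
        assert(num_dma_blocks(0x2000, 0x2000, 0x1000) == 2);
        /* ...but shifted by half a page it straddles three. */
        assert(num_dma_blocks(0x2800, 0x2000, 0x1000) == 3);
        return 0;
}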
/linux/drivers/infiniband/sw/rxe/
rxe_odp.c 46 struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem); in rxe_odp_do_pagefault_and_lock()
51 if (umem_odp->umem.writable && !(flags & RXE_PAGEFAULT_RDONLY)) in rxe_odp_do_pagefault_and_lock()
66 struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem); in rxe_odp_init_pages()
69 ret = rxe_odp_do_pagefault_and_lock(mr, mr->umem->address, in rxe_odp_init_pages()
70 mr->umem->length, in rxe_odp_init_pages()
109 mr->umem = &umem_odp->umem; in rxe_odp_mr_init_user()
161 struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem); in rxe_odp_map_range_and_lock()
193 struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem); in __rxe_odp_mr_copy()
229 struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem); in rxe_odp_mr_copy()
236 if (unlikely(!mr->umem->is_odp)) in rxe_odp_mr_copy()
[all …]
/linux/drivers/infiniband/hw/vmw_pvrdma/
pvrdma_mr.c 119 struct ib_umem *umem; in pvrdma_reg_user_mr() local
134 umem = ib_umem_get(pd->device, start, length, access_flags); in pvrdma_reg_user_mr()
135 if (IS_ERR(umem)) { in pvrdma_reg_user_mr()
138 return ERR_CAST(umem); in pvrdma_reg_user_mr()
141 npages = ib_umem_num_dma_blocks(umem, PAGE_SIZE); in pvrdma_reg_user_mr()
157 mr->umem = umem; in pvrdma_reg_user_mr()
166 ret = pvrdma_page_dir_insert_umem(&mr->pdir, mr->umem, 0); in pvrdma_reg_user_mr()
195 ib_umem_release(umem); in pvrdma_reg_user_mr()
262 mr->umem = NULL; in pvrdma_alloc_mr()
299 ib_umem_release(mr->umem); in pvrdma_dereg_mr()
pvrdma_srq.c 149 srq->umem = ib_umem_get(ibsrq->device, ucmd.buf_addr, ucmd.buf_size, 0); in pvrdma_create_srq()
150 if (IS_ERR(srq->umem)) { in pvrdma_create_srq()
151 ret = PTR_ERR(srq->umem); in pvrdma_create_srq()
155 srq->npages = ib_umem_num_dma_blocks(srq->umem, PAGE_SIZE); in pvrdma_create_srq()
171 pvrdma_page_dir_insert_umem(&srq->pdir, srq->umem, 0); in pvrdma_create_srq()
209 ib_umem_release(srq->umem); in pvrdma_create_srq()
229 ib_umem_release(srq->umem); in pvrdma_free_srq()
/linux/drivers/infiniband/hw/mana/
mr.c 144 mr->umem = ib_umem_get(ibdev, start, length, access_flags); in mana_ib_reg_user_mr()
145 if (IS_ERR(mr->umem)) { in mana_ib_reg_user_mr()
146 err = PTR_ERR(mr->umem); in mana_ib_reg_user_mr()
149 mr->umem); in mana_ib_reg_user_mr()
153 err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova); in mana_ib_reg_user_mr()
194 ib_umem_release(mr->umem); in mana_ib_reg_user_mr()
236 mr->umem = &umem_dmabuf->umem; in mana_ib_reg_user_mr_dmabuf()
238 err = mana_ib_create_dma_region(dev, mr->umem, &dma_region_handle, iova); in mana_ib_reg_user_mr_dmabuf()
268 ib_umem_release(mr->umem); in mana_ib_reg_user_mr_dmabuf()
320 if (mr->umem) in mana_ib_dereg_mr()
[all …]
/linux/include/rdma/
ib_umem_odp.h 14 struct ib_umem umem; member
41 static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem) in to_ib_umem_odp() argument
43 return container_of(umem, struct ib_umem_odp, umem); in to_ib_umem_odp()
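
to_ib_umem_odp() is the kernel's container_of() idiom: given a pointer to the embedded struct ib_umem, recover the enclosing struct ib_umem_odp by subtracting the member's offset. A runnable userspace illustration with stand-in struct names:

#include <assert.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct inner { int x; };
struct outer { long tag; struct inner in; };

int main(void)
{
        struct outer o = { .tag = 42 };
        struct inner *ip = &o.in;

        /* Recover the wrapper from a pointer to its member. */
        assert(container_of(ip, struct outer, in) == &o);
        return 0;
}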
/linux/drivers/infiniband/hw/hns/
hns_roce_mr.c 571 if (mtr->umem) { in mtr_free_bufs()
572 ib_umem_release(mtr->umem); in mtr_free_bufs()
573 mtr->umem = NULL; in mtr_free_bufs()
594 mtr->umem = ib_umem_get(ibdev, user_addr, total_size, in mtr_alloc_bufs()
596 if (IS_ERR(mtr->umem)) { in mtr_alloc_bufs()
598 mtr->umem); in mtr_alloc_bufs()
602 mtr->umem = NULL; in mtr_alloc_bufs()
657 if (mtr->umem) in mtr_map_bufs()
659 mtr->umem, page_shift); in mtr_map_bufs()
675 mtr->umem ? "umtr" : "kmtr", ret, npage); in mtr_map_bufs()
[all …]
/linux/drivers/vdpa/vdpa_user/
vduse_dev.c 102 struct vduse_umem *umem; member
1187 if (!dev->as[asid].umem) in vduse_dev_dereg_umem()
1194 if (dev->as[asid].umem->iova != iova || in vduse_dev_dereg_umem()
1199 unpin_user_pages_dirty_lock(dev->as[asid].umem->pages, in vduse_dev_dereg_umem()
1200 dev->as[asid].umem->npages, true); in vduse_dev_dereg_umem()
1201 atomic64_sub(dev->as[asid].umem->npages, &dev->as[asid].umem->mm->pinned_vm); in vduse_dev_dereg_umem()
1202 mmdrop(dev->as[asid].umem->mm); in vduse_dev_dereg_umem()
1203 vfree(dev->as[asid].umem->pages); in vduse_dev_dereg_umem()
1204 kfree(dev->as[asid].umem); in vduse_dev_dereg_umem()
1205 dev->as[asid].umem = NULL; in vduse_dev_dereg_umem()
[all …]
