Searched refs:umem_odp (Results 1 – 4 of 4) sorted by relevance
/linux/drivers/infiniband/core/umem_odp.c

   51  static void ib_init_umem_implicit_odp(struct ib_umem_odp *umem_odp)
   53          umem_odp->is_implicit_odp = 1;
   54          umem_odp->umem.is_odp = 1;
   55          mutex_init(&umem_odp->umem_mutex);
   58  static int ib_init_umem_odp(struct ib_umem_odp *umem_odp,
   61          struct ib_device *dev = umem_odp->umem.ibdev;
   62          size_t page_size = 1UL << umem_odp->page_shift;
   69          umem_odp->umem.is_odp = 1;
   70          mutex_init(&umem_odp->umem_mutex);
   72          start = ALIGN_DOWN(umem_odp->umem.address, page_size);
  [all …]
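ib_init_umem_odp() above rounds the mapping's start address down to a whole ODP page before computing the interval the umem covers. Below is a minimal user-space sketch of that alignment arithmetic; the ALIGN_DOWN macro mirrors the kernel's power-of-two version, and page_shift and address are arbitrary illustrative values, not taken from the driver.

#include <stdint.h>
#include <stdio.h>

/* Power-of-two align-down, mirroring the kernel's ALIGN_DOWN(). */
#define ALIGN_DOWN(x, a) ((x) & ~((uint64_t)(a) - 1))

int main(void)
{
        unsigned int page_shift = 12;          /* 4 KiB ODP pages (assumed) */
        uint64_t page_size = 1ULL << page_shift;
        uint64_t address = 0x7f3a12345678ULL;  /* arbitrary user VA */

        /* ib_init_umem_odp() rounds the mapping start down to a page
         * boundary so the interval tree covers whole ODP pages. */
        uint64_t start = ALIGN_DOWN(address, page_size);

        printf("address 0x%llx -> page-aligned start 0x%llx\n",
               (unsigned long long)address, (unsigned long long)start);
        return 0;
}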
/linux/drivers/infiniband/sw/rxe/rxe_odp.c

   17          struct ib_umem_odp *umem_odp =
   24          mutex_lock(&umem_odp->umem_mutex);
   27          start = max_t(u64, ib_umem_start(umem_odp), range->start);
   28          end = min_t(u64, ib_umem_end(umem_odp), range->end);
   31          ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
   33          mutex_unlock(&umem_odp->umem_mutex);
   46          struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
   51          if (umem_odp->umem.writable && !(flags & RXE_PAGEFAULT_RDONLY))
   59          np = ib_umem_odp_map_dma_and_lock(umem_odp, user_va, bcnt,
   66          struct ib_umem_odp *umem_odp = to_ib_umem_odp(mr->umem);
  [all …]
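rxe_ib_invalidate_range() clamps the mmu notifier's range to the bounds of the registered umem with max_t()/min_t(), so ib_umem_odp_unmap_dma_pages() only ever sees addresses the umem actually covers. A minimal user-space sketch of that clamping follows; clamp_range() is a hypothetical helper and the addresses are made up.

#include <stdint.h>
#include <stdio.h>

/* Clamp an invalidation range to the bounds of a registered region,
 * as rxe_ib_invalidate_range() does before unmapping DMA pages. */
static void clamp_range(uint64_t umem_start, uint64_t umem_end,
                        uint64_t range_start, uint64_t range_end,
                        uint64_t *start, uint64_t *end)
{
        *start = range_start > umem_start ? range_start : umem_start;
        *end = range_end < umem_end ? range_end : umem_end;
}

int main(void)
{
        uint64_t start, end;

        /* umem covers [0x10000, 0x20000); notifier fires for [0x08000, 0x18000) */
        clamp_range(0x10000, 0x20000, 0x08000, 0x18000, &start, &end);
        printf("unmap [0x%llx, 0x%llx)\n",
               (unsigned long long)start, (unsigned long long)end);
        return 0;
}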
/linux/include/rdma/ib_umem_odp.h

   47  static inline unsigned long ib_umem_start(struct ib_umem_odp *umem_odp)
   49          return umem_odp->notifier.interval_tree.start;
   53  static inline unsigned long ib_umem_end(struct ib_umem_odp *umem_odp)
   55          return umem_odp->notifier.interval_tree.last + 1;
   58  static inline size_t ib_umem_odp_num_pages(struct ib_umem_odp *umem_odp)
   60          return (ib_umem_end(umem_odp) - ib_umem_start(umem_odp)) >>
   61                  umem_odp->page_shift;
   75  void ib_umem_odp_release(struct ib_umem_odp *umem_odp);
   77  int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 start_offset,
   80  void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
  [all …]
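The header's inline helpers derive everything from the notifier's inclusive interval-tree bounds: ib_umem_end() turns the inclusive last byte into an exclusive end, and ib_umem_odp_num_pages() shifts the resulting byte length by page_shift. A user-space sketch follows, assuming a simplified stand-in struct (umem_odp_stub is hypothetical; the real fields live under notifier.interval_tree in struct ib_umem_odp).

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the fields the helpers read: an interval
 * with inclusive [start, last] byte bounds plus a page shift. */
struct umem_odp_stub {
        uint64_t start;          /* first byte covered */
        uint64_t last;           /* last byte covered (inclusive) */
        unsigned int page_shift;
};

static uint64_t umem_start(const struct umem_odp_stub *u) { return u->start; }
/* ib_umem_end() converts the inclusive 'last' to an exclusive end. */
static uint64_t umem_end(const struct umem_odp_stub *u) { return u->last + 1; }
static size_t umem_num_pages(const struct umem_odp_stub *u)
{
        return (umem_end(u) - umem_start(u)) >> u->page_shift;
}

int main(void)
{
        struct umem_odp_stub u = { .start = 0x1000, .last = 0x4fff, .page_shift = 12 };

        /* 0x4000 bytes at 4 KiB per page -> 4 pages */
        printf("[0x%llx, 0x%llx) = %zu pages\n",
               (unsigned long long)umem_start(&u),
               (unsigned long long)umem_end(&u), umem_num_pages(&u));
        return 0;
}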
/linux/drivers/infiniband/hw/mlx5/odp.c

  269          struct ib_umem_odp *umem_odp =
  283          mutex_lock(&umem_odp->umem_mutex);
  289          if (!umem_odp->npages)
  291          mr = umem_odp->private;
  295          start = max_t(u64, ib_umem_start(umem_odp), range->start);
  296          end = min_t(u64, ib_umem_end(umem_odp), range->end);
  304          for (addr = start; addr < end; addr += BIT(umem_odp->page_shift)) {
  305                  idx = (addr - ib_umem_start(umem_odp)) >> umem_odp->page_shift;
  312                  if (umem_odp->map.pfn_list[idx] & HMM_PFN_VALID) {
  348          ib_umem_odp_unmap_dma_pages(umem_odp, start, end);
  [all …]
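mlx5_ib_invalidate_range() walks the clamped range one ODP page at a time, converting each address to an index into the umem's pfn list and acting only on entries whose HMM_PFN_VALID bit is set. A user-space sketch of that walk follows; PFN_VALID here is a stand-in for HMM_PFN_VALID, and the addresses and pfn values are invented.

#include <stdint.h>
#include <stdio.h>

#define PFN_VALID (1ULL << 63)  /* stand-in for the kernel's HMM_PFN_VALID */

int main(void)
{
        unsigned int page_shift = 12;
        uint64_t umem_start = 0x10000, start = 0x11000, end = 0x14000;
        /* Toy pfn_list: entries 1 and 3 hold valid (mapped) pages. */
        uint64_t pfn_list[8] = { 0, PFN_VALID | 0x1234, 0, PFN_VALID | 0x1236 };

        /* Walk the clamped range one ODP page at a time and compute
         * each page's index into the pfn list, as the mlx5 handler does. */
        for (uint64_t addr = start; addr < end; addr += 1ULL << page_shift) {
                uint64_t idx = (addr - umem_start) >> page_shift;

                if (pfn_list[idx] & PFN_VALID)
                        printf("page idx %llu at 0x%llx is mapped -> invalidate\n",
                               (unsigned long long)idx, (unsigned long long)addr);
        }
        return 0;
}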