/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 */

#ifndef IB_UMEM_ODP_H
#define IB_UMEM_ODP_H

#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>
#include <linux/hmm-dma.h>

struct ib_umem_odp {
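	/* the generic umem this ODP umem is built on */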
	struct ib_umem umem;
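	/* interval notifier covering the umem's virtual address range */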
	struct mmu_interval_notifier notifier;
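	/* thread-group pid of the owning process */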
	struct pid *tgid;

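	/* per-page pfn and DMA state, managed via the hmm_dma_* helpers */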
	struct hmm_dma_map map;

	/*
	 * The umem_mutex protects the per-page mapping state ('map') of an ODP
	 * umem, allowing only a single thread to map/unmap pages. The mutex
	 * also protects access to the mmu notifier counters.
	 */
	struct mutex		umem_mutex;
	void			*private; /* for the HW driver to use. */

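	/* number of pages currently mapped */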
	int npages;

	/*
	 * An implicit odp umem cannot be DMA mapped, has 0 length, and serves
	 * only as an anchor for the driver to hold onto the per_mm. FIXME:
	 * This should be removed and drivers should work with the per_mm
	 * directly.
	 */
	bool is_implicit_odp;

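	/* log2 of the page size used to map this umem */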
	unsigned int		page_shift;
};

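/* Returns the ODP umem containing the given generic umem. */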
static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_odp, umem);
}

/* Returns the first page of an ODP umem. */
static inline unsigned long ib_umem_start(struct ib_umem_odp *umem_odp)
{
	return umem_odp->notifier.interval_tree.start;
}

/* Returns the address of the page after the last one of an ODP umem. */
static inline unsigned long ib_umem_end(struct ib_umem_odp *umem_odp)
{
	return umem_odp->notifier.interval_tree.last + 1;
}

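/* Returns the number of pages covered by an ODP umem, in page_shift units. */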
static inline size_t ib_umem_odp_num_pages(struct ib_umem_odp *umem_odp)
{
	return (ib_umem_end(umem_odp) - ib_umem_start(umem_odp)) >>
	       umem_odp->page_shift;
}

#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING

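/*
 * ib_umem_odp_get() registers an ODP umem covering [addr, addr + size).
 * ib_umem_odp_alloc_implicit() creates the zero-length parent umem used for
 * implicit ODP, and ib_umem_odp_alloc_child() allocates a child umem under
 * such a parent. Each of them is paired with ib_umem_odp_release().
 */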
struct ib_umem_odp *
ib_umem_odp_get(struct ib_device *device, unsigned long addr, size_t size,
		int access, const struct mmu_interval_notifier_ops *ops);
struct ib_umem_odp *ib_umem_odp_alloc_implicit(struct ib_device *device,
					       int access);
struct ib_umem_odp *
ib_umem_odp_alloc_child(struct ib_umem_odp *root_umem, unsigned long addr,
			size_t size,
			const struct mmu_interval_notifier_ops *ops);
void ib_umem_odp_release(struct ib_umem_odp *umem_odp);

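/*
 * Fault in (when @fault is true) and DMA map the pages covering @bcnt bytes
 * starting at @start_offset. On success the umem_mutex is left held so the
 * caller can update the device page tables before the next invalidation; the
 * caller must unlock it when done.
 */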
int ib_umem_odp_map_dma_and_lock(struct ib_umem_odp *umem_odp, u64 start_offset,
				 u64 bcnt, u64 access_mask, bool fault);

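/* Unmap the DMA-mapped pages in the range [@start_offset, @bound). */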
void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
				 u64 bound);

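/*
 * Rough sketch of how a driver page-fault path might use this API; it is not
 * taken from this header, and "mr", "user_va", "bcnt" and "access_mask" are
 * placeholders supplied by the driver:
 *
 *	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
 *	int ret;
 *
 *	ret = ib_umem_odp_map_dma_and_lock(odp, user_va, bcnt, access_mask,
 *					   true);
 *	if (ret < 0)
 *		return ret;
 *	... program the device page tables from the entries in odp->map ...
 *	mutex_unlock(&odp->umem_mutex);
 */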
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

static inline struct ib_umem_odp *
ib_umem_odp_get(struct ib_device *device, unsigned long addr, size_t size,
		int access, const struct mmu_interval_notifier_ops *ops)
{
	return ERR_PTR(-EINVAL);
}

static inline void ib_umem_odp_release(struct ib_umem_odp *umem_odp) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

#endif /* IB_UMEM_ODP_H */