xref: /linux/include/rdma/ib_umem.h (revision e01027cab38a1a52828eecff447ca5e015b20f92)
1 /* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
2 /*
3  * Copyright (c) 2007 Cisco Systems.  All rights reserved.
4  * Copyright (c) 2020 Intel Corporation.  All rights reserved.
5  */
6 
7 #ifndef IB_UMEM_H
8 #define IB_UMEM_H
9 
10 #include <linux/scatterlist.h>
11 
12 struct ib_device;
13 struct dma_buf_attach_ops;
14 
/* Core descriptor for a chunk of userspace memory registered for DMA. */
struct ib_umem {
	struct ib_device       *ibdev;		/* device the memory is registered with */
	struct mm_struct       *owning_mm;	/* mm of the process that owns the pages */
	u64 iova;				/* device-visible virtual address of the region */
	size_t			length;		/* length of the region in bytes */
	unsigned long		address;	/* userspace virtual start address */
	u32 writable : 1;	/* region may be written by the device */
	u32 is_odp : 1;		/* on-demand-paging umem (implicit/ODP registration) */
	u32 is_dmabuf : 1;	/* embedded in a struct ib_umem_dmabuf (see below) */
	struct sg_append_table sgt_append;	/* scatter/gather table of the pinned pages */
};
26 
/* A umem backed by a dma-buf instead of pinned userspace pages. */
struct ib_umem_dmabuf {
	struct ib_umem umem;			/* base umem; umem.is_dmabuf is set */
	struct dma_buf_attachment *attach;	/* our attachment to the dma-buf */
	struct sg_table *sgt;			/* mapping returned by the exporter */
	struct scatterlist *first_sg;		/* first sg entry covering the umem */
	struct scatterlist *last_sg;		/* last sg entry covering the umem */
	unsigned long first_sg_offset;		/* bytes to skip at start of first_sg */
	unsigned long last_sg_trim;		/* bytes to trim from end of last_sg */
	/* callback invoked when a pinned mapping is revoked; see
	 * ib_umem_dmabuf_set_revoke_locked() below. */
	void (*pinned_revoke)(void *priv);
	void *private;				/* opaque cookie passed to pinned_revoke */
	u8 pinned : 1;				/* pages are pinned (no move_notify) */
	u8 revoked : 1;				/* mapping has been revoked */
};
40 
/*
 * Convert a generic umem to its enclosing dma-buf umem.  Only valid when
 * umem->is_dmabuf is set, i.e. the umem is embedded in a struct
 * ib_umem_dmabuf.
 */
static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_dmabuf, umem);
}
45 
46 /* Returns the offset of the umem start relative to the first page. */
47 static inline int ib_umem_offset(struct ib_umem *umem)
48 {
49 	return umem->address & ~PAGE_MASK;
50 }
51 
52 static inline dma_addr_t ib_umem_start_dma_addr(struct ib_umem *umem)
53 {
54 	return sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem);
55 }
56 
/*
 * Offset of the umem's start DMA address within a block of size @pgsz
 * (@pgsz must be a power of two).
 */
static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
					       unsigned long pgsz)
{
	unsigned long in_block_mask = pgsz - 1;

	return ib_umem_start_dma_addr(umem) & in_block_mask;
}
62 
63 static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
64 					    unsigned long pgsz)
65 {
66 	return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
67 			 ALIGN_DOWN(umem->iova, pgsz))) /
68 	       pgsz;
69 }
70 
/* Number of system (PAGE_SIZE) pages spanned by the umem. */
static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
	return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
}
#ifdef CONFIG_INFINIBAND_USER_MEM

/* Pin and map a userspace address range for DMA by @device. */
struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access);
/* Unpin the pages and free the umem. */
void ib_umem_release(struct ib_umem *umem);
/* Copy @length bytes from the umem, starting at @offset, into @dst. */
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length);
/* Pick the largest HW-supported page size from @pgsz_bitmap for this umem. */
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt);
85 
86 /**
87  * ib_umem_find_best_pgoff - Find best HW page size
88  *
89  * @umem: umem struct
90  * @pgsz_bitmap: bitmap of HW supported page sizes
91  * @pgoff_bitmask: Mask of bits that can be represented with an offset
92  *
93  * This is very similar to ib_umem_find_best_pgsz() except instead of accepting
94  * an IOVA it accepts a bitmask specifying what address bits can be represented
95  * with a page offset.
96  *
97  * For instance if the HW has multiple page sizes, requires 64 byte alignemnt,
98  * and can support aligned offsets up to 4032 then pgoff_bitmask would be
99  * "111111000000".
100  *
101  * If the pgoff_bitmask requires either alignment in the low bit or an
102  * unavailable page size for the high bits, this function returns 0.
103  *
104  * Returns: best HW page size for the parameters or 0 if none available
105  *   for the given parameters.
106  */
107 static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
108 						    unsigned long pgsz_bitmap,
109 						    u64 pgoff_bitmask)
110 {
111 	dma_addr_t dma_addr;
112 
113 	dma_addr = ib_umem_start_dma_addr(umem);
114 	return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
115 				      dma_addr & pgoff_bitmask);
116 }
117 
118 static inline bool ib_umem_is_contiguous(struct ib_umem *umem)
119 {
120 	dma_addr_t dma_addr;
121 	unsigned long pgsz;
122 
123 	/*
124 	 * Select the smallest aligned page that can contain the whole umem if
125 	 * it was contiguous.
126 	 */
127 	dma_addr = ib_umem_start_dma_addr(umem);
128 	pgsz = roundup_pow_of_two((dma_addr ^ (umem->length - 1 + dma_addr)) + 1);
129 	return !!ib_umem_find_best_pgoff(umem, pgsz, U64_MAX);
130 }
131 
/* Attach to a dma-buf by @fd with caller-supplied move_notify ops. */
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset, size_t size,
					  int fd, int access,
					  const struct dma_buf_attach_ops *ops);
/* As above, but pin the buffer so no move_notify ops are needed. */
struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
						 unsigned long offset,
						 size_t size, int fd,
						 int access);
/* Pinned + revocable variant; returns with the revoke lock held. */
struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned_revocable_and_lock(struct ib_device *device,
					     unsigned long offset, size_t size,
					     int fd, int access);
/* Install the revoke callback; caller must hold the revoke lock. */
void ib_umem_dmabuf_set_revoke_locked(struct ib_umem_dmabuf *umem_dmabuf,
				      void (*revoke)(void *priv), void *priv);
/* Pinned variant that maps for an explicit DMA device. */
struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
					  struct device *dma_device,
					  unsigned long offset, size_t size,
					  int fd, int access);
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);
/* Lock/unlock/trigger revocation of a revocable pinned dma-buf umem. */
void ib_umem_dmabuf_revoke_lock(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_revoke_unlock(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf);
157 
158 #else /* CONFIG_INFINIBAND_USER_MEM */
159 
160 #include <linux/err.h>
161 
162 static inline struct ib_umem *ib_umem_get(struct ib_device *device,
163 					  unsigned long addr, size_t size,
164 					  int access)
165 {
166 	return ERR_PTR(-EOPNOTSUPP);
167 }
168 static inline void ib_umem_release(struct ib_umem *umem) { }
169 static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
170 		      		    size_t length) {
171 	return -EOPNOTSUPP;
172 }
173 static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
174 						   unsigned long pgsz_bitmap,
175 						   unsigned long virt)
176 {
177 	return 0;
178 }
179 static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
180 						    unsigned long pgsz_bitmap,
181 						    u64 pgoff_bitmask)
182 {
183 	return 0;
184 }
185 static inline
186 struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
187 					  unsigned long offset,
188 					  size_t size, int fd,
189 					  int access,
190 					  struct dma_buf_attach_ops *ops)
191 {
192 	return ERR_PTR(-EOPNOTSUPP);
193 }
/* dma-buf stubs: fail with -EOPNOTSUPP or do nothing, mirroring the API. */
static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned(struct ib_device *device, unsigned long offset,
			  size_t size, int fd, int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned_revocable_and_lock(struct ib_device *device,
					     unsigned long offset, size_t size,
					     int fd, int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline void
ib_umem_dmabuf_set_revoke_locked(struct ib_umem_dmabuf *umem_dmabuf,
				 void (*revoke)(void *priv), void *priv) {}

static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
					  struct device *dma_device,
					  unsigned long offset, size_t size,
					  int fd, int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	return -EOPNOTSUPP;
}
static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_revoke_lock(struct ib_umem_dmabuf *umem_dmabuf) {}
static inline void ib_umem_dmabuf_revoke_unlock(struct ib_umem_dmabuf *umem_dmabuf) {}
static inline void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf) {}
231 
232 #endif /* CONFIG_INFINIBAND_USER_MEM */
233 #endif /* IB_UMEM_H */
234