/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2020 Intel Corporation.  All rights reserved.
 */

#ifndef IB_UMEM_H
#define IB_UMEM_H

#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

struct ib_ucontext;
struct ib_umem_odp;
struct dma_buf_attach_ops;

struct ib_umem {
	struct ib_device       *ibdev;
	struct mm_struct       *owning_mm;
	u64 iova;
	size_t			length;
	unsigned long		address;
	u32 writable : 1;
	u32 is_odp : 1;
	u32 is_dmabuf : 1;
	struct sg_append_table sgt_append;
};

struct ib_umem_dmabuf {
	struct ib_umem umem;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *first_sg;
	struct scatterlist *last_sg;
	unsigned long first_sg_offset;
	unsigned long last_sg_trim;
	void *private;
	u8 pinned : 1;
	u8 revoked : 1;
};

static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_dmabuf, umem);
}

/* Returns the offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
	return umem->address & ~PAGE_MASK;
}

static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
					       unsigned long pgsz)
{
	return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) &
	       (pgsz - 1);
}

static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
					    unsigned long pgsz)
{
	return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
			 ALIGN_DOWN(umem->iova, pgsz))) /
	       pgsz;
}
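
/*
 * Worked example (illustrative values, not from the API docs): with
 * iova = 0x11000, length = 0x3000 and pgsz = 0x10000 (64 KiB), the
 * block span runs from ALIGN_DOWN(0x11000, 0x10000) = 0x10000 to
 * ALIGN(0x14000, 0x10000) = 0x20000, so ib_umem_num_dma_blocks()
 * returns 1, while ib_umem_num_pages() below would report 3 pages on
 * a system with 4 KiB PAGE_SIZE.
 */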

static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
	return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
}

static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
						struct ib_umem *umem,
						unsigned long pgsz)
{
	__rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
				umem->sgt_append.sgt.nents, pgsz);
	biter->__sg_advance = ib_umem_offset(umem) & ~(pgsz - 1);
	biter->__sg_numblocks = ib_umem_num_dma_blocks(umem, pgsz);
}

static inline bool __rdma_umem_block_iter_next(struct ib_block_iter *biter)
{
	return __rdma_block_iter_next(biter) && biter->__sg_numblocks--;
}

/**
 * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
 * @umem: umem to iterate over
 * @biter: block iterator holding the current position
 * @pgsz: Page size to split the list into
 *
 * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
 * returned DMA blocks will be aligned to pgsz and span the range:
 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
 *
 * Performs exactly ib_umem_num_dma_blocks() iterations.
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
	for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
	     __rdma_umem_block_iter_next(biter);)
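
/*
 * Example usage (a minimal sketch; "tbl" and "nr" are hypothetical
 * driver state, not part of this API): a driver typically collects the
 * aligned DMA address of every block into its HW page table:
 *
 *	struct ib_block_iter biter;
 *	u64 *tbl = driver_page_table;
 *	unsigned int nr = 0;
 *
 *	rdma_umem_for_each_dma_block(umem, &biter, pgsz)
 *		tbl[nr++] = rdma_block_iter_dma_address(&biter);
 */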

#ifdef CONFIG_INFINIBAND_USER_MEM

struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length);
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt);
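
/*
 * Typical pinning flow (a hedged sketch, not a complete driver;
 * "start", "length", "iova" and "hw_supported_pgsz_bitmap" stand in
 * for driver-specific values):
 *
 *	struct ib_umem *umem;
 *	unsigned long pgsz;
 *
 *	umem = ib_umem_get(ibdev, start, length, IB_ACCESS_LOCAL_WRITE);
 *	if (IS_ERR(umem))
 *		return PTR_ERR(umem);
 *	pgsz = ib_umem_find_best_pgsz(umem, hw_supported_pgsz_bitmap, iova);
 *	if (!pgsz) {
 *		ib_umem_release(umem);
 *		return -EINVAL;
 *	}
 *	... program the HW with rdma_umem_for_each_dma_block() ...
 *	ib_umem_release(umem);
 */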

/**
 * ib_umem_find_best_pgoff - Find best HW page size
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @pgoff_bitmask: Mask of bits that can be represented with an offset
 *
 * This is very similar to ib_umem_find_best_pgsz() except instead of accepting
 * an IOVA it accepts a bitmask specifying what address bits can be represented
 * with a page offset.
 *
 * For instance, if the HW has multiple page sizes, requires 64 byte alignment,
 * and can support aligned offsets up to 4032, then pgoff_bitmask would be
 * "111111000000".
 *
 * If the pgoff_bitmask requires either alignment in the low bit or an
 * unavailable page size for the high bits, this function returns 0.
 */
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	struct scatterlist *sg = umem->sgt_append.sgt.sgl;
	dma_addr_t dma_addr;

	dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
	return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
				      dma_addr & pgoff_bitmask);
}
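
/*
 * Example (an illustrative sketch; "hw_pgsz_bitmap" is a made-up
 * capability value): for the HW described in the kernel-doc above,
 * which needs 64-byte alignment but can express offsets up to 4032
 * bytes, pgoff_bitmask covers bits 6..11:
 *
 *	unsigned long pgsz;
 *
 *	pgsz = ib_umem_find_best_pgoff(umem, hw_pgsz_bitmap,
 *				       GENMASK(11, 6));
 *	if (!pgsz)
 *		return -EINVAL;
 */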

struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset, size_t size,
					  int fd, int access,
					  const struct dma_buf_attach_ops *ops);
struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
						 unsigned long offset,
						 size_t size, int fd,
						 int access);
struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
					  struct device *dma_device,
					  unsigned long offset, size_t size,
					  int fd, int access);
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf);
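
/*
 * Typical pinned dma-buf flow (a rough sketch; "fd" comes from
 * userspace and error unwinding is driver specific):
 *
 *	struct ib_umem_dmabuf *umem_dmabuf;
 *
 *	umem_dmabuf = ib_umem_dmabuf_get_pinned(ibdev, offset, size, fd,
 *						access);
 *	if (IS_ERR(umem_dmabuf))
 *		return PTR_ERR(umem_dmabuf);
 *	... program the HW from umem_dmabuf->umem as usual ...
 *	ib_umem_dmabuf_release(umem_dmabuf);
 *
 * Drivers using the unpinned ib_umem_dmabuf_get() instead supply
 * dma_buf_attach_ops and call ib_umem_dmabuf_map_pages() with the
 * dma_resv lock held before using the mapping.
 */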

#else /* CONFIG_INFINIBAND_USER_MEM */

#include <linux/err.h>

static inline struct ib_umem *ib_umem_get(struct ib_device *device,
					  unsigned long addr, size_t size,
					  int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void ib_umem_release(struct ib_umem *umem) { }
static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
				    size_t length)
{
	return -EOPNOTSUPP;
}
static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
						   unsigned long pgsz_bitmap,
						   unsigned long virt)
{
	return 0;
}
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	return 0;
}
static inline
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset,
					  size_t size, int fd,
					  int access,
					  const struct dma_buf_attach_ops *ops)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned(struct ib_device *device, unsigned long offset,
			  size_t size, int fd, int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned_with_dma_device(struct ib_device *device,
					  struct device *dma_device,
					  unsigned long offset, size_t size,
					  int fd, int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}

static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	return -EOPNOTSUPP;
}
static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_revoke(struct ib_umem_dmabuf *umem_dmabuf) { }

#endif /* CONFIG_INFINIBAND_USER_MEM */
#endif /* IB_UMEM_H */