/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2020 Intel Corporation.  All rights reserved.
 */

#ifndef IB_UMEM_H
#define IB_UMEM_H

#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

struct ib_ucontext;
struct ib_umem_odp;
struct dma_buf_attach_ops;

struct ib_umem {
	struct ib_device       *ibdev;
	struct mm_struct       *owning_mm;
	u64 iova;
	size_t			length;
	unsigned long		address;
	u32 writable : 1;
	u32 is_odp : 1;
	u32 is_dmabuf : 1;
	struct sg_append_table sgt_append;
};

struct ib_umem_dmabuf {
	struct ib_umem umem;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *first_sg;
	struct scatterlist *last_sg;
	unsigned long first_sg_offset;
	unsigned long last_sg_trim;
	void *private;
	u8 pinned : 1;
};

static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_dmabuf, umem);
}

/* Returns the offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
	return umem->address & ~PAGE_MASK;
}

static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
					       unsigned long pgsz)
{
	return (sg_dma_address(umem->sgt_append.sgt.sgl) + ib_umem_offset(umem)) &
	       (pgsz - 1);
}

static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
					    unsigned long pgsz)
{
	return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
			 ALIGN_DOWN(umem->iova, pgsz))) /
	       pgsz;
}

static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
	return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
}
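
/*
 * Usage sketch (illustrative only, not part of this header's API): a driver
 * that programs hardware translation entries of a chosen page size can size
 * its table from ib_umem_num_dma_blocks().  The names below (hw_pgsz, nent)
 * are hypothetical driver state.
 *
 *	unsigned long hw_pgsz = SZ_64K;	// page size the HW will be given
 *	size_t nent = ib_umem_num_dma_blocks(umem, hw_pgsz);
 *	// allocate nent translation entries, one per aligned DMA block
 */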

static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
						struct ib_umem *umem,
						unsigned long pgsz)
{
	__rdma_block_iter_start(biter, umem->sgt_append.sgt.sgl,
				umem->sgt_append.sgt.nents, pgsz);
}

/**
 * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
 * @umem: umem to iterate over
 * @biter: block iterator holding the current DMA block address
 * @pgsz: Page size to split the list into
 *
 * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
 * returned DMA blocks will be aligned to pgsz and span the range:
 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
 *
 * Performs exactly ib_umem_num_dma_blocks() iterations.
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
	for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
	     __rdma_block_iter_next(biter);)
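
/*
 * Usage sketch (illustrative, with hypothetical driver names): given a pgsz
 * already selected with ib_umem_find_best_pgsz(), walk the umem in DMA blocks
 * of that size; rdma_block_iter_dma_address() (from rdma/ib_verbs.h) yields
 * each block's aligned DMA address.  write_hw_pte() and mr are placeholders.
 *
 *	struct ib_block_iter biter;
 *
 *	rdma_umem_for_each_dma_block(umem, &biter, pgsz)
 *		write_hw_pte(mr, rdma_block_iter_dma_address(&biter));
 */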

#ifdef CONFIG_INFINIBAND_USER_MEM

struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length);
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt);
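
/*
 * Lifecycle sketch (illustrative, with hypothetical driver names): a typical
 * reg_user_mr path pins the user range with ib_umem_get(), picks a HW page
 * size, and drops the pin with ib_umem_release() on error or teardown.
 * ibdev, start, length, access_flags, hw_page_size_bitmap and virt_iova are
 * placeholders for driver/verbs state.
 *
 *	struct ib_umem *umem;
 *	unsigned long pgsz;
 *
 *	umem = ib_umem_get(ibdev, start, length, access_flags);
 *	if (IS_ERR(umem))
 *		return PTR_ERR(umem);
 *	pgsz = ib_umem_find_best_pgsz(umem, hw_page_size_bitmap, virt_iova);
 *	if (!pgsz) {
 *		ib_umem_release(umem);
 *		return -EINVAL;
 *	}
 *	...
 *	ib_umem_release(umem);	// when the MR is destroyed
 */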

/**
 * ib_umem_find_best_pgoff - Find best HW page size
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @pgoff_bitmask: Mask of bits that can be represented with an offset
 *
 * This is very similar to ib_umem_find_best_pgsz() except instead of accepting
 * an IOVA it accepts a bitmask specifying what address bits can be represented
 * with a page offset.
 *
 * For instance, if the HW has multiple page sizes, requires 64 byte alignment,
 * and can support aligned offsets up to 4032, then pgoff_bitmask would be
 * "111111000000".
 *
 * If the pgoff_bitmask requires either alignment in the low bit or an
 * unavailable page size for the high bits, this function returns 0.
 */
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	struct scatterlist *sg = umem->sgt_append.sgt.sgl;
	dma_addr_t dma_addr;

	dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
	return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
				      dma_addr & pgoff_bitmask);
}
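
/*
 * Usage sketch (illustrative): for the example in the kernel-doc above, the
 * mask "111111000000" is bits 6..11, i.e. GENMASK(11, 6); hw_page_size_bitmap
 * is a hypothetical driver-provided set of supported page sizes.
 *
 *	pgsz = ib_umem_find_best_pgoff(umem, hw_page_size_bitmap,
 *				       GENMASK(11, 6));
 *	if (!pgsz)
 *		return -EINVAL;		// no usable page size/offset combination
 */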

struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset, size_t size,
					  int fd, int access,
					  const struct dma_buf_attach_ops *ops);
struct ib_umem_dmabuf *ib_umem_dmabuf_get_pinned(struct ib_device *device,
						 unsigned long offset,
						 size_t size, int fd,
						 int access);
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);
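
/*
 * Usage sketch (illustrative, with hypothetical driver names): a
 * reg_user_mr_dmabuf path that does not implement move_notify can use the
 * pinned variant; its pages are mapped at get time, so the embedded umem can
 * then be walked with rdma_umem_for_each_dma_block() as usual.
 * program_mr() is a placeholder for the driver's MR programming step.
 *
 *	struct ib_umem_dmabuf *umem_dmabuf;
 *
 *	umem_dmabuf = ib_umem_dmabuf_get_pinned(ibdev, offset, length, fd,
 *						access_flags);
 *	if (IS_ERR(umem_dmabuf))
 *		return PTR_ERR(umem_dmabuf);
 *	program_mr(mr, &umem_dmabuf->umem);
 *	...
 *	ib_umem_dmabuf_release(umem_dmabuf);	// on MR destroy
 */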

#else /* CONFIG_INFINIBAND_USER_MEM */

#include <linux/err.h>

static inline struct ib_umem *ib_umem_get(struct ib_device *device,
					  unsigned long addr, size_t size,
					  int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void ib_umem_release(struct ib_umem *umem) { }
static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
				    size_t length)
{
	return -EOPNOTSUPP;
}
static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
						   unsigned long pgsz_bitmap,
						   unsigned long virt)
{
	return 0;
}
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	return 0;
}
static inline
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset,
					  size_t size, int fd,
					  int access,
					  const struct dma_buf_attach_ops *ops)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline struct ib_umem_dmabuf *
ib_umem_dmabuf_get_pinned(struct ib_device *device, unsigned long offset,
			  size_t size, int fd, int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	return -EOPNOTSUPP;
}
static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { }

#endif /* CONFIG_INFINIBAND_USER_MEM */
#endif /* IB_UMEM_H */