/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2007 Cisco Systems.  All rights reserved.
 * Copyright (c) 2020 Intel Corporation.  All rights reserved.
 */

#ifndef IB_UMEM_H
#define IB_UMEM_H

#include <linux/list.h>
#include <linux/scatterlist.h>
#include <linux/workqueue.h>
#include <rdma/ib_verbs.h>

struct ib_ucontext;
struct ib_umem_odp;
struct dma_buf_attach_ops;

struct ib_umem {
	struct ib_device       *ibdev;
	struct mm_struct       *owning_mm;
	u64 iova;
	size_t			length;
	unsigned long		address;
	u32 writable : 1;
	u32 is_odp : 1;
	u32 is_dmabuf : 1;
	struct work_struct	work;
	struct sg_append_table  sgt_append;
	struct sg_table sg_head;
	int             nmap;
	unsigned int    sg_nents;
};

struct ib_umem_dmabuf {
	struct ib_umem umem;
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	struct scatterlist *first_sg;
	struct scatterlist *last_sg;
	unsigned long first_sg_offset;
	unsigned long last_sg_trim;
	void *private;
};

static inline struct ib_umem_dmabuf *to_ib_umem_dmabuf(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_dmabuf, umem);
}

/* Returns the offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
	return umem->address & ~PAGE_MASK;
}

static inline unsigned long ib_umem_dma_offset(struct ib_umem *umem,
					       unsigned long pgsz)
{
	return (sg_dma_address(umem->sg_head.sgl) + ib_umem_offset(umem)) &
	       (pgsz - 1);
}

static inline size_t ib_umem_num_dma_blocks(struct ib_umem *umem,
					    unsigned long pgsz)
{
	return (size_t)((ALIGN(umem->iova + umem->length, pgsz) -
			 ALIGN_DOWN(umem->iova, pgsz))) /
	       pgsz;
}
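
/*
 * Worked example (illustrative numbers only): iova = 0x11000,
 * length = 0x3000, pgsz = 0x10000 gives
 * (ALIGN(0x14000, 0x10000) - ALIGN_DOWN(0x11000, 0x10000)) / 0x10000
 * = (0x20000 - 0x10000) / 0x10000 = 1 DMA block.
 */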

static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
	return ib_umem_num_dma_blocks(umem, PAGE_SIZE);
}

static inline void __rdma_umem_block_iter_start(struct ib_block_iter *biter,
						struct ib_umem *umem,
						unsigned long pgsz)
{
	__rdma_block_iter_start(biter, umem->sg_head.sgl, umem->nmap, pgsz);
}

/**
 * rdma_umem_for_each_dma_block - iterate over contiguous DMA blocks of the umem
 * @umem: umem to iterate over
 * @biter: struct ib_block_iter holding the iterator state
 * @pgsz: Page size to split the list into
 *
 * pgsz must be <= PAGE_SIZE or computed by ib_umem_find_best_pgsz(). The
 * returned DMA blocks will be aligned to pgsz and span the range:
 * ALIGN_DOWN(umem->address, pgsz) to ALIGN(umem->address + umem->length, pgsz)
 *
 * Performs exactly ib_umem_num_dma_blocks() iterations.
 */
#define rdma_umem_for_each_dma_block(umem, biter, pgsz)                        \
	for (__rdma_umem_block_iter_start(biter, umem, pgsz);                  \
	     __rdma_block_iter_next(biter);)
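
/*
 * Example usage (a sketch, not part of this header): fill a
 * hypothetical device page array "pas" with one DMA address per block.
 * "pas" and "best_pgsz" are illustrative names only; best_pgsz would
 * come from ib_umem_find_best_pgsz().
 *
 *	struct ib_block_iter biter;
 *	unsigned int i = 0;
 *
 *	rdma_umem_for_each_dma_block(umem, &biter, best_pgsz)
 *		pas[i++] = rdma_block_iter_dma_address(&biter);
 */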

#ifdef CONFIG_INFINIBAND_USER_MEM

struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access);
void ib_umem_release(struct ib_umem *umem);
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length);
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt);
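
/*
 * Typical lifetime (a sketch of the common driver pattern, not a
 * definitive recipe): pin the user range in a reg_user_mr handler and
 * drop the pin when the MR is destroyed.
 *
 *	umem = ib_umem_get(ibdev, start, length, access_flags);
 *	if (IS_ERR(umem))
 *		return ERR_CAST(umem);
 *	...
 *	ib_umem_release(umem);
 */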

/**
 * ib_umem_find_best_pgoff - Find best HW page size
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @pgoff_bitmask: Mask of bits that can be represented with an offset
 *
 * This is very similar to ib_umem_find_best_pgsz() except instead of accepting
 * an IOVA it accepts a bitmask specifying what address bits can be represented
 * with a page offset.
 *
 * For instance, if the HW has multiple page sizes, requires 64 byte alignment,
 * and can support aligned offsets up to 4032, then pgoff_bitmask would be
 * "111111000000".
 *
 * If the pgoff_bitmask requires either alignment in the low bit or an
 * unavailable page size for the high bits, this function returns 0.
 */
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	struct scatterlist *sg = umem->sg_head.sgl;
	dma_addr_t dma_addr;

	dma_addr = sg_dma_address(sg) + (umem->address & ~PAGE_MASK);
	return ib_umem_find_best_pgsz(umem, pgsz_bitmap,
				      dma_addr & pgoff_bitmask);
}
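
/*
 * Example (a sketch mirroring the comment above): HW that needs 64
 * byte alignment but can encode an offset in bits 6..11 would pass
 * GENMASK(11, 6) (== 0xFC0, i.e. "111111000000"):
 *
 *	pgsz = ib_umem_find_best_pgoff(umem, dev_pgsz_bitmap,
 *				       GENMASK(11, 6));
 *	if (!pgsz)
 *		return -EINVAL;
 *
 * "dev_pgsz_bitmap" is a hypothetical device capability mask.
 */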

struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset, size_t size,
					  int fd, int access,
					  const struct dma_buf_attach_ops *ops);
int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf);
void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf);
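
/*
 * dma-buf lifetime (a sketch; "my_attach_ops" is a hypothetical
 * driver-provided dma_buf_attach_ops with a move_notify callback):
 *
 *	umem_dmabuf = ib_umem_dmabuf_get(ibdev, offset, size, fd,
 *					 access_flags, &my_attach_ops);
 *	if (IS_ERR(umem_dmabuf))
 *		return ERR_CAST(umem_dmabuf);
 *
 *	err = ib_umem_dmabuf_map_pages(umem_dmabuf);
 *	...
 *	ib_umem_dmabuf_unmap_pages(umem_dmabuf);
 *	ib_umem_dmabuf_release(umem_dmabuf);
 *
 * Note: map/unmap are expected to run under the dma-buf reservation
 * lock, e.g. when remapping from move_notify.
 */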

#else /* CONFIG_INFINIBAND_USER_MEM */

#include <linux/err.h>

static inline struct ib_umem *ib_umem_get(struct ib_device *device,
					  unsigned long addr, size_t size,
					  int access)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline void ib_umem_release(struct ib_umem *umem) { }
static inline int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
				    size_t length)
{
	return -EOPNOTSUPP;
}
static inline unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
						   unsigned long pgsz_bitmap,
						   unsigned long virt)
{
	return 0;
}
static inline unsigned long ib_umem_find_best_pgoff(struct ib_umem *umem,
						    unsigned long pgsz_bitmap,
						    u64 pgoff_bitmask)
{
	return 0;
}
static inline
struct ib_umem_dmabuf *ib_umem_dmabuf_get(struct ib_device *device,
					  unsigned long offset,
					  size_t size, int fd,
					  int access,
					  const struct dma_buf_attach_ops *ops)
{
	return ERR_PTR(-EOPNOTSUPP);
}
static inline int ib_umem_dmabuf_map_pages(struct ib_umem_dmabuf *umem_dmabuf)
{
	return -EOPNOTSUPP;
}
static inline void ib_umem_dmabuf_unmap_pages(struct ib_umem_dmabuf *umem_dmabuf) { }
static inline void ib_umem_dmabuf_release(struct ib_umem_dmabuf *umem_dmabuf) { }

#endif /* CONFIG_INFINIBAND_USER_MEM */
#endif /* IB_UMEM_H */