/*
 * Copyright (c) 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Cisco Systems. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/count_zeros.h>
#include <rdma/ib_umem_odp.h>

#include "uverbs.h"

#define RESCHED_LOOP_CNT_THRESHOLD 0x1000

/* Release the pages backing a umem: DMA unmap the scatter table (when @dirty,
 * i.e. the table was actually mapped), unpin the pages, optionally marking
 * them dirty, and free the append table.
 */
static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
	bool make_dirty = umem->writable && dirty;
	struct scatterlist *sg;
	unsigned int i;

	if (dirty)
		ib_dma_unmap_sgtable_attrs(dev, &umem->sgt_append.sgt,
					   DMA_BIDIRECTIONAL, 0);

	for_each_sgtable_sg(&umem->sgt_append.sgt, sg, i) {
		unpin_user_page_range_dirty_lock(sg_page(sg),
			DIV_ROUND_UP(sg->length, PAGE_SIZE), make_dirty);

		/* Reschedule periodically so releasing a huge region does not
		 * hog the CPU.
		 */
		if (i && !(i % RESCHED_LOOP_CNT_THRESHOLD))
			cond_resched();
	}

	sg_free_append_table(&umem->sgt_append);
}

/**
 * ib_umem_find_best_pgsz - Find best HW page size to use for this MR
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @virt: IOVA
 *
 * This helper is intended for HW that supports multiple page
 * sizes but can do only a single page size in an MR.
 *
 * Returns 0 if the umem can only be mapped using page sizes that are
 * not supported by the driver. Drivers that always support PAGE_SIZE
 * or smaller will never see a 0 result.
 */
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt)
{
	unsigned long curr_len = 0;
	dma_addr_t curr_base = ~0;
	unsigned long va, pgoff;
	struct scatterlist *sg;
	dma_addr_t mask;
	dma_addr_t end;
	int i;

	umem->iova = va = virt;

	if (umem->is_odp) {
		unsigned int page_size = BIT(to_ib_umem_odp(umem)->page_shift);

		/* ODP must always be self consistent. */
		if (!(pgsz_bitmap & page_size))
			return 0;
		return page_size;
	}

	/* The best result is the smallest page size that results in the minimum
	 * number of required pages. Compute the largest page size that could
	 * work based on VA address bits that don't change.
	 */
	mask = pgsz_bitmap &
	       GENMASK(BITS_PER_LONG - 1,
		       bits_per((umem->length - 1 + virt) ^ virt));
	/* offset into first SGL */
	pgoff = umem->address & ~PAGE_MASK;

	for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
		/* If the current entry is physically contiguous with the
		 * previous one, there is no need to take its start address
		 * into consideration.
		 */
		if (check_add_overflow(curr_base, curr_len, &end) ||
		    end != sg_dma_address(sg)) {

			curr_base = sg_dma_address(sg);
			curr_len = 0;

			/* Reduce max page size if VA/PA bits differ */
			mask |= (curr_base + pgoff) ^ va;

			/* The alignment of any VA matching a discontinuity point
			 * in the physical memory sets the maximum possible page
			 * size as this must be a starting point of a new page that
			 * needs to be aligned.
			 */
			if (i != 0)
				mask |= va;
		}

		curr_len += sg_dma_len(sg);
		va += sg_dma_len(sg) - pgoff;

		pgoff = 0;
	}

	/* The mask accumulates 1's in each position where the VA and physical
	 * address differ, thus the number of trailing 0's is the largest page
	 * size that can pass the VA through to the physical.
	 */
	if (mask)
		pgsz_bitmap &= GENMASK(count_trailing_zeros(mask), 0);
	return pgsz_bitmap ? rounddown_pow_of_two(pgsz_bitmap) : 0;
}
EXPORT_SYMBOL(ib_umem_find_best_pgsz);
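/*
 * Illustrative sketch (not part of this file): a driver's MR registration
 * path would typically pin the region with ib_umem_get() and then pick a HW
 * page size with ib_umem_find_best_pgsz().  The SZ_4K | SZ_2M | SZ_1G bitmap
 * below is only an assumed example of page sizes a device might advertise:
 *
 *	umem = ib_umem_get(ibdev, start, length, access_flags);
 *	if (IS_ERR(umem))
 *		return PTR_ERR(umem);
 *
 *	page_size = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_2M | SZ_1G,
 *					   virt_addr);
 *	if (!page_size) {
 *		ib_umem_release(umem);
 *		return -EINVAL;
 *	}
 *
 * The driver would then program its translation tables using page_size and
 * release the umem with ib_umem_release() when the MR is destroyed.
 */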

/**
 * ib_umem_get - Pin and DMA map userspace memory.
 *
 * @device: IB device to connect UMEM
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 *
 * Returns the pinned and DMA mapped umem on success, or an ERR_PTR encoded
 * error on failure.
 */
struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access)
{
	struct ib_umem *umem;
	struct page **page_list;
	unsigned long lock_limit;
	unsigned long new_pinned;
	unsigned long cur_base;
	unsigned long dma_attr = 0;
	struct mm_struct *mm;
	unsigned long npages;
	int pinned, ret;
	unsigned int gup_flags = FOLL_LONGTERM;

	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) ||
	    PAGE_ALIGN(addr + size) < (addr + size))
		return ERR_PTR(-EINVAL);

	if (!can_do_mlock())
		return ERR_PTR(-EPERM);

	if (access & IB_ACCESS_ON_DEMAND)
		return ERR_PTR(-EOPNOTSUPP);

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);
	umem->ibdev = device;
	umem->length = size;
	umem->address = addr;
	/*
	 * Drivers should call ib_umem_find_best_pgsz() to set the iova
	 * correctly.
	 */
	umem->iova = addr;
	umem->writable = ib_access_writable(access);
	umem->owning_mm = mm = current->mm;
	mmgrab(mm);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list) {
		ret = -ENOMEM;
		goto umem_kfree;
	}

	npages = ib_umem_num_pages(umem);
	if (npages == 0 || npages > UINT_MAX) {
		ret = -EINVAL;
		goto out;
	}

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	new_pinned = atomic64_add_return(npages, &mm->pinned_vm);
	if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
		atomic64_sub(npages, &mm->pinned_vm);
		ret = -ENOMEM;
		goto out;
	}

	cur_base = addr & PAGE_MASK;

	if (umem->writable)
		gup_flags |= FOLL_WRITE;

	while (npages) {
		cond_resched();
		pinned = pin_user_pages_fast(cur_base,
					     min_t(unsigned long, npages,
						   PAGE_SIZE /
						   sizeof(struct page *)),
					     gup_flags, page_list);
		if (pinned < 0) {
			ret = pinned;
			goto umem_release;
		}

		cur_base += pinned * PAGE_SIZE;
		npages -= pinned;
		ret = sg_alloc_append_table_from_pages(
			&umem->sgt_append, page_list, pinned, 0,
			pinned << PAGE_SHIFT, ib_dma_max_seg_size(device),
			npages, GFP_KERNEL);
		if (ret) {
			unpin_user_pages_dirty_lock(page_list, pinned, 0);
			goto umem_release;
		}
	}

	if (access & IB_ACCESS_RELAXED_ORDERING)
		dma_attr |= DMA_ATTR_WEAK_ORDERING;

	ret = ib_dma_map_sgtable_attrs(device, &umem->sgt_append.sgt,
				       DMA_BIDIRECTIONAL, dma_attr);
	if (ret)
		goto umem_release;
	goto out;

umem_release:
	__ib_umem_release(device, umem, 0);
	atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
out:
	free_page((unsigned long) page_list);
umem_kfree:
	if (ret) {
		mmdrop(umem->owning_mm);
		kfree(umem);
	}
	return ret ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);
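/*
 * Illustrative note (not part of this file): callers pass the verbs access
 * flags straight through, e.g. a hypothetical reg_user_mr handler might do:
 *
 *	umem = ib_umem_get(pd->device, start, length,
 *			   IB_ACCESS_LOCAL_WRITE | IB_ACCESS_REMOTE_WRITE);
 *
 * Any write-type access flag causes the pages to be pinned with FOLL_WRITE
 * above, and IB_ACCESS_RELAXED_ORDERING is translated into
 * DMA_ATTR_WEAK_ORDERING for the DMA mapping.
 */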

/**
 * ib_umem_release - release memory pinned with ib_umem_get
 * @umem: umem struct to release
 */
void ib_umem_release(struct ib_umem *umem)
{
	if (!umem)
		return;
	if (umem->is_dmabuf)
		return ib_umem_dmabuf_release(to_ib_umem_dmabuf(umem));
	if (umem->is_odp)
		return ib_umem_odp_release(to_ib_umem_odp(umem));

	__ib_umem_release(umem->ibdev, umem, 1);

	atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm);
	mmdrop(umem->owning_mm);
	kfree(umem);
}
EXPORT_SYMBOL(ib_umem_release);

/*
 * Copy from the given ib_umem's pages to the given buffer.
 *
 * umem - the umem to copy from
 * offset - offset to start copying from
 * dst - destination buffer
 * length - buffer length
 *
 * Returns 0 on success, or an error code.
 */
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length)
{
	size_t end = offset + length;
	int ret;

	if (offset > umem->length || length > umem->length - offset) {
		pr_err("%s not in range. offset: %zd umem length: %zd end: %zd\n",
		       __func__, offset, umem->length, end);
		return -EINVAL;
	}

	ret = sg_pcopy_to_buffer(umem->sgt_append.sgt.sgl,
				 umem->sgt_append.sgt.orig_nents, dst, length,
				 offset + ib_umem_offset(umem));

	if (ret < 0)
		return ret;
	else if (ret != length)
		return -EINVAL;
	else
		return 0;
}
EXPORT_SYMBOL(ib_umem_copy_from);
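/*
 * Illustrative sketch (not part of this file): a driver that needs a
 * CPU-visible copy of the first bytes of a registered region could use
 * ib_umem_copy_from() after ib_umem_get().  The buffer and its size are an
 * assumed example:
 *
 *	char hdr[64];
 *	int err;
 *
 *	err = ib_umem_copy_from(hdr, umem, 0, sizeof(hdr));
 *	if (err)
 *		return err;
 *
 * The offset is relative to the start of the umem, and the call fails with
 * -EINVAL if offset + length runs past umem->length.
 */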