/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Cisco Systems.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2020 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/count_zeros.h>
#include <rdma/ib_umem_odp.h>

#include "uverbs.h"

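/*
 * Releasing a large umem can mean unpinning millions of pages. Yield the
 * CPU periodically (every RESCHED_LOOP_CNT_THRESHOLD scatterlist entries)
 * in __ib_umem_release() to avoid soft lockups on huge regions.
 */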
#define RESCHED_LOOP_CNT_THRESHOLD 0x1000

static void __ib_umem_release(struct ib_device *dev, struct ib_umem *umem, int dirty)
{
	bool make_dirty = umem->writable && dirty;
	struct scatterlist *sg;
	unsigned int i;

	if (dirty)
		ib_dma_unmap_sgtable_attrs(dev, &umem->sgt_append.sgt,
					   DMA_BIDIRECTIONAL, 0);

	for_each_sgtable_sg(&umem->sgt_append.sgt, sg, i) {
		unpin_user_page_range_dirty_lock(sg_page(sg),
			DIV_ROUND_UP(sg->length, PAGE_SIZE), make_dirty);

		if (i && !(i % RESCHED_LOOP_CNT_THRESHOLD))
			cond_resched();
	}

	sg_free_append_table(&umem->sgt_append);
}
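
/*
 * Note: the "dirty" path above mirrors the pinning side: pages pinned
 * with FOLL_WRITE in ib_umem_get() are marked dirty before being
 * unpinned, so data the HW wrote is written back rather than discarded.
 */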

/**
 * ib_umem_find_best_pgsz - Find best HW page size to use for this MR
 *
 * @umem: umem struct
 * @pgsz_bitmap: bitmap of HW supported page sizes
 * @virt: IOVA
 *
 * This helper is intended for HW that supports multiple page
 * sizes but can do only a single page size in an MR.
 *
 * Returns 0 if mapping the umem would require page sizes the
 * driver does not support. Drivers that always support PAGE_SIZE
 * or smaller will never see a 0 result.
 */
unsigned long ib_umem_find_best_pgsz(struct ib_umem *umem,
				     unsigned long pgsz_bitmap,
				     unsigned long virt)
{
	unsigned long curr_len = 0;
	dma_addr_t curr_base = ~0;
	unsigned long va, pgoff;
	struct scatterlist *sg;
	dma_addr_t mask;
	dma_addr_t end;
	int i;

	umem->iova = va = virt;

	if (umem->is_odp) {
		unsigned int page_size = BIT(to_ib_umem_odp(umem)->page_shift);

		/* ODP must always be self consistent. */
		if (!(pgsz_bitmap & page_size))
			return 0;
		return page_size;
	}

	/* The best result is the smallest page size that results in the minimum
	 * number of required pages. Compute the largest page size that could
	 * work based on VA address bits that don't change.
	 */
	mask = pgsz_bitmap &
	       GENMASK(BITS_PER_LONG - 1,
		       bits_per((umem->length - 1 + virt) ^ virt));
	/* offset into first SGL */
	pgoff = umem->address & ~PAGE_MASK;

	for_each_sgtable_dma_sg(&umem->sgt_append.sgt, sg, i) {
		/* If the current entry is physically contiguous with the previous
		 * one, no need to take its start address into consideration.
		 */
		if (check_add_overflow(curr_base, curr_len, &end) ||
		    end != sg_dma_address(sg)) {

			curr_base = sg_dma_address(sg);
			curr_len = 0;

			/* Reduce max page size if VA/PA bits differ */
			mask |= (curr_base + pgoff) ^ va;

			/* The alignment of any VA matching a discontinuity point
			 * in the physical memory sets the maximum possible page
			 * size as this must be a starting point of a new page that
			 * needs to be aligned.
			 */
			if (i != 0)
				mask |= va;
		}

		curr_len += sg_dma_len(sg);
		va += sg_dma_len(sg) - pgoff;

		pgoff = 0;
	}

	/* The mask accumulates 1's in each position where the VA and physical
	 * address differ, thus the number of trailing 0s gives the largest page
	 * size that can pass the VA through to the physical address.
	 */
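	/* Worked example: if mask == 0x2030000, count_trailing_zeros() is 16,
	 * so GENMASK(16, 0) clips pgsz_bitmap to page sizes of at most 64KiB;
	 * rounddown_pow_of_two() then picks the highest surviving bit, i.e.
	 * the largest supported page size at or below that limit.
	 */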
	if (mask)
		pgsz_bitmap &= GENMASK(count_trailing_zeros(mask), 0);
	return pgsz_bitmap ? rounddown_pow_of_two(pgsz_bitmap) : 0;
}
EXPORT_SYMBOL(ib_umem_find_best_pgsz);
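
/*
 * Minimal usage sketch (illustrative, not part of this file's API): a
 * driver whose HW can do 4KiB, 64KiB, or 2MiB pages in an MR might select
 * one as follows. The bitmap here is an assumption for the example; real
 * drivers derive it from their HW capabilities. Each set bit in the
 * bitmap is a supported page size, i.e. bit k means 2^k bytes.
 *
 *	unsigned long pgsz;
 *
 *	pgsz = ib_umem_find_best_pgsz(umem, SZ_4K | SZ_64K | SZ_2M, iova);
 *	if (!pgsz)
 *		return -EINVAL;	// no supported size fits this layout
 */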

/**
 * ib_umem_get - Pin and DMA map userspace memory.
 *
 * @device: IB device to connect UMEM
 * @addr: userspace virtual address to start at
 * @size: length of region to pin
 * @access: IB_ACCESS_xxx flags for memory being pinned
 */
struct ib_umem *ib_umem_get(struct ib_device *device, unsigned long addr,
			    size_t size, int access)
{
	struct ib_umem *umem;
	struct page **page_list;
	unsigned long lock_limit;
	unsigned long new_pinned;
	unsigned long cur_base;
	unsigned long dma_attr = 0;
	struct mm_struct *mm;
	unsigned long npages;
	int pinned, ret;
	unsigned int gup_flags = FOLL_LONGTERM;

	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) ||
	    PAGE_ALIGN(addr + size) < (addr + size))
		return ERR_PTR(-EINVAL);

	if (!can_do_mlock())
		return ERR_PTR(-EPERM);

	if (access & IB_ACCESS_ON_DEMAND)
		return ERR_PTR(-EOPNOTSUPP);

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);
	umem->ibdev      = device;
	umem->length     = size;
	umem->address    = addr;
	/*
	 * Drivers should call ib_umem_find_best_pgsz() to set the iova
	 * correctly.
	 */
	umem->iova = addr;
	umem->writable   = ib_access_writable(access);
	umem->owning_mm = mm = current->mm;
	mmgrab(mm);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list) {
		ret = -ENOMEM;
		goto umem_kfree;
	}

	npages = ib_umem_num_pages(umem);
	if (npages == 0 || npages > UINT_MAX) {
		ret = -EINVAL;
		goto out;
	}

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	new_pinned = atomic64_add_return(npages, &mm->pinned_vm);
	if (new_pinned > lock_limit && !capable(CAP_IPC_LOCK)) {
		atomic64_sub(npages, &mm->pinned_vm);
		ret = -ENOMEM;
		goto out;
	}

	cur_base = addr & PAGE_MASK;

	if (umem->writable)
		gup_flags |= FOLL_WRITE;

	while (npages) {
		cond_resched();
		pinned = pin_user_pages_fast(cur_base,
					  min_t(unsigned long, npages,
						PAGE_SIZE /
						sizeof(struct page *)),
					  gup_flags, page_list);
		if (pinned < 0) {
			ret = pinned;
			goto umem_release;
		}

		cur_base += pinned * PAGE_SIZE;
		npages -= pinned;
		ret = sg_alloc_append_table_from_pages(
			&umem->sgt_append, page_list, pinned, 0,
			pinned << PAGE_SHIFT, ib_dma_max_seg_size(device),
			npages, GFP_KERNEL);
		if (ret) {
			unpin_user_pages_dirty_lock(page_list, pinned, 0);
			goto umem_release;
		}
	}

	if (access & IB_ACCESS_RELAXED_ORDERING)
		dma_attr |= DMA_ATTR_WEAK_ORDERING;

	ret = ib_dma_map_sgtable_attrs(device, &umem->sgt_append.sgt,
				       DMA_BIDIRECTIONAL, dma_attr);
	if (ret)
		goto umem_release;
	goto out;

umem_release:
	__ib_umem_release(device, umem, 0);
	atomic64_sub(ib_umem_num_pages(umem), &mm->pinned_vm);
out:
	free_page((unsigned long) page_list);
umem_kfree:
	if (ret) {
		mmdrop(umem->owning_mm);
		kfree(umem);
	}
	return ret ? ERR_PTR(ret) : umem;
}
EXPORT_SYMBOL(ib_umem_get);
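
/*
 * Illustrative sketch of a typical reg_user_mr flow (the page-size bitmap
 * "supported_pgsz_bitmap" and the "program_pte" step are invented for the
 * example): pin the range, pick a page size, then walk the DMA blocks.
 *
 *	struct ib_block_iter biter;
 *	struct ib_umem *umem;
 *	unsigned long pgsz;
 *
 *	umem = ib_umem_get(ibdev, start, length, access_flags);
 *	if (IS_ERR(umem))
 *		return ERR_CAST(umem);
 *
 *	pgsz = ib_umem_find_best_pgsz(umem, supported_pgsz_bitmap, iova);
 *	if (!pgsz) {
 *		ib_umem_release(umem);
 *		return ERR_PTR(-EINVAL);
 *	}
 *
 *	rdma_umem_for_each_dma_block(umem, &biter, pgsz)
 *		program_pte(rdma_block_iter_dma_address(&biter));
 */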

/**
 * ib_umem_release - release memory pinned with ib_umem_get
 * @umem: umem struct to release
 */
void ib_umem_release(struct ib_umem *umem)
{
	if (!umem)
		return;
	if (umem->is_dmabuf)
		return ib_umem_dmabuf_release(to_ib_umem_dmabuf(umem));
	if (umem->is_odp)
		return ib_umem_odp_release(to_ib_umem_odp(umem));

	__ib_umem_release(umem->ibdev, umem, 1);

	atomic64_sub(ib_umem_num_pages(umem), &umem->owning_mm->pinned_vm);
	mmdrop(umem->owning_mm);
	kfree(umem);
}
EXPORT_SYMBOL(ib_umem_release);

/*
 * Copy from the given ib_umem's pages to the given buffer.
 *
 * umem - the umem to copy from
 * offset - offset to start copying from
 * dst - destination buffer
 * length - buffer length
 *
 * Returns 0 on success, or an error code.
 */
int ib_umem_copy_from(void *dst, struct ib_umem *umem, size_t offset,
		      size_t length)
{
	size_t end = offset + length;
	int ret;

	if (offset > umem->length || length > umem->length - offset) {
		pr_err("%s not in range. offset: %zd umem length: %zd end: %zd\n",
		       __func__, offset, umem->length, end);
		return -EINVAL;
	}

	ret = sg_pcopy_to_buffer(umem->sgt_append.sgt.sgl,
				 umem->sgt_append.sgt.orig_nents, dst, length,
				 offset + ib_umem_offset(umem));

	if (ret < 0)
		return ret;
	else if (ret != length)
		return -EINVAL;
	else
		return 0;
}
EXPORT_SYMBOL(ib_umem_copy_from);
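
/*
 * Illustrative sketch (the buffer name and size are invented): pulling a
 * small header out of a registered region, e.g. to validate contents that
 * userspace staged at the start of the MR.
 *
 *	u8 hdr[64];
 *	int err;
 *
 *	err = ib_umem_copy_from(hdr, umem, 0, sizeof(hdr));
 *	if (err)
 *		return err;
 */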