/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2013 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/iommu.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <rdma/ib_verbs.h>

#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_uiom_interval_tree.h"

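/*
 * Maximum number of scatterlist entries that fit in a single page
 * together with the usnic_uiom_chunk header: the space left in the
 * page after the header divided by the size of one page_list entry.
 */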
#define USNIC_UIOM_PAGE_CHUNK						\
	((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list))	/\
	((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] -	\
	(void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))

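/*
 * IOMMU fault handler installed on the protection domain's IOMMU
 * domain: log the faulting device, domain, IOVA and fault flags and
 * report the fault as unhandled.
 */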
static int usnic_uiom_dma_fault(struct iommu_domain *domain,
				struct device *dev,
				unsigned long iova, int flags,
				void *token)
{
	usnic_err("Device %s iommu fault domain 0x%p va 0x%lx flags 0x%x\n",
		dev_name(dev),
		domain, iova, flags);
	return -ENOSYS;
}

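/*
 * Release every chunk on @chunk_list: unpin each page (marking it
 * dirty if requested) and free the chunk itself.
 */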
static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
{
	struct usnic_uiom_chunk *chunk, *tmp;
	struct page *page;
	struct scatterlist *sg;
	int i;
	dma_addr_t pa;

	list_for_each_entry_safe(chunk, tmp, chunk_list, list) {
		for_each_sg(chunk->page_list, sg, chunk->nents, i) {
			page = sg_page(sg);
			pa = sg_phys(sg);
			unpin_user_pages_dirty_lock(&page, 1, dirty);
			usnic_dbg("pa: %pa\n", &pa);
		}
		kfree(chunk);
	}
}

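/*
 * Pin the user pages backing [addr, addr + size), charge them against
 * the caller's RLIMIT_MEMLOCK via mm->pinned_vm, and record them as
 * scatterlist chunks on uiomr->chunk_list.  On success a reference to
 * the owning mm is grabbed; on failure any pages pinned so far are
 * released again.
 */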
static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
				int dmasync, struct usnic_uiom_reg *uiomr)
{
	struct list_head *chunk_list = &uiomr->chunk_list;
	unsigned int gup_flags = FOLL_LONGTERM;
	struct page **page_list;
	struct scatterlist *sg;
	struct usnic_uiom_chunk *chunk;
	unsigned long locked;
	unsigned long lock_limit;
	unsigned long cur_base;
	unsigned long npages;
	int ret;
	int off;
	int i;
	dma_addr_t pa;
	struct mm_struct *mm;

	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return error.
	 */
	if (((addr + size) < addr) || PAGE_ALIGN(addr + size) < (addr + size))
		return -EINVAL;

	if (!size)
		return -EINVAL;

	if (!can_do_mlock())
		return -EPERM;

	INIT_LIST_HEAD(chunk_list);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;

	uiomr->owning_mm = mm = current->mm;
	mmap_read_lock(mm);

	locked = atomic64_add_return(npages, &current->mm->pinned_vm);
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;
		goto out;
	}

	if (writable)
		gup_flags |= FOLL_WRITE;
	cur_base = addr & PAGE_MASK;
	ret = 0;

	while (npages) {
		ret = pin_user_pages(cur_base,
				     min_t(unsigned long, npages,
				     PAGE_SIZE / sizeof(struct page *)),
				     gup_flags, page_list);

		if (ret < 0)
			goto out;

		npages -= ret;
		off = 0;

		while (ret) {
			chunk = kmalloc_flex(*chunk, page_list,
					     min_t(int, ret, USNIC_UIOM_PAGE_CHUNK));
			if (!chunk) {
				ret = -ENOMEM;
				goto out;
			}

			chunk->nents = min_t(int, ret, USNIC_UIOM_PAGE_CHUNK);
			sg_init_table(chunk->page_list, chunk->nents);
			for_each_sg(chunk->page_list, sg, chunk->nents, i) {
				sg_set_page(sg, page_list[i + off],
						PAGE_SIZE, 0);
				pa = sg_phys(sg);
				usnic_dbg("va: 0x%lx pa: %pa\n",
						cur_base + i*PAGE_SIZE, &pa);
			}
			cur_base += chunk->nents * PAGE_SIZE;
			ret -= chunk->nents;
			off += chunk->nents;
			list_add_tail(&chunk->list, chunk_list);
		}

		ret = 0;
	}

out:
	if (ret < 0) {
		usnic_uiom_put_pages(chunk_list, 0);
		atomic64_sub(npages, &current->mm->pinned_vm);
	} else
		mmgrab(uiomr->owning_mm);

	mmap_read_unlock(mm);
	free_page((unsigned long) page_list);
	return ret;
}

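/*
 * Unmap every interval on @intervals from the PD's IOMMU domain, one
 * page at a time (see the workaround note in the loop below).
 */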
static void usnic_uiom_unmap_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_interval_node *interval, *tmp;
	long unsigned va, size;

	list_for_each_entry_safe(interval, tmp, intervals, link) {
		va = interval->start << PAGE_SHIFT;
		size = ((interval->last - interval->start) + 1) << PAGE_SHIFT;
		while (size > 0) {
			/* Workaround for RH 970401 */
			usnic_dbg("va 0x%lx size 0x%lx", va, PAGE_SIZE);
			iommu_unmap(pd->domain, va, PAGE_SIZE);
			va += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
}

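/*
 * Remove the registration's intervals from the PD's interval tree,
 * unmap them from the IOMMU, and unpin the underlying pages.  Pages
 * are marked dirty only if @dirty is set and at least one removed
 * interval was writable.
 */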
static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd,
					struct usnic_uiom_reg *uiomr,
					int dirty)
{
	int npages;
	unsigned long vpn_start, vpn_last;
	struct usnic_uiom_interval_node *interval, *tmp;
	int writable = 0;
	LIST_HEAD(rm_intervals);

	npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
	vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	spin_lock(&pd->lock);
	usnic_uiom_remove_interval(&pd->root, vpn_start,
					vpn_last, &rm_intervals);
	usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd);

	list_for_each_entry_safe(interval, tmp, &rm_intervals, link) {
		if (interval->flags & IOMMU_WRITE)
			writable = 1;
		list_del(&interval->link);
		kfree(interval);
	}

	usnic_uiom_put_pages(&uiomr->chunk_list, dirty & writable);
	spin_unlock(&pd->lock);
}

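/*
 * Create IOMMU mappings for each interval on @intervals.  Physically
 * contiguous pages are coalesced so that iommu_map() is called once
 * per contiguous run rather than once per page.  On failure the
 * intervals are unmapped again before returning the error.
 */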
static int usnic_uiom_map_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_reg *uiomr)
{
	int i, err;
	size_t size;
	struct usnic_uiom_chunk *chunk;
	struct usnic_uiom_interval_node *interval_node;
	dma_addr_t pa;
	dma_addr_t pa_start = 0;
	dma_addr_t pa_end = 0;
	long int va_start = -EINVAL;
	struct usnic_uiom_pd *pd = uiomr->pd;
	long int va = uiomr->va & PAGE_MASK;
	int flags = IOMMU_READ | IOMMU_CACHE;

	flags |= (uiomr->writable) ? IOMMU_WRITE : 0;
	chunk = list_first_entry(&uiomr->chunk_list, struct usnic_uiom_chunk,
									list);
	list_for_each_entry(interval_node, intervals, link) {
iter_chunk:
		for (i = 0; i < chunk->nents; i++, va += PAGE_SIZE) {
			pa = sg_phys(&chunk->page_list[i]);
			if ((va >> PAGE_SHIFT) < interval_node->start)
				continue;

			if ((va >> PAGE_SHIFT) == interval_node->start) {
				/* First page of the interval */
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			WARN_ON(va_start == -EINVAL);

			if ((pa_end + PAGE_SIZE != pa) &&
					(pa != pa_start)) {
				/* PAs are not contiguous */
				size = pa_end - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
						size, flags, GFP_ATOMIC);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			if ((va >> PAGE_SHIFT) == interval_node->last) {
				/* Last page of the interval */
				size = pa - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
						size, flags, GFP_ATOMIC);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				break;
			}

			if (pa != pa_start)
				pa_end += PAGE_SIZE;
		}

		if (i == chunk->nents) {
			/*
			 * Hit last entry of the chunk,
			 * hence advance to next chunk
			 */
			chunk = list_first_entry(&chunk->list,
							struct usnic_uiom_chunk,
							list);
			goto iter_chunk;
		}
	}

	return 0;

err_out:
	usnic_uiom_unmap_sorted_intervals(intervals, pd);
	return err;
}

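/*
 * Register a user memory region with the protection domain: pin its
 * pages, compute which page intervals are not yet mapped with the
 * required permissions, map those in the IOMMU, and insert the new
 * interval into the PD's interval tree.
 */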
struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
						unsigned long addr, size_t size,
						int writable, int dmasync)
{
	struct usnic_uiom_reg *uiomr;
	unsigned long va_base, vpn_start, vpn_last;
	unsigned long npages;
	int offset, err;
	LIST_HEAD(sorted_diff_intervals);

	/*
	 * Intel IOMMU map throws an error if a translation entry is
	 * changed from read to write.  This module may not unmap
	 * and then remap the entry after fixing the permissions,
	 * because that would open up a small window where hw DMA may
	 * page fault.  Hence, make all entries writable.
	 */
	writable = 1;

	va_base = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT;
	vpn_start = (addr & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	uiomr = kmalloc_obj(*uiomr);
	if (!uiomr)
		return ERR_PTR(-ENOMEM);

	uiomr->va = va_base;
	uiomr->offset = offset;
	uiomr->length = size;
	uiomr->writable = writable;
	uiomr->pd = pd;

	err = usnic_uiom_get_pages(addr, size, writable, dmasync,
				   uiomr);
	if (err) {
		usnic_err("Failed get_pages vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_free_uiomr;
	}

	spin_lock(&pd->lock);
	err = usnic_uiom_get_intervals_diff(vpn_start, vpn_last,
						(writable) ? IOMMU_WRITE : 0,
						IOMMU_WRITE,
						&pd->root,
						&sorted_diff_intervals);
	if (err) {
		usnic_err("Failed disjoint interval vpn [0x%lx,0x%lx] err %d\n",
						vpn_start, vpn_last, err);
		goto out_put_pages;
	}

	err = usnic_uiom_map_sorted_intervals(&sorted_diff_intervals, uiomr);
	if (err) {
		usnic_err("Failed map interval vpn [0x%lx,0x%lx] err %d\n",
						vpn_start, vpn_last, err);
		goto out_put_intervals;
	}

	err = usnic_uiom_insert_interval(&pd->root, vpn_start, vpn_last,
					(writable) ? IOMMU_WRITE : 0);
	if (err) {
		usnic_err("Failed insert interval vpn [0x%lx,0x%lx] err %d\n",
						vpn_start, vpn_last, err);
		goto out_unmap_intervals;
	}

	usnic_uiom_put_interval_set(&sorted_diff_intervals);
	spin_unlock(&pd->lock);

	return uiomr;

out_unmap_intervals:
	usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd);
out_put_intervals:
	usnic_uiom_put_interval_set(&sorted_diff_intervals);
out_put_pages:
	usnic_uiom_put_pages(&uiomr->chunk_list, 0);
	spin_unlock(&pd->lock);
	mmdrop(uiomr->owning_mm);
out_free_uiomr:
	kfree(uiomr);
	return ERR_PTR(err);
}

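/* Drop the mm reference taken in usnic_uiom_get_pages() and free the registration. */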
static void __usnic_uiom_release_tail(struct usnic_uiom_reg *uiomr)
{
	mmdrop(uiomr->owning_mm);
	kfree(uiomr);
}

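/* Number of pages spanned by the registration, including the offset into the first page. */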
static inline size_t usnic_uiom_num_pages(struct usnic_uiom_reg *uiomr)
{
	return PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
}

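/*
 * Release a registration returned by usnic_uiom_reg_get(): unmap and
 * unpin its pages, update the pinned_vm accounting, and drop the mm.
 */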
void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr)
{
	__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);

	atomic64_sub(usnic_uiom_num_pages(uiomr), &uiomr->owning_mm->pinned_vm);
	__usnic_uiom_release_tail(uiomr);
}

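/*
 * Allocate a usNIC protection domain backed by a new IOMMU paging
 * domain for @dev, with the fault handler above installed.
 */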
struct usnic_uiom_pd *usnic_uiom_alloc_pd(struct device *dev)
{
	struct usnic_uiom_pd *pd;
	void *domain;

	pd = kzalloc_obj(*pd);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->domain = domain = iommu_paging_domain_alloc(dev);
	if (IS_ERR(domain)) {
		usnic_err("Failed to allocate IOMMU domain");
		kfree(pd);
		return ERR_CAST(domain);
	}

	iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL);

	spin_lock_init(&pd->lock);
	INIT_LIST_HEAD(&pd->devs);

	return pd;
}

void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd)
{
	iommu_domain_free(pd->domain);
	kfree(pd);
}

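/*
 * Attach @dev to the PD's IOMMU domain.  The device's IOMMU must
 * support cache-coherent DMA; otherwise the device is detached again
 * and -EINVAL is returned.
 */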
int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int err;

	uiom_dev = kzalloc_obj(*uiom_dev, GFP_ATOMIC);
	if (!uiom_dev)
		return -ENOMEM;
	uiom_dev->dev = dev;

	err = iommu_attach_device(pd->domain, dev);
	if (err)
		goto out_free_dev;

	if (!device_iommu_capable(dev, IOMMU_CAP_CACHE_COHERENCY)) {
		usnic_err("IOMMU of %s does not support cache coherency\n",
				dev_name(dev));
		err = -EINVAL;
		goto out_detach_device;
	}

	spin_lock(&pd->lock);
	list_add_tail(&uiom_dev->link, &pd->devs);
	pd->dev_cnt++;
	spin_unlock(&pd->lock);

	return 0;

out_detach_device:
	iommu_detach_device(pd->domain, dev);
out_free_dev:
	kfree(uiom_dev);
	return err;
}

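/*
 * Detach @dev from the PD's IOMMU domain and drop it from the PD's
 * device list, if it is found there.
 */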
void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int found = 0;

	spin_lock(&pd->lock);
	list_for_each_entry(uiom_dev, &pd->devs, link) {
		if (uiom_dev->dev == dev) {
			found = 1;
			break;
		}
	}

	if (!found) {
		usnic_err("Unable to free dev %s - not found\n",
				dev_name(dev));
		spin_unlock(&pd->lock);
		return;
	}

	list_del(&uiom_dev->link);
	pd->dev_cnt--;
	spin_unlock(&pd->lock);

	iommu_detach_device(pd->domain, dev);
}

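/*
 * Return a NULL-terminated array of the devices currently attached to
 * @pd.  The caller must release it with usnic_uiom_free_dev_list().
 */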
struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_dev *uiom_dev;
	struct device **devs;
	int i = 0;

	spin_lock(&pd->lock);
	devs = kzalloc_objs(*devs, pd->dev_cnt + 1, GFP_ATOMIC);
	if (!devs) {
		devs = ERR_PTR(-ENOMEM);
		goto out;
	}

	list_for_each_entry(uiom_dev, &pd->devs, link) {
		devs[i++] = uiom_dev->dev;
	}
out:
	spin_unlock(&pd->lock);
	return devs;
}

void usnic_uiom_free_dev_list(struct device **devs)
{
	kfree(devs);
}