/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2013 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/iommu.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <rdma/ib_verbs.h>

#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_uiom_interval_tree.h"

static struct workqueue_struct *usnic_uiom_wq;

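/*
 * Number of scatterlist entries in a chunk's page_list[]: sized so that
 * the chunk header plus the array fill at most one page.
 */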
#define USNIC_UIOM_PAGE_CHUNK						\
	((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list))	/\
	((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] -	\
	(void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))

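/*
 * IOMMU fault handler: log the device, domain, and faulting IOVA.
 * Returning nonzero tells the IOMMU core the fault was not handled here.
 */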
static int usnic_uiom_dma_fault(struct iommu_domain *domain,
				struct device *dev,
				unsigned long iova, int flags,
				void *token)
{
	usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n",
		dev_name(dev),
		domain, iova, flags);
	return -ENOSYS;
}

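/*
 * Drop the page references held in a registration's chunk list and free
 * the chunks, marking the pages dirty first when @dirty is set.
 */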
static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
{
	struct usnic_uiom_chunk *chunk, *tmp;
	struct page *page;
	struct scatterlist *sg;
	int i;
	dma_addr_t pa;

	list_for_each_entry_safe(chunk, tmp, chunk_list, list) {
		for_each_sg(chunk->page_list, sg, chunk->nents, i) {
			page = sg_page(sg);
			pa = sg_phys(sg);
			if (!PageDirty(page) && dirty)
				set_page_dirty_lock(page);
			put_page(page);
			usnic_dbg("pa: %pa\n", &pa);
		}
		kfree(chunk);
	}
}

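/*
 * Pin the user pages backing [addr, addr + size) and record them as
 * scatterlist chunks on uiomr->chunk_list.  On success the pinned pages
 * are charged to the mm's pinned_vm and a reference is taken on the
 * owning mm; on failure all pages pinned so far are released.
 */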
static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
				int dmasync, struct usnic_uiom_reg *uiomr)
{
	struct list_head *chunk_list = &uiomr->chunk_list;
	struct page **page_list;
	struct scatterlist *sg;
	struct usnic_uiom_chunk *chunk;
	unsigned long locked;
	unsigned long lock_limit;
	unsigned long cur_base;
	unsigned long npages;
	int ret;
	int off;
	int i;
	int flags;
	dma_addr_t pa;
	unsigned int gup_flags;
	struct mm_struct *mm;

	/*
	 * If the combination of the addr and size requested for this memory
	 * region causes an integer overflow, return an error.
	 */
	if (((addr + size) < addr) || PAGE_ALIGN(addr + size) < (addr + size))
		return -EINVAL;

	if (!size)
		return -EINVAL;

	if (!can_do_mlock())
		return -EPERM;

	INIT_LIST_HEAD(chunk_list);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;

	uiomr->owning_mm = mm = current->mm;
	down_write(&mm->mmap_sem);

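	/*
	 * Charge all pages about to be pinned against RLIMIT_MEMLOCK; only
	 * tasks with CAP_IPC_LOCK may exceed the limit.
	 */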
	locked = npages + current->mm->pinned_vm;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;
		goto out;
	}

	flags = IOMMU_READ | IOMMU_CACHE;
	flags |= (writable) ? IOMMU_WRITE : 0;
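	/*
	 * Request FOLL_WRITE even for read-only registrations, adding
	 * FOLL_FORCE to permit it on read-only mappings; this resolves any
	 * COW before the pages are pinned long term.
	 */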
	gup_flags = FOLL_WRITE;
	gup_flags |= (writable) ? 0 : FOLL_FORCE;
	cur_base = addr & PAGE_MASK;
	ret = 0;

	while (npages) {
		ret = get_user_pages_longterm(cur_base,
					min_t(unsigned long, npages,
					PAGE_SIZE / sizeof(struct page *)),
					gup_flags, page_list, NULL);

		if (ret < 0)
			goto out;

		npages -= ret;
		off = 0;

		while (ret) {
			chunk = kmalloc(sizeof(*chunk) +
					sizeof(struct scatterlist) *
					min_t(int, ret, USNIC_UIOM_PAGE_CHUNK),
					GFP_KERNEL);
			if (!chunk) {
				ret = -ENOMEM;
				goto out;
			}

			chunk->nents = min_t(int, ret, USNIC_UIOM_PAGE_CHUNK);
			sg_init_table(chunk->page_list, chunk->nents);
			for_each_sg(chunk->page_list, sg, chunk->nents, i) {
				sg_set_page(sg, page_list[i + off],
						PAGE_SIZE, 0);
				pa = sg_phys(sg);
				usnic_dbg("va: 0x%lx pa: %pa\n",
						cur_base + i*PAGE_SIZE, &pa);
			}
			cur_base += chunk->nents * PAGE_SIZE;
			ret -= chunk->nents;
			off += chunk->nents;
			list_add_tail(&chunk->list, chunk_list);
		}

		ret = 0;
	}

out:
	if (ret < 0)
		usnic_uiom_put_pages(chunk_list, 0);
	else {
		mm->pinned_vm = locked;
		mmgrab(uiomr->owning_mm);
	}

	up_write(&mm->mmap_sem);
	free_page((unsigned long) page_list);
	return ret;
}

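/*
 * Unmap each interval from the PD's IOMMU domain, one page at a time
 * (see the RH 970401 workaround below).
 */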
static void usnic_uiom_unmap_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_interval_node *interval, *tmp;
	unsigned long va, size;

	list_for_each_entry_safe(interval, tmp, intervals, link) {
		va = interval->start << PAGE_SHIFT;
		size = ((interval->last - interval->start) + 1) << PAGE_SHIFT;
		while (size > 0) {
			/* Workaround for RH 970401 */
			usnic_dbg("va 0x%lx size 0x%lx", va, PAGE_SIZE);
			iommu_unmap(pd->domain, va, PAGE_SIZE);
			va += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
}

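/*
 * Tear down a registration: remove its VPN range from the PD's interval
 * tree, unmap it from the IOMMU domain, and release the pinned pages,
 * dirtying them only if the mapping was writable.
 */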
static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd,
					struct usnic_uiom_reg *uiomr,
					int dirty)
{
	int npages;
	unsigned long vpn_start, vpn_last;
	struct usnic_uiom_interval_node *interval, *tmp;
	int writable = 0;
	LIST_HEAD(rm_intervals);

	npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
	vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	spin_lock(&pd->lock);
	usnic_uiom_remove_interval(&pd->root, vpn_start,
					vpn_last, &rm_intervals);
	usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd);

	list_for_each_entry_safe(interval, tmp, &rm_intervals, link) {
		if (interval->flags & IOMMU_WRITE)
			writable = 1;
		list_del(&interval->link);
		kfree(interval);
	}

	usnic_uiom_put_pages(&uiomr->chunk_list, dirty && writable);
	spin_unlock(&pd->lock);
}

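/*
 * Program the PD's IOMMU domain with mappings for each interval,
 * coalescing physically contiguous pages into single iommu_map() calls.
 * On error, everything mapped so far is unmapped.
 */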
static int usnic_uiom_map_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_reg *uiomr)
{
	int i, err;
	size_t size;
	struct usnic_uiom_chunk *chunk;
	struct usnic_uiom_interval_node *interval_node;
	dma_addr_t pa;
	dma_addr_t pa_start = 0;
	dma_addr_t pa_end = 0;
	long int va_start = -EINVAL;
	struct usnic_uiom_pd *pd = uiomr->pd;
	long int va = uiomr->va & PAGE_MASK;
	int flags = IOMMU_READ | IOMMU_CACHE;

	flags |= (uiomr->writable) ? IOMMU_WRITE : 0;
	chunk = list_first_entry(&uiomr->chunk_list, struct usnic_uiom_chunk,
									list);
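	/*
	 * Walk the chunk scatterlists and the sorted intervals in lockstep;
	 * va tracks the user virtual address of the current page.
	 */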
	list_for_each_entry(interval_node, intervals, link) {
iter_chunk:
		for (i = 0; i < chunk->nents; i++, va += PAGE_SIZE) {
			pa = sg_phys(&chunk->page_list[i]);
			if ((va >> PAGE_SHIFT) < interval_node->start)
				continue;

			if ((va >> PAGE_SHIFT) == interval_node->start) {
				/* First page of the interval */
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			WARN_ON(va_start == -EINVAL);

			if ((pa_end + PAGE_SIZE != pa) &&
					(pa != pa_start)) {
				/* PAs are not contiguous */
				size = pa_end - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
							size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			if ((va >> PAGE_SHIFT) == interval_node->last) {
				/* Last page of the interval */
				size = pa - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
						size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				break;
			}

			if (pa != pa_start)
				pa_end += PAGE_SIZE;
		}

		if (i == chunk->nents) {
			/*
			 * Hit last entry of the chunk,
			 * hence advance to next chunk
			 */
			chunk = list_first_entry(&chunk->list,
							struct usnic_uiom_chunk,
							list);
			goto iter_chunk;
		}
	}

	return 0;

err_out:
	usnic_uiom_unmap_sorted_intervals(intervals, pd);
	return err;
}

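/*
 * Register a user memory region with the PD: pin the backing pages,
 * map the not-yet-mapped portions into the IOMMU domain, and insert the
 * region into the PD's interval tree.
 */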
struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
						unsigned long addr, size_t size,
						int writable, int dmasync)
{
	struct usnic_uiom_reg *uiomr;
	unsigned long va_base, vpn_start, vpn_last;
	unsigned long npages;
	int offset, err;
	LIST_HEAD(sorted_diff_intervals);

	/*
	 * The Intel IOMMU map path returns an error if a translation entry
	 * is changed from read to write.  This module cannot unmap and then
	 * remap the entry with the corrected permission, because that would
	 * open a small window in which hardware DMA may page fault.  Hence,
	 * make all entries writable.
	 */
	writable = 1;

	va_base = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT;
	vpn_start = (addr & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	uiomr = kmalloc(sizeof(*uiomr), GFP_KERNEL);
	if (!uiomr)
		return ERR_PTR(-ENOMEM);

	uiomr->va = va_base;
	uiomr->offset = offset;
	uiomr->length = size;
	uiomr->writable = writable;
	uiomr->pd = pd;

	err = usnic_uiom_get_pages(addr, size, writable, dmasync,
				   uiomr);
	if (err) {
		usnic_err("Failed get_pages vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_free_uiomr;
	}

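	/*
	 * Compute the sub-ranges of [vpn_start, vpn_last] that are not
	 * already mapped with the required permission; only those
	 * intervals need new IOMMU mappings.
	 */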
	spin_lock(&pd->lock);
	err = usnic_uiom_get_intervals_diff(vpn_start, vpn_last,
						(writable) ? IOMMU_WRITE : 0,
						IOMMU_WRITE,
						&pd->root,
						&sorted_diff_intervals);
	if (err) {
		usnic_err("Failed disjoint interval vpn [0x%lx,0x%lx] err %d\n",
						vpn_start, vpn_last, err);
		goto out_put_pages;
	}

	err = usnic_uiom_map_sorted_intervals(&sorted_diff_intervals, uiomr);
	if (err) {
		usnic_err("Failed map interval vpn [0x%lx,0x%lx] err %d\n",
						vpn_start, vpn_last, err);
		goto out_put_intervals;
	}

	err = usnic_uiom_insert_interval(&pd->root, vpn_start, vpn_last,
					(writable) ? IOMMU_WRITE : 0);
	if (err) {
		usnic_err("Failed insert interval vpn [0x%lx,0x%lx] err %d\n",
						vpn_start, vpn_last, err);
		goto out_unmap_intervals;
	}

	usnic_uiom_put_interval_set(&sorted_diff_intervals);
	spin_unlock(&pd->lock);

	return uiomr;

out_unmap_intervals:
	usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd);
out_put_intervals:
	usnic_uiom_put_interval_set(&sorted_diff_intervals);
out_put_pages:
	usnic_uiom_put_pages(&uiomr->chunk_list, 0);
	spin_unlock(&pd->lock);
	mmdrop(uiomr->owning_mm);
out_free_uiomr:
	kfree(uiomr);
	return ERR_PTR(err);
}

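/*
 * Common tail of registration release: drop the owning mm reference and
 * free the registration.
 */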
static void __usnic_uiom_release_tail(struct usnic_uiom_reg *uiomr)
{
	mmdrop(uiomr->owning_mm);
	kfree(uiomr);
}

static inline size_t usnic_uiom_num_pages(struct usnic_uiom_reg *uiomr)
{
	return PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
}

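/*
 * Deferred pinned_vm accounting, run from the workqueue when mmap_sem
 * could not be taken in usnic_uiom_reg_release().
 */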
static void usnic_uiom_release_defer(struct work_struct *work)
{
	struct usnic_uiom_reg *uiomr =
		container_of(work, struct usnic_uiom_reg, work);

	down_write(&uiomr->owning_mm->mmap_sem);
	uiomr->owning_mm->pinned_vm -= usnic_uiom_num_pages(uiomr);
	up_write(&uiomr->owning_mm->mmap_sem);

	__usnic_uiom_release_tail(uiomr);
}

void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr,
			    struct ib_ucontext *context)
{
	__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);

	/*
	 * We may be called with the mm's mmap_sem already held.  This
	 * can happen when a userspace munmap() is the call that drops
	 * the last reference to our file and calls our release
	 * method.  If there are memory regions to destroy, we'll end
	 * up here and not be able to take the mmap_sem.  In that case
	 * we defer the pinned_vm accounting to a workqueue.
	 */
	if (context->closing) {
		if (!down_write_trylock(&uiomr->owning_mm->mmap_sem)) {
			INIT_WORK(&uiomr->work, usnic_uiom_release_defer);
			queue_work(usnic_uiom_wq, &uiomr->work);
			return;
		}
	} else {
		down_write(&uiomr->owning_mm->mmap_sem);
	}
	uiomr->owning_mm->pinned_vm -= usnic_uiom_num_pages(uiomr);
	up_write(&uiomr->owning_mm->mmap_sem);

	__usnic_uiom_release_tail(uiomr);
}

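/*
 * Allocate a protection domain backed by a fresh IOMMU domain on the
 * PCI bus, with the usNIC fault handler installed.
 */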
struct usnic_uiom_pd *usnic_uiom_alloc_pd(void)
{
	struct usnic_uiom_pd *pd;
	struct iommu_domain *domain;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->domain = domain = iommu_domain_alloc(&pci_bus_type);
	if (!domain) {
		usnic_err("Failed to allocate IOMMU domain\n");
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL);

	spin_lock_init(&pd->lock);
	INIT_LIST_HEAD(&pd->devs);

	return pd;
}

void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd)
{
	iommu_domain_free(pd->domain);
	kfree(pd);
}

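/*
 * Attach a device to the PD's IOMMU domain and add it to the PD's
 * device list.  The IOMMU must support cache-coherent DMA.
 */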
int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int err;

	uiom_dev = kzalloc(sizeof(*uiom_dev), GFP_ATOMIC);
	if (!uiom_dev)
		return -ENOMEM;
	uiom_dev->dev = dev;

	err = iommu_attach_device(pd->domain, dev);
	if (err)
		goto out_free_dev;

	if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) {
		usnic_err("IOMMU of %s does not support cache coherency\n",
				dev_name(dev));
		err = -EINVAL;
		goto out_detach_device;
	}

	spin_lock(&pd->lock);
	list_add_tail(&uiom_dev->link, &pd->devs);
	pd->dev_cnt++;
	spin_unlock(&pd->lock);

	return 0;

out_detach_device:
	iommu_detach_device(pd->domain, dev);
out_free_dev:
	kfree(uiom_dev);
	return err;
}

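/*
 * Remove a device from the PD's device list and detach it from the
 * IOMMU domain.
 */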
void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int found = 0;

	spin_lock(&pd->lock);
	list_for_each_entry(uiom_dev, &pd->devs, link) {
		if (uiom_dev->dev == dev) {
			found = 1;
			break;
		}
	}

	if (!found) {
		usnic_err("Unable to free dev %s - not found\n",
				dev_name(dev));
		spin_unlock(&pd->lock);
		return;
	}

	list_del(&uiom_dev->link);
	kfree(uiom_dev);
	pd->dev_cnt--;
	spin_unlock(&pd->lock);

	iommu_detach_device(pd->domain, dev);
}

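/*
 * Return a NULL-terminated array of the devices attached to the PD.
 * Free with usnic_uiom_free_dev_list().
 */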
struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_dev *uiom_dev;
	struct device **devs;
	int i = 0;

	spin_lock(&pd->lock);
	devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC);
	if (!devs) {
		devs = ERR_PTR(-ENOMEM);
		goto out;
	}

	list_for_each_entry(uiom_dev, &pd->devs, link)
		devs[i++] = uiom_dev->dev;
out:
	spin_unlock(&pd->lock);
	return devs;
}

void usnic_uiom_free_dev_list(struct device **devs)
{
	kfree(devs);
}

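/*
 * Module init: require an IOMMU on the PCI bus and create the
 * workqueue used for deferred registration release.
 */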
int usnic_uiom_init(char *drv_name)
{
	if (!iommu_present(&pci_bus_type)) {
		usnic_err("IOMMU required but not present or enabled.  USNIC QPs will not function w/o enabling IOMMU\n");
		return -EPERM;
	}

	usnic_uiom_wq = create_workqueue(drv_name);
	if (!usnic_uiom_wq) {
		usnic_err("Unable to alloc wq for drv %s\n", drv_name);
		return -ENOMEM;
	}

	return 0;
}

void usnic_uiom_fini(void)
{
	flush_workqueue(usnic_uiom_wq);
	destroy_workqueue(usnic_uiom_wq);
}