/*
 * Copyright (c) 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies.  All rights reserved.
 * Copyright (c) 2013 Cisco Systems.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/sched.h>
#include <linux/hugetlb.h>
#include <linux/dma-attrs.h>
#include <linux/iommu.h>
#include <linux/workqueue.h>
#include <linux/list.h>
#include <linux/pci.h>

#include "usnic_log.h"
#include "usnic_uiom.h"
#include "usnic_uiom_interval_tree.h"

static struct workqueue_struct *usnic_uiom_wq;

/*
 * Number of scatterlist entries that fit in one page after the
 * usnic_uiom_chunk header, computed from the stride between two
 * consecutive page_list[] elements.
 */
#define USNIC_UIOM_PAGE_CHUNK						\
	((PAGE_SIZE - offsetof(struct usnic_uiom_chunk, page_list)) /	\
	((void *) &((struct usnic_uiom_chunk *) 0)->page_list[1] -	\
	(void *) &((struct usnic_uiom_chunk *) 0)->page_list[0]))

static void usnic_uiom_reg_account(struct work_struct *work)
{
	struct usnic_uiom_reg *umem = container_of(work,
						struct usnic_uiom_reg, work);

	down_write(&umem->mm->mmap_sem);
	umem->mm->locked_vm -= umem->diff;
	up_write(&umem->mm->mmap_sem);
	mmput(umem->mm);
	kfree(umem);
}

static int usnic_uiom_dma_fault(struct iommu_domain *domain,
				struct device *dev,
				unsigned long iova, int flags,
				void *token)
{
	usnic_err("Device %s iommu fault domain 0x%pK va 0x%lx flags 0x%x\n",
		dev_name(dev),
		domain, iova, flags);
	return -ENOSYS;
}

static void usnic_uiom_put_pages(struct list_head *chunk_list, int dirty)
{
	struct usnic_uiom_chunk *chunk, *tmp;
	struct page *page;
	struct scatterlist *sg;
	int i;
	dma_addr_t pa;

	list_for_each_entry_safe(chunk, tmp, chunk_list, list) {
		for_each_sg(chunk->page_list, sg, chunk->nents, i) {
			page = sg_page(sg);
			pa = sg_phys(sg);
			if (dirty)
				set_page_dirty_lock(page);
			put_page(page);
			usnic_dbg("pa: %pa\n", &pa);
		}
		kfree(chunk);
	}
}
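/*
 * usnic_uiom_get_pages() below pins the user pages backing
 * [addr, addr + size) and strings them into scatterlist chunks; the
 * chunks are undone by usnic_uiom_put_pages() above.  A minimal sketch
 * of how a caller might walk the resulting list (hypothetical caller
 * code; use_page() is a made-up placeholder):
 *
 *	LIST_HEAD(chunk_list);
 *	struct usnic_uiom_chunk *chunk;
 *	struct scatterlist *sg;
 *	int i;
 *
 *	if (!usnic_uiom_get_pages(addr, size, 1, 0, &chunk_list)) {
 *		list_for_each_entry(chunk, &chunk_list, list)
 *			for_each_sg(chunk->page_list, sg, chunk->nents, i)
 *				use_page(sg_page(sg));
 *		usnic_uiom_put_pages(&chunk_list, 1);
 *	}
 */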
static int usnic_uiom_get_pages(unsigned long addr, size_t size, int writable,
				int dmasync, struct list_head *chunk_list)
{
	struct page **page_list;
	struct scatterlist *sg;
	struct usnic_uiom_chunk *chunk;
	unsigned long locked;
	unsigned long lock_limit;
	unsigned long cur_base;
	unsigned long npages;
	int ret;
	int off;
	int i;
	int flags;
	dma_addr_t pa;
	DEFINE_DMA_ATTRS(attrs);

	if (dmasync)
		dma_set_attr(DMA_ATTR_WRITE_BARRIER, &attrs);

	if (!can_do_mlock())
		return -EPERM;

	INIT_LIST_HEAD(chunk_list);

	page_list = (struct page **) __get_free_page(GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	npages = PAGE_ALIGN(size + (addr & ~PAGE_MASK)) >> PAGE_SHIFT;

	down_write(&current->mm->mmap_sem);

	locked = npages + current->mm->locked_vm;
	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;

	if ((locked > lock_limit) && !capable(CAP_IPC_LOCK)) {
		ret = -ENOMEM;
		goto out;
	}

	flags = IOMMU_READ | IOMMU_CACHE;
	flags |= (writable) ? IOMMU_WRITE : 0;
	cur_base = addr & PAGE_MASK;
	ret = 0;

	while (npages) {
		ret = get_user_pages(current, current->mm, cur_base,
					min_t(unsigned long, npages,
					PAGE_SIZE / sizeof(struct page *)),
					1, !writable, page_list, NULL);

		if (ret < 0)
			goto out;

		npages -= ret;
		off = 0;

		while (ret) {
			chunk = kmalloc(sizeof(*chunk) +
					sizeof(struct scatterlist) *
					min_t(int, ret, USNIC_UIOM_PAGE_CHUNK),
					GFP_KERNEL);
			if (!chunk) {
				ret = -ENOMEM;
				goto out;
			}

			chunk->nents = min_t(int, ret, USNIC_UIOM_PAGE_CHUNK);
			sg_init_table(chunk->page_list, chunk->nents);
			for_each_sg(chunk->page_list, sg, chunk->nents, i) {
				sg_set_page(sg, page_list[i + off],
						PAGE_SIZE, 0);
				pa = sg_phys(sg);
				usnic_dbg("va: 0x%lx pa: %pa\n",
						cur_base + i*PAGE_SIZE, &pa);
			}
			cur_base += chunk->nents * PAGE_SIZE;
			ret -= chunk->nents;
			off += chunk->nents;
			list_add_tail(&chunk->list, chunk_list);
		}

		ret = 0;
	}

out:
	if (ret < 0)
		usnic_uiom_put_pages(chunk_list, 0);
	else
		current->mm->locked_vm = locked;

	up_write(&current->mm->mmap_sem);
	free_page((unsigned long) page_list);
	return ret;
}

static void usnic_uiom_unmap_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_interval_node *interval, *tmp;
	unsigned long va, size;

	list_for_each_entry_safe(interval, tmp, intervals, link) {
		va = interval->start << PAGE_SHIFT;
		size = ((interval->last - interval->start) + 1) << PAGE_SHIFT;
		while (size > 0) {
			/* Workaround for RH 970401: unmap one page at a time */
			usnic_dbg("va 0x%lx size 0x%lx", va, PAGE_SIZE);
			iommu_unmap(pd->domain, va, PAGE_SIZE);
			va += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
	}
}
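/*
 * Worked example of the vpn arithmetic used below (illustrative values,
 * assuming 4K pages): a registration made at addr 0x10002100 with
 * length 0x3000 is stored with va = 0x10002000 and offset = 0x100, so
 * npages = PAGE_ALIGN(0x3000 + 0x100) >> PAGE_SHIFT = 4 and the
 * interval removed from the tree is [0x10002, 0x10005].
 */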
static void __usnic_uiom_reg_release(struct usnic_uiom_pd *pd,
					struct usnic_uiom_reg *uiomr,
					int dirty)
{
	int npages;
	unsigned long vpn_start, vpn_last;
	struct usnic_uiom_interval_node *interval, *tmp;
	int writable = 0;
	LIST_HEAD(rm_intervals);

	npages = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;
	vpn_start = (uiomr->va & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	spin_lock(&pd->lock);
	usnic_uiom_remove_interval(&pd->rb_root, vpn_start,
					vpn_last, &rm_intervals);
	usnic_uiom_unmap_sorted_intervals(&rm_intervals, pd);

	list_for_each_entry_safe(interval, tmp, &rm_intervals, link) {
		if (interval->flags & IOMMU_WRITE)
			writable = 1;
		list_del(&interval->link);
		kfree(interval);
	}

	usnic_uiom_put_pages(&uiomr->chunk_list, dirty & writable);
	spin_unlock(&pd->lock);
}

static int usnic_uiom_map_sorted_intervals(struct list_head *intervals,
						struct usnic_uiom_reg *uiomr)
{
	int i, err;
	size_t size;
	struct usnic_uiom_chunk *chunk;
	struct usnic_uiom_interval_node *interval_node;
	dma_addr_t pa;
	dma_addr_t pa_start = 0;
	dma_addr_t pa_end = 0;
	long int va_start = -EINVAL;
	struct usnic_uiom_pd *pd = uiomr->pd;
	long int va = uiomr->va & PAGE_MASK;
	int flags = IOMMU_READ | IOMMU_CACHE;

	flags |= (uiomr->writable) ? IOMMU_WRITE : 0;
	chunk = list_first_entry(&uiomr->chunk_list, struct usnic_uiom_chunk,
					list);
	list_for_each_entry(interval_node, intervals, link) {
iter_chunk:
		for (i = 0; i < chunk->nents; i++, va += PAGE_SIZE) {
			pa = sg_phys(&chunk->page_list[i]);
			if ((va >> PAGE_SHIFT) < interval_node->start)
				continue;

			if ((va >> PAGE_SHIFT) == interval_node->start) {
				/* First page of the interval */
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			WARN_ON(va_start == -EINVAL);

			if ((pa_end + PAGE_SIZE != pa) &&
					(pa != pa_start)) {
				/* PAs are not contiguous */
				size = pa_end - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
							size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				va_start = va;
				pa_start = pa;
				pa_end = pa;
			}

			if ((va >> PAGE_SHIFT) == interval_node->last) {
				/* Last page of the interval */
				size = pa - pa_start + PAGE_SIZE;
				usnic_dbg("va 0x%lx pa %pa size 0x%zx flags 0x%x\n",
					va_start, &pa_start, size, flags);
				err = iommu_map(pd->domain, va_start, pa_start,
							size, flags);
				if (err) {
					usnic_err("Failed to map va 0x%lx pa %pa size 0x%zx with err %d\n",
						va_start, &pa_start, size, err);
					goto err_out;
				}
				break;
			}

			if (pa != pa_start)
				pa_end += PAGE_SIZE;
		}

		if (i == chunk->nents) {
			/*
			 * Hit last entry of the chunk,
			 * hence advance to next chunk
			 */
			chunk = list_first_entry(&chunk->list,
						struct usnic_uiom_chunk,
						list);
			goto iter_chunk;
		}
	}

	return 0;

err_out:
	usnic_uiom_unmap_sorted_intervals(intervals, pd);
	return err;
}
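/*
 * The loop above coalesces physically contiguous pages into a single
 * iommu_map() call.  For example (illustrative PAs): pages at 0x8000,
 * 0x9000 and 0xa000 backing one interval are emitted as a single
 * 0x3000-byte mapping, whereas 0x8000, 0x9000, 0x20000 is split into a
 * 0x2000-byte mapping followed by a 0x1000-byte one.
 */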
struct usnic_uiom_reg *usnic_uiom_reg_get(struct usnic_uiom_pd *pd,
						unsigned long addr, size_t size,
						int writable, int dmasync)
{
	struct usnic_uiom_reg *uiomr;
	unsigned long va_base, vpn_start, vpn_last;
	unsigned long npages;
	int offset, err;
	LIST_HEAD(sorted_diff_intervals);

	/*
	 * Intel IOMMU map throws an error if a translation entry is
	 * changed from read to write.  This module may not unmap and
	 * then remap the entry after fixing the permission because
	 * that opens up a small window where hw DMA may page fault.
	 * Hence, make all entries writable.
	 */
	writable = 1;

	va_base = addr & PAGE_MASK;
	offset = addr & ~PAGE_MASK;
	npages = PAGE_ALIGN(size + offset) >> PAGE_SHIFT;
	vpn_start = (addr & PAGE_MASK) >> PAGE_SHIFT;
	vpn_last = vpn_start + npages - 1;

	uiomr = kmalloc(sizeof(*uiomr), GFP_KERNEL);
	if (!uiomr)
		return ERR_PTR(-ENOMEM);

	uiomr->va = va_base;
	uiomr->offset = offset;
	uiomr->length = size;
	uiomr->writable = writable;
	uiomr->pd = pd;

	err = usnic_uiom_get_pages(addr, size, writable, dmasync,
					&uiomr->chunk_list);
	if (err) {
		usnic_err("Failed get_pages vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_free_uiomr;
	}

	spin_lock(&pd->lock);
	err = usnic_uiom_get_intervals_diff(vpn_start, vpn_last,
						(writable) ? IOMMU_WRITE : 0,
						IOMMU_WRITE,
						&pd->rb_root,
						&sorted_diff_intervals);
	if (err) {
		usnic_err("Failed disjoint interval vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_put_pages;
	}

	err = usnic_uiom_map_sorted_intervals(&sorted_diff_intervals, uiomr);
	if (err) {
		usnic_err("Failed map interval vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_put_intervals;
	}

	err = usnic_uiom_insert_interval(&pd->rb_root, vpn_start, vpn_last,
					(writable) ? IOMMU_WRITE : 0);
	if (err) {
		usnic_err("Failed insert interval vpn [0x%lx,0x%lx] err %d\n",
				vpn_start, vpn_last, err);
		goto out_unmap_intervals;
	}

	usnic_uiom_put_interval_set(&sorted_diff_intervals);
	spin_unlock(&pd->lock);

	return uiomr;

out_unmap_intervals:
	usnic_uiom_unmap_sorted_intervals(&sorted_diff_intervals, pd);
out_put_intervals:
	usnic_uiom_put_interval_set(&sorted_diff_intervals);
out_put_pages:
	usnic_uiom_put_pages(&uiomr->chunk_list, 0);
	spin_unlock(&pd->lock);
out_free_uiomr:
	kfree(uiomr);
	return ERR_PTR(err);
}
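/*
 * Typical registration lifetime as a hypothetical caller might drive
 * it (sketch only; error handling beyond the IS_ERR() check elided):
 *
 *	struct usnic_uiom_reg *uiomr;
 *
 *	uiomr = usnic_uiom_reg_get(pd, addr, size, 0, 0);
 *	if (IS_ERR(uiomr))
 *		return PTR_ERR(uiomr);
 *	...				// hand the region to hardware
 *	usnic_uiom_reg_release(uiomr, 0);
 */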
void usnic_uiom_reg_release(struct usnic_uiom_reg *uiomr, int closing)
{
	struct mm_struct *mm;
	unsigned long diff;

	__usnic_uiom_reg_release(uiomr->pd, uiomr, 1);

	mm = get_task_mm(current);
	if (!mm) {
		kfree(uiomr);
		return;
	}

	diff = PAGE_ALIGN(uiomr->length + uiomr->offset) >> PAGE_SHIFT;

	/*
	 * We may be called with the mm's mmap_sem already held.  This
	 * can happen when a userspace munmap() is the call that drops
	 * the last reference to our file and calls our release
	 * method.  If there are memory regions to destroy, we'll end
	 * up here and not be able to take the mmap_sem.  In that case
	 * we defer the locked_vm accounting to the usnic_uiom_wq
	 * workqueue.
	 */
	if (closing) {
		if (!down_write_trylock(&mm->mmap_sem)) {
			INIT_WORK(&uiomr->work, usnic_uiom_reg_account);
			uiomr->mm = mm;
			uiomr->diff = diff;

			queue_work(usnic_uiom_wq, &uiomr->work);
			return;
		}
	} else
		down_write(&mm->mmap_sem);

	mm->locked_vm -= diff;
	up_write(&mm->mmap_sem);
	mmput(mm);
	kfree(uiomr);
}

struct usnic_uiom_pd *usnic_uiom_alloc_pd(void)
{
	struct usnic_uiom_pd *pd;
	void *domain;

	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
	if (!pd)
		return ERR_PTR(-ENOMEM);

	pd->domain = domain = iommu_domain_alloc(&pci_bus_type);
	if (!domain) {
		usnic_err("Failed to allocate IOMMU domain");
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	iommu_set_fault_handler(pd->domain, usnic_uiom_dma_fault, NULL);

	spin_lock_init(&pd->lock);
	INIT_LIST_HEAD(&pd->devs);

	return pd;
}

void usnic_uiom_dealloc_pd(struct usnic_uiom_pd *pd)
{
	iommu_domain_free(pd->domain);
	kfree(pd);
}

int usnic_uiom_attach_dev_to_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int err;

	uiom_dev = kzalloc(sizeof(*uiom_dev), GFP_ATOMIC);
	if (!uiom_dev)
		return -ENOMEM;
	uiom_dev->dev = dev;

	err = iommu_attach_device(pd->domain, dev);
	if (err)
		goto out_free_dev;

	if (!iommu_capable(dev->bus, IOMMU_CAP_CACHE_COHERENCY)) {
		usnic_err("IOMMU of %s does not support cache coherency\n",
				dev_name(dev));
		err = -EINVAL;
		goto out_detach_device;
	}

	spin_lock(&pd->lock);
	list_add_tail(&uiom_dev->link, &pd->devs);
	pd->dev_cnt++;
	spin_unlock(&pd->lock);

	return 0;

out_detach_device:
	iommu_detach_device(pd->domain, dev);
out_free_dev:
	kfree(uiom_dev);
	return err;
}

void usnic_uiom_detach_dev_from_pd(struct usnic_uiom_pd *pd, struct device *dev)
{
	struct usnic_uiom_dev *uiom_dev;
	int found = 0;

	spin_lock(&pd->lock);
	list_for_each_entry(uiom_dev, &pd->devs, link) {
		if (uiom_dev->dev == dev) {
			found = 1;
			break;
		}
	}

	if (!found) {
		usnic_err("Unable to free dev %s - not found\n",
				dev_name(dev));
		spin_unlock(&pd->lock);
		return;
	}

	list_del(&uiom_dev->link);
	kfree(uiom_dev);
	pd->dev_cnt--;
	spin_unlock(&pd->lock);

	iommu_detach_device(pd->domain, dev);
}

struct device **usnic_uiom_get_dev_list(struct usnic_uiom_pd *pd)
{
	struct usnic_uiom_dev *uiom_dev;
	struct device **devs;
	int i = 0;

	spin_lock(&pd->lock);
	devs = kcalloc(pd->dev_cnt + 1, sizeof(*devs), GFP_ATOMIC);
	if (!devs) {
		devs = ERR_PTR(-ENOMEM);
		goto out;
	}

	list_for_each_entry(uiom_dev, &pd->devs, link)
		devs[i++] = uiom_dev->dev;
out:
	spin_unlock(&pd->lock);
	return devs;
}

void usnic_uiom_free_dev_list(struct device **devs)
{
	kfree(devs);
}
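/*
 * The protection-domain helpers above are meant to be used in pairs; a
 * minimal sketch of the attach/detach lifecycle (hypothetical caller
 * code, assuming a struct device *dev is already in hand):
 *
 *	struct usnic_uiom_pd *pd = usnic_uiom_alloc_pd();
 *
 *	if (!IS_ERR(pd) && !usnic_uiom_attach_dev_to_pd(pd, dev)) {
 *		...			// register memory, run traffic
 *		usnic_uiom_detach_dev_from_pd(pd, dev);
 *	}
 *	if (!IS_ERR(pd))
 *		usnic_uiom_dealloc_pd(pd);
 */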
int usnic_uiom_init(char *drv_name)
{
	if (!iommu_present(&pci_bus_type)) {
		usnic_err("IOMMU required but not present or enabled.  USNIC QPs will not function w/o enabling IOMMU\n");
		return -EPERM;
	}

	usnic_uiom_wq = create_workqueue(drv_name);
	if (!usnic_uiom_wq) {
		usnic_err("Unable to alloc wq for drv %s\n", drv_name);
		return -ENOMEM;
	}

	return 0;
}

void usnic_uiom_fini(void)
{
	flush_workqueue(usnic_uiom_wq);
	destroy_workqueue(usnic_uiom_wq);
}
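/*
 * Module wiring (sketch, assuming the usual driver entry points):
 * usnic_uiom_init() is called once from the driver's module_init()
 * path, before any PD is allocated, and usnic_uiom_fini() from
 * module_exit() after all registrations are released, so deferred
 * locked_vm accounting queued above still has a workqueue to run on.
 *
 *	static int __init usnic_mod_init(void)		// hypothetical
 *	{
 *		return usnic_uiom_init(DRV_NAME);	// DRV_NAME assumed
 *	}
 */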