/*-
 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/pctrie.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/stdarg.h>

#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/compat.h>

static device_probe_t linux_pci_probe;
static device_attach_t linux_pci_attach;
static device_detach_t linux_pci_detach;
static device_suspend_t linux_pci_suspend;
static device_resume_t linux_pci_resume;
static device_shutdown_t linux_pci_shutdown;
static pci_iov_init_t linux_pci_iov_init;
static pci_iov_uninit_t linux_pci_iov_uninit;
static pci_iov_add_vf_t linux_pci_iov_add_vf;

static device_method_t pci_methods[] = {
	DEVMETHOD(device_probe, linux_pci_probe),
	DEVMETHOD(device_attach, linux_pci_attach),
	DEVMETHOD(device_detach, linux_pci_detach),
	DEVMETHOD(device_suspend, linux_pci_suspend),
	DEVMETHOD(device_resume, linux_pci_resume),
	DEVMETHOD(device_shutdown, linux_pci_shutdown),
	DEVMETHOD(pci_iov_init, linux_pci_iov_init),
	DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf),
	DEVMETHOD_END
};

struct linux_dma_priv {
	uint64_t	dma_mask;
	struct mtx	lock;
	bus_dma_tag_t	dmat;
	struct pctrie	ptree;
};
#define	DMA_PRIV_LOCK(priv)	mtx_lock(&(priv)->lock)
#define	DMA_PRIV_UNLOCK(priv)	mtx_unlock(&(priv)->lock)

static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;
	int error;

	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);
	pdev->dev.dma_priv = priv;

	mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);

	pctrie_init(&priv->ptree);

	/* create a default DMA tag */
	error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
	if (error) {
		mtx_destroy(&priv->lock);
		free(priv, M_DEVBUF);
		pdev->dev.dma_priv = NULL;
	}
	return (error);
}

static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;

	priv = pdev->dev.dma_priv;
	if (priv->dmat)
		bus_dma_tag_destroy(priv->dmat);
	mtx_destroy(&priv->lock);
	free(priv, M_DEVBUF);
	pdev->dev.dma_priv = NULL;
	return (0);
}

int
linux_dma_tag_init(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat) {
		if (priv->dma_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat);
	}

	priv->dma_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat);
	return (-error);
}
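/*
 * Added commentary: in the LinuxKPI headers, dma_set_mask() and friends
 * are routed to linux_dma_tag_init() above, so a driver that narrows its
 * DMA mask replaces the default 64-bit tag created by
 * linux_pdev_dma_init().  A minimal sketch of such a caller, with a
 * hypothetical "mydrv" device (illustrative only, not part of this file):
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0)
 *		return (-EIO);
 */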
static struct pci_driver *
linux_pci_find(device_t dev, const struct pci_device_id **idp)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;
	uint16_t subdevice;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);
	subvendor = pci_get_subvendor(dev);
	subdevice = pci_get_subdevice(dev);

	spin_lock(&pci_lock);
	list_for_each_entry(pdrv, &pci_drivers, links) {
		for (id = pdrv->id_table; id->vendor != 0; id++) {
			if (vendor == id->vendor &&
			    (PCI_ANY_ID == id->device || device == id->device) &&
			    (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) &&
			    (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) {
				*idp = id;
				spin_unlock(&pci_lock);
				return (pdrv);
			}
		}
	}
	spin_unlock(&pci_lock);
	return (NULL);
}

static int
linux_pci_probe(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;

	if ((pdrv = linux_pci_find(dev, &id)) == NULL)
		return (ENXIO);
	if (device_get_driver(dev) != &pdrv->bsddriver)
		return (ENXIO);
	device_set_desc(dev, pdrv->name);
	return (0);
}
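/*
 * Added commentary: the match loop in linux_pci_find() stops at the first
 * entry whose vendor field is zero, so every id_table must end with an
 * all-zero sentinel.  A sketch of a conforming table, with made-up IDs
 * (illustrative only, not part of this file):
 *
 *	static const struct pci_device_id mydrv_ids[] = {
 *		{ .vendor = 0x15b3, .device = 0x1013,
 *		  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID },
 *		{ 0, }
 *	};
 */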
static int
linux_pci_attach(device_t dev)
{
	struct resource_list_entry *rle;
	struct pci_bus *pbus;
	struct pci_dev *pdev;
	struct pci_devinfo *dinfo;
	struct pci_driver *pdrv;
	const struct pci_device_id *id;
	device_t parent;
	int error;

	linux_set_current(curthread);

	pdrv = linux_pci_find(dev, &id);
	pdev = device_get_softc(dev);

	parent = device_get_parent(dev);
	if (pdrv->isdrm) {
		dinfo = device_get_ivars(parent);
		device_set_ivars(dev, dinfo);
	} else {
		dinfo = device_get_ivars(dev);
	}

	pdev->dev.parent = &linux_root_device;
	pdev->dev.bsddev = dev;
	INIT_LIST_HEAD(&pdev->dev.irqents);
	pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
	pdev->device = dinfo->cfg.device;
	pdev->vendor = dinfo->cfg.vendor;
	pdev->subsystem_vendor = dinfo->cfg.subvendor;
	pdev->subsystem_device = dinfo->cfg.subdevice;
	pdev->class = pci_get_class(dev);
	pdev->revision = pci_get_revid(dev);
	pdev->pdrv = pdrv;
	kobject_init(&pdev->dev.kobj, &linux_dev_ktype);
	kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev));
	kobject_add(&pdev->dev.kobj, &linux_root_device.kobj,
	    kobject_name(&pdev->dev.kobj));
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0);
	if (rle != NULL)
		pdev->dev.irq = rle->start;
	else
		pdev->dev.irq = LINUX_IRQ_INVALID;
	pdev->irq = pdev->dev.irq;
	error = linux_pdev_dma_init(pdev);
	if (error)
		goto out_dma_init;

	pbus = malloc(sizeof(*pbus), M_DEVBUF, M_WAITOK | M_ZERO);
	pbus->self = pdev;
	pbus->number = pci_get_bus(dev);
	pbus->domain = pci_get_domain(dev);
	pdev->bus = pbus;

	spin_lock(&pci_lock);
	list_add(&pdev->links, &pci_devices);
	spin_unlock(&pci_lock);

	error = pdrv->probe(pdev, id);
	if (error)
		goto out_probe;
	return (0);

out_probe:
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);
out_dma_init:
	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);
	return (-error);
}

static int
linux_pci_detach(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	pdev->pdrv->remove(pdev);

	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);

	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	device_set_desc(dev, NULL);
	put_device(&pdev->dev);

	return (0);
}
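/*
 * Added commentary on the sign convention: pdrv->probe() and
 * pdrv->remove() are Linux-style entry points, so probe returns zero or a
 * negative errno, while newbus expects positive errno values; hence the
 * "return (-error)" in linux_pci_attach() above.  The same inversion
 * appears throughout the suspend/resume and registration glue below.
 */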
static int
linux_pci_suspend(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pm_message pm = { };
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->suspend != NULL)
		error = -pdev->pdrv->suspend(pdev, pm);
	else if (pmops != NULL && pmops->suspend != NULL) {
		error = -pmops->suspend(&pdev->dev);
		if (error == 0 && pmops->suspend_late != NULL)
			error = -pmops->suspend_late(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_resume(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->resume != NULL)
		error = -pdev->pdrv->resume(pdev);
	else if (pmops != NULL && pmops->resume != NULL) {
		if (pmops->resume_early != NULL)
			error = -pmops->resume_early(&pdev->dev);
		if (error == 0 && pmops->resume != NULL)
			error = -pmops->resume(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_shutdown(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->shutdown != NULL)
		pdev->pdrv->shutdown(pdev);
	return (0);
}

static int
linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_init != NULL)
		error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config);
	else
		error = EINVAL;
	return (error);
}

static void
linux_pci_iov_uninit(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_uninit != NULL)
		pdev->pdrv->bsd_iov_uninit(dev);
}

static int
linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_add_vf != NULL)
		error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config);
	else
		error = EINVAL;
	return (error);
}

static int
_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
{
	int error;

	linux_set_current(curthread);
	spin_lock(&pci_lock);
	list_add(&pdrv->links, &pci_drivers);
	spin_unlock(&pci_lock);
	pdrv->bsddriver.name = pdrv->name;
	pdrv->bsddriver.methods = pci_methods;
	pdrv->bsddriver.size = sizeof(struct pci_dev);

	mtx_lock(&Giant);
	error = devclass_add_driver(dc, &pdrv->bsddriver,
	    BUS_PASS_DEFAULT, &pdrv->bsdclass);
	mtx_unlock(&Giant);
	return (-error);
}

int
linux_pci_register_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_find("pci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = false;
	return (_linux_pci_register_driver(pdrv, dc));
}

unsigned long
pci_resource_start(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;
	rman_res_t newstart;
	device_t dev;

	if ((rle = linux_pci_get_bar(pdev, bar)) == NULL)
		return (0);
	dev = pci_find_dbsf(pdev->bus->domain, pdev->bus->number,
	    PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
	MPASS(dev != NULL);
	if (BUS_TRANSLATE_RESOURCE(dev, rle->type, rle->start, &newstart)) {
		device_printf(pdev->dev.bsddev, "translate of %#jx failed\n",
		    (uintmax_t)rle->start);
		return (0);
	}
	return (newstart);
}

unsigned long
pci_resource_len(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;

	if ((rle = linux_pci_get_bar(pdev, bar)) == NULL)
		return (0);
	return (rle->count);
}

int
linux_pci_register_drm_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_create("vgapci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = true;
	pdrv->name = "drmn";
	return (_linux_pci_register_driver(pdrv, dc));
}

void
linux_pci_unregister_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("pci");

	spin_lock(&pci_lock);
	list_del(&pdrv->links);
	spin_unlock(&pci_lock);
	mtx_lock(&Giant);
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	mtx_unlock(&Giant);
}

void
linux_pci_unregister_drm_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("vgapci");

	spin_lock(&pci_lock);
	list_del(&pdrv->links);
	spin_unlock(&pci_lock);
	mtx_lock(&Giant);
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	mtx_unlock(&Giant);
}
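/*
 * Added commentary: a LinuxKPI consumer normally fills in a struct
 * pci_driver and registers it from its module setup path; in the headers,
 * pci_register_driver() is expected to resolve to
 * linux_pci_register_driver() above.  A minimal sketch with hypothetical
 * names (illustrative only, not part of this file):
 *
 *	static struct pci_driver mydrv_driver = {
 *		.name = "mydrv",
 *		.id_table = mydrv_ids,
 *		.probe = mydrv_probe,
 *		.remove = mydrv_remove,
 *	};
 *
 *	error = pci_register_driver(&mydrv_driver);
 */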
CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));

struct linux_dma_obj {
	void		*vaddr;
	uint64_t	dma_addr;
	bus_dmamap_t	dmamap;
};

static uma_zone_t linux_dma_trie_zone;
static uma_zone_t linux_dma_obj_zone;

static void
linux_dma_init(void *arg)
{

	linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
	    pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
	    UMA_ALIGN_PTR, 0);
	linux_dma_obj_zone = uma_zcreate("linux_dma_object",
	    sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
}
SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);

static void
linux_dma_uninit(void *arg)
{

	uma_zdestroy(linux_dma_obj_zone);
	uma_zdestroy(linux_dma_trie_zone);
}
SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);

static void *
linux_dma_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT));
}

static void
linux_dma_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(linux_dma_trie_zone, node);
}

PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
    linux_dma_trie_free);

void *
linux_dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t flag)
{
	struct linux_dma_priv *priv;
	vm_paddr_t high;
	size_t align;
	void *mem;

	if (dev == NULL || dev->dma_priv == NULL) {
		*dma_handle = 0;
		return (NULL);
	}
	priv = dev->dma_priv;
	if (priv->dma_mask)
		high = priv->dma_mask;
	else if (flag & GFP_DMA32)
		high = BUS_SPACE_MAXADDR_32BIT;
	else
		high = BUS_SPACE_MAXADDR;
	align = PAGE_SIZE << get_order(size);
	mem = (void *)kmem_alloc_contig(size, flag, 0, high, align, 0,
	    VM_MEMATTR_DEFAULT);
	if (mem != NULL) {
		*dma_handle = linux_dma_map_phys(dev, vtophys(mem), size);
		if (*dma_handle == 0) {
			kmem_free((vm_offset_t)mem, size);
			mem = NULL;
		}
	} else {
		*dma_handle = 0;
	}
	return (mem);
}
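/*
 * Added commentary: the usual Linux-side idiom built on the allocator
 * above obtains both a kernel virtual address and a bus address for a
 * coherent buffer.  A sketch with hypothetical names (illustrative only,
 * not part of this file):
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(&pdev->dev, ring_size, &ring_dma,
 *	    GFP_KERNEL);
 *	if (ring == NULL)
 *		return (-ENOMEM);
 */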
#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	/*
	 * If the resultant mapping will be entirely 1:1 with the
	 * physical address, short-circuit the remainder of the
	 * bus_dma API.  This avoids tracking collisions in the pctrie
	 * with the additional benefit of reducing overhead.
	 */
	if (bus_dma_id_mapped(priv->dmat, phys, len))
		return (phys);

	obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT);
	if (obj == NULL) {
		return (0);
	}

	DMA_PRIV_LOCK(priv);
	if (bus_dmamap_create(priv->dmat, 0, &obj->dmamap) != 0) {
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	nseg = -1;
	if (_bus_dmamap_load_phys(priv->dmat, obj->dmamap, phys, len,
	    BUS_DMA_NOWAIT, &seg, &nseg) != 0) {
		bus_dmamap_destroy(priv->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
	if (error != 0) {
		bus_dmamap_unload(priv->dmat, obj->dmamap);
		bus_dmamap_destroy(priv->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}
	DMA_PRIV_UNLOCK(priv);
	return (obj->dma_addr);
}
#else
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	return (phys);
}
#endif

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);
	bus_dmamap_unload(priv->dmat, obj->dmamap);
	bus_dmamap_destroy(priv->dmat, obj->dmamap);
	DMA_PRIV_UNLOCK(priv);

	uma_zfree(linux_dma_obj_zone, obj);
}
#else
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
}
#endif
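/*
 * Added commentary: the 1:1 short-circuit in linux_dma_map_phys() is
 * symmetric with linux_dma_unmap() above.  An identity-mapped address was
 * never inserted into the pctrie, so the lookup misses and the unmap
 * degenerates to a no-op, which is the desired behavior.  A streaming
 * round trip for a buffer "buf" (hypothetical name) looks like:
 *
 *	busaddr = linux_dma_map_phys(dev, vtophys(buf), len);
 *	...device performs DMA...
 *	linux_dma_unmap(dev, busaddr, len);
 */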
int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct linux_dma_priv *priv;
	struct scatterlist *sg;
	int i, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	/* create common DMA map in the first S/G entry */
	if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
		DMA_PRIV_UNLOCK(priv);
		return (0);
	}

	/* load all S/G list entries */
	for_each_sg(sgl, sg, nents, i) {
		nseg = -1;
		if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
		    sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
		    &seg, &nseg) != 0) {
			bus_dmamap_unload(priv->dmat, sgl->dma_map);
			bus_dmamap_destroy(priv->dmat, sgl->dma_map);
			DMA_PRIV_UNLOCK(priv);
			return (0);
		}
		KASSERT(nseg == 0,
		    ("More than one segment (nseg=%d)", nseg + 1));

		sg_dma_address(sg) = seg.ds_addr;
	}
	DMA_PRIV_UNLOCK(priv);

	return (nents);
}

void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);
	bus_dmamap_unload(priv->dmat, sgl->dma_map);
	bus_dmamap_destroy(priv->dmat, sgl->dma_map);
	DMA_PRIV_UNLOCK(priv);
}
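/*
 * Added commentary: because one shared DMA map (stored in the first S/G
 * entry) backs the whole list, a single unmap call tears down every
 * entry loaded above.  A sketch of the expected caller pattern, with
 * hypothetical names (illustrative only, not part of this file):
 *
 *	if (dma_map_sg(&pdev->dev, sgl, nents, DMA_TO_DEVICE) == 0)
 *		return (-ENOMEM);
 *	for_each_sg(sgl, sg, nents, i)
 *		mydrv_write_desc(ring, sg_dma_address(sg), sg->length);
 *	dma_unmap_sg(&pdev->dev, sgl, nents, DMA_TO_DEVICE);
 */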
struct dma_pool {
	struct device	*pool_device;
	uma_zone_t	pool_zone;
	struct mtx	pool_lock;
	bus_dma_tag_t	pool_dmat;
	size_t		pool_entry_size;
	struct pctrie	pool_ptree;
};

#define	DMA_POOL_LOCK(pool)	mtx_lock(&(pool)->pool_lock)
#define	DMA_POOL_UNLOCK(pool)	mtx_unlock(&(pool)->pool_lock)

static inline int
dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;
	int error, nseg;
	bus_dma_segment_t seg;

	nseg = -1;
	DMA_POOL_LOCK(pool);
	error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
	    vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
	    &seg, &nseg);
	DMA_POOL_UNLOCK(pool);
	if (error != 0) {
		return (error);
	}
	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	return (0);
}

static void
dma_pool_obj_dtor(void *mem, int size, void *arg)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;

	DMA_POOL_LOCK(pool);
	bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
	DMA_POOL_UNLOCK(pool);
}

static int
dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
	struct dma_pool *pool = arg;
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, i;

	priv = pool->pool_device->dma_priv;
	for (i = 0; i < count; i++) {
		obj = uma_zalloc(linux_dma_obj_zone, flags);
		if (obj == NULL)
			break;

		error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
		    BUS_DMA_NOWAIT, &obj->dmamap);
		if (error != 0) {
			uma_zfree(linux_dma_obj_zone, obj);
			break;
		}

		store[i] = obj;
	}

	return (i);
}

static void
dma_pool_obj_release(void *arg, void **store, int count)
{
	struct dma_pool *pool = arg;
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int i;

	priv = pool->pool_device->dma_priv;
	for (i = 0; i < count; i++) {
		obj = store[i];
		bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
		uma_zfree(linux_dma_obj_zone, obj);
	}
}

struct dma_pool *
linux_dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
{
	struct linux_dma_priv *priv;
	struct dma_pool *pool;

	priv = dev->dma_priv;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	pool->pool_device = dev;
	pool->pool_entry_size = size;

	if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    align, boundary,		/* alignment, boundary */
	    priv->dma_mask,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &pool->pool_dmat)) {
		kfree(pool);
		return (NULL);
	}

	pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,
	    dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,
	    dma_pool_obj_release, pool, 0);

	mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF);
	pctrie_init(&pool->pool_ptree);

	return (pool);
}

void
linux_dma_pool_destroy(struct dma_pool *pool)
{

	uma_zdestroy(pool->pool_zone);
	bus_dma_tag_destroy(pool->pool_dmat);
	mtx_destroy(&pool->pool_lock);
	kfree(pool);
}

void *
linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    dma_addr_t *handle)
{
	struct linux_dma_obj *obj;

	obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags);
	if (obj == NULL)
		return (NULL);

	DMA_POOL_LOCK(pool);
	if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
		DMA_POOL_UNLOCK(pool);
		uma_zfree_arg(pool->pool_zone, obj, pool);
		return (NULL);
	}
	DMA_POOL_UNLOCK(pool);

	*handle = obj->dma_addr;
	return (obj->vaddr);
}

void
linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
{
	struct linux_dma_obj *obj;

	DMA_POOL_LOCK(pool);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
	if (obj == NULL) {
		DMA_POOL_UNLOCK(pool);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
	DMA_POOL_UNLOCK(pool);

	uma_zfree_arg(pool->pool_zone, obj, pool);
}
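/*
 * Added commentary: the dma_pool implementation above backs the Linux
 * idiom for small fixed-size descriptors.  Allocations come pre-mapped
 * out of the UMA cache, and the pctrie lets the free path recover the
 * object from its bus address alone.  A sketch with hypothetical names
 * (illustrative only, not part of this file):
 *
 *	pool = dma_pool_create("mydrv-desc", &pdev->dev, 64, 64, 0);
 *	desc = dma_pool_alloc(pool, GFP_KERNEL, &desc_dma);
 *	...
 *	dma_pool_free(pool, desc, desc_dma);
 *	dma_pool_destroy(pool);
 */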