/*-
 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/pciio.h>
#include <sys/pctrie.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/stdarg.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <dev/pci/pci_iov.h>

#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/compat.h>

static device_probe_t linux_pci_probe;
static device_attach_t linux_pci_attach;
static device_detach_t linux_pci_detach;
static device_suspend_t linux_pci_suspend;
static device_resume_t linux_pci_resume;
static device_shutdown_t linux_pci_shutdown;
static pci_iov_init_t linux_pci_iov_init;
static pci_iov_uninit_t linux_pci_iov_uninit;
static pci_iov_add_vf_t linux_pci_iov_add_vf;

static device_method_t pci_methods[] = {
	DEVMETHOD(device_probe, linux_pci_probe),
	DEVMETHOD(device_attach, linux_pci_attach),
	DEVMETHOD(device_detach, linux_pci_detach),
	DEVMETHOD(device_suspend, linux_pci_suspend),
	DEVMETHOD(device_resume, linux_pci_resume),
	DEVMETHOD(device_shutdown, linux_pci_shutdown),
	DEVMETHOD(pci_iov_init, linux_pci_iov_init),
	DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf),
	DEVMETHOD_END
};

struct linux_dma_priv {
	uint64_t	dma_mask;
	struct mtx	lock;
	bus_dma_tag_t	dmat;
	struct pctrie	ptree;
};
#define	DMA_PRIV_LOCK(priv) mtx_lock(&(priv)->lock)
#define	DMA_PRIV_UNLOCK(priv) mtx_unlock(&(priv)->lock)

static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;
	int error;

	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);
	pdev->dev.dma_priv = priv;

	mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);

	pctrie_init(&priv->ptree);

	/* create a default DMA tag */
	error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
	if (error) {
		mtx_destroy(&priv->lock);
		free(priv, M_DEVBUF);
		pdev->dev.dma_priv = NULL;
	}
	return (error);
}

static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;

	priv = pdev->dev.dma_priv;
	if (priv->dmat)
		bus_dma_tag_destroy(priv->dmat);
	mtx_destroy(&priv->lock);
	free(priv, M_DEVBUF);
	pdev->dev.dma_priv = NULL;
	return (0);
}

int
linux_dma_tag_init(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat) {
		if (priv->dma_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat);
	}

	priv->dma_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat);
	return (-error);
}

static struct pci_driver *
linux_pci_find(device_t dev, const struct pci_device_id **idp)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;
	uint16_t subdevice;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);
	subvendor = pci_get_subvendor(dev);
	subdevice = pci_get_subdevice(dev);

	spin_lock(&pci_lock);
	list_for_each_entry(pdrv, &pci_drivers, links) {
		for (id = pdrv->id_table; id->vendor != 0; id++) {
			if (vendor == id->vendor &&
			    (PCI_ANY_ID == id->device || device == id->device) &&
			    (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) &&
			    (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) {
				*idp = id;
				spin_unlock(&pci_lock);
				return (pdrv);
			}
		}
	}
	spin_unlock(&pci_lock);
	return (NULL);
}

static int
linux_pci_probe(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;

	if ((pdrv = linux_pci_find(dev, &id)) == NULL)
		return (ENXIO);
	if (device_get_driver(dev) != &pdrv->bsddriver)
		return (ENXIO);
	device_set_desc(dev, pdrv->name);
	return (0);
}

static int
linux_pci_attach(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	struct pci_dev *pdev;

	pdrv = linux_pci_find(dev, &id);
	pdev = device_get_softc(dev);

	MPASS(pdrv != NULL);
	MPASS(pdev != NULL);

	return (linux_pci_attach_device(dev, pdrv, id, pdev));
}

int
linux_pci_attach_device(device_t dev, struct pci_driver *pdrv,
    const struct pci_device_id *id, struct pci_dev *pdev)
{
	struct resource_list_entry *rle;
	struct pci_bus *pbus;
	struct pci_devinfo *dinfo;
	device_t parent;
	int error;

	linux_set_current(curthread);

	if (pdrv != NULL && pdrv->isdrm) {
		parent = device_get_parent(dev);
		dinfo = device_get_ivars(parent);
		device_set_ivars(dev, dinfo);
	} else {
		dinfo = device_get_ivars(dev);
	}

	pdev->dev.parent = &linux_root_device;
	pdev->dev.bsddev = dev;
	INIT_LIST_HEAD(&pdev->dev.irqents);
	pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
	pdev->device = dinfo->cfg.device;
	pdev->vendor = dinfo->cfg.vendor;
	pdev->subsystem_vendor = dinfo->cfg.subvendor;
	pdev->subsystem_device = dinfo->cfg.subdevice;
	pdev->class = pci_get_class(dev);
	pdev->revision = pci_get_revid(dev);
	pdev->pdrv = pdrv;
	kobject_init(&pdev->dev.kobj, &linux_dev_ktype);
	kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev));
	kobject_add(&pdev->dev.kobj, &linux_root_device.kobj,
	    kobject_name(&pdev->dev.kobj));
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0);
	if (rle != NULL)
		pdev->dev.irq = rle->start;
	else
		pdev->dev.irq = LINUX_IRQ_INVALID;
	pdev->irq = pdev->dev.irq;
	error = linux_pdev_dma_init(pdev);
	if (error)
		goto out_dma_init;

	TAILQ_INIT(&pdev->mmio);
	pbus = malloc(sizeof(*pbus), M_DEVBUF, M_WAITOK | M_ZERO);
	pbus->self = pdev;
	pbus->number = pci_get_bus(dev);
	pbus->domain = pci_get_domain(dev);
	pdev->bus = pbus;

	spin_lock(&pci_lock);
	list_add(&pdev->links, &pci_devices);
	spin_unlock(&pci_lock);

	if (pdrv != NULL) {
		error = pdrv->probe(pdev, id);
		if (error)
			goto out_probe;
	}
	return (0);

out_probe:
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);
out_dma_init:
	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);
	return (-error);
}

static int
linux_pci_detach(device_t dev)
{
	struct pci_dev *pdev;

	pdev = device_get_softc(dev);

	MPASS(pdev != NULL);

	device_set_desc(dev, NULL);

	return (linux_pci_detach_device(pdev));
}

int
linux_pci_detach_device(struct pci_dev *pdev)
{

	linux_set_current(curthread);

	if (pdev->pdrv != NULL)
		pdev->pdrv->remove(pdev);

	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);

	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);

	return (0);
}

static int
linux_pci_suspend(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pm_message pm = { };
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->suspend != NULL)
		error = -pdev->pdrv->suspend(pdev, pm);
	else if (pmops != NULL && pmops->suspend != NULL) {
		error = -pmops->suspend(&pdev->dev);
		if (error == 0 && pmops->suspend_late != NULL)
			error = -pmops->suspend_late(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_resume(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->resume != NULL)
		error = -pdev->pdrv->resume(pdev);
	else if (pmops != NULL && pmops->resume != NULL) {
		if (pmops->resume_early != NULL)
			error = -pmops->resume_early(&pdev->dev);
		if (error == 0 && pmops->resume != NULL)
			error = -pmops->resume(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_shutdown(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->shutdown != NULL)
		pdev->pdrv->shutdown(pdev);
	return (0);
}

static int
linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_init != NULL)
		error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config);
	else
		error = EINVAL;
	return (error);
}

static void
linux_pci_iov_uninit(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_uninit != NULL)
		pdev->pdrv->bsd_iov_uninit(dev);
}

static int
linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_add_vf != NULL)
		error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config);
	else
		error = EINVAL;
	return (error);
}

static int
_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
{
	int error;

	linux_set_current(curthread);
	spin_lock(&pci_lock);
	list_add(&pdrv->links, &pci_drivers);
	spin_unlock(&pci_lock);
	pdrv->bsddriver.name = pdrv->name;
	pdrv->bsddriver.methods = pci_methods;
	pdrv->bsddriver.size = sizeof(struct pci_dev);

	mtx_lock(&Giant);
	error = devclass_add_driver(dc, &pdrv->bsddriver,
	    BUS_PASS_DEFAULT, &pdrv->bsdclass);
	mtx_unlock(&Giant);
	return (-error);
}

int
linux_pci_register_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_find("pci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = false;
	return (_linux_pci_register_driver(pdrv, dc));
}

unsigned long
pci_resource_start(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;
	rman_res_t newstart;
	device_t dev;

	if ((rle = linux_pci_get_bar(pdev, bar)) == NULL)
		return (0);
	dev = pci_find_dbsf(pdev->bus->domain, pdev->bus->number,
	    PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
	MPASS(dev != NULL);
	if (BUS_TRANSLATE_RESOURCE(dev, rle->type, rle->start, &newstart)) {
		device_printf(pdev->dev.bsddev, "translate of %#jx failed\n",
		    (uintmax_t)rle->start);
		return (0);
	}
	return (newstart);
}

unsigned long
pci_resource_len(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;

	if ((rle = linux_pci_get_bar(pdev, bar)) == NULL)
		return (0);
	return (rle->count);
}

int
linux_pci_register_drm_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_create("vgapci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = true;
	pdrv->name = "drmn";
	return (_linux_pci_register_driver(pdrv, dc));
}

void
linux_pci_unregister_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("pci");

	spin_lock(&pci_lock);
	list_del(&pdrv->links);
	spin_unlock(&pci_lock);
	mtx_lock(&Giant);
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	mtx_unlock(&Giant);
}

void
linux_pci_unregister_drm_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("vgapci");

	spin_lock(&pci_lock);
	list_del(&pdrv->links);
	spin_unlock(&pci_lock);
	mtx_lock(&Giant);
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	mtx_unlock(&Giant);
}

CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));

struct linux_dma_obj {
	void		*vaddr;
	uint64_t	dma_addr;
	bus_dmamap_t	dmamap;
};

static uma_zone_t linux_dma_trie_zone;
static uma_zone_t linux_dma_obj_zone;

static void
linux_dma_init(void *arg)
{

	linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
	    pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
	    UMA_ALIGN_PTR, 0);
	linux_dma_obj_zone = uma_zcreate("linux_dma_object",
	    sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);

}
SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);

static void
linux_dma_uninit(void *arg)
{

	uma_zdestroy(linux_dma_obj_zone);
	uma_zdestroy(linux_dma_trie_zone);
}
SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);

static void *
linux_dma_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT));
}

static void
linux_dma_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(linux_dma_trie_zone, node);
}

PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
    linux_dma_trie_free);

void *
linux_dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t flag)
{
	struct linux_dma_priv *priv;
	vm_paddr_t high;
	size_t align;
	void *mem;

	if (dev == NULL || dev->dma_priv == NULL) {
		*dma_handle = 0;
		return (NULL);
	}
	priv = dev->dma_priv;
	if (priv->dma_mask)
		high = priv->dma_mask;
	else if (flag & GFP_DMA32)
		high = BUS_SPACE_MAXADDR_32BIT;
	else
		high = BUS_SPACE_MAXADDR;
	align = PAGE_SIZE << get_order(size);
	mem = (void *)kmem_alloc_contig(size, flag, 0, high, align, 0,
	    VM_MEMATTR_DEFAULT);
	if (mem != NULL) {
		*dma_handle = linux_dma_map_phys(dev, vtophys(mem), size);
		if (*dma_handle == 0) {
			kmem_free((vm_offset_t)mem, size);
			mem = NULL;
		}
	} else {
		*dma_handle = 0;
	}
	return (mem);
}

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	/*
	 * If the resultant mapping will be entirely 1:1 with the
	 * physical address, short-circuit the remainder of the
	 * bus_dma API.  This avoids tracking collisions in the pctrie
	 * with the additional benefit of reducing overhead.
	 */
	if (bus_dma_id_mapped(priv->dmat, phys, len))
		return (phys);

	obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT);
	if (obj == NULL) {
		return (0);
	}

	DMA_PRIV_LOCK(priv);
	if (bus_dmamap_create(priv->dmat, 0, &obj->dmamap) != 0) {
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	nseg = -1;
	if (_bus_dmamap_load_phys(priv->dmat, obj->dmamap, phys, len,
	    BUS_DMA_NOWAIT, &seg, &nseg) != 0) {
		bus_dmamap_destroy(priv->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
	if (error != 0) {
		bus_dmamap_unload(priv->dmat, obj->dmamap);
		bus_dmamap_destroy(priv->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}
	DMA_PRIV_UNLOCK(priv);
	return (obj->dma_addr);
}
#else
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	return (phys);
}
#endif

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);
	bus_dmamap_unload(priv->dmat, obj->dmamap);
	bus_dmamap_destroy(priv->dmat, obj->dmamap);
	DMA_PRIV_UNLOCK(priv);

	uma_zfree(linux_dma_obj_zone, obj);
}
#else
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
}
#endif

int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct linux_dma_priv *priv;
	struct scatterlist *sg;
	int i, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	/* create common DMA map in the first S/G entry */
	if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
		DMA_PRIV_UNLOCK(priv);
		return (0);
	}

	/* load all S/G list entries */
	for_each_sg(sgl, sg, nents, i) {
		nseg = -1;
		if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
		    sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
		    &seg, &nseg) != 0) {
			bus_dmamap_unload(priv->dmat, sgl->dma_map);
			bus_dmamap_destroy(priv->dmat, sgl->dma_map);
			DMA_PRIV_UNLOCK(priv);
			return (0);
		}
		KASSERT(nseg == 0,
		    ("More than one segment (nseg=%d)", nseg + 1));

		sg_dma_address(sg) = seg.ds_addr;
	}
	DMA_PRIV_UNLOCK(priv);

	return (nents);
}

void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);
	bus_dmamap_unload(priv->dmat, sgl->dma_map);
	bus_dmamap_destroy(priv->dmat, sgl->dma_map);
	DMA_PRIV_UNLOCK(priv);
}

struct dma_pool {
	struct device	*pool_device;
	uma_zone_t	pool_zone;
	struct mtx	pool_lock;
	bus_dma_tag_t	pool_dmat;
	size_t		pool_entry_size;
	struct pctrie	pool_ptree;
};

#define	DMA_POOL_LOCK(pool) mtx_lock(&(pool)->pool_lock)
#define	DMA_POOL_UNLOCK(pool) mtx_unlock(&(pool)->pool_lock)

static inline int
dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;
	int error, nseg;
	bus_dma_segment_t seg;

	nseg = -1;
	DMA_POOL_LOCK(pool);
	error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
	    vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
	    &seg, &nseg);
	DMA_POOL_UNLOCK(pool);
	if (error != 0) {
		return (error);
	}
	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	return (0);
}

static void
dma_pool_obj_dtor(void *mem, int size, void *arg)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;

	DMA_POOL_LOCK(pool);
	bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
	DMA_POOL_UNLOCK(pool);
}

static int
dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
	struct dma_pool *pool = arg;
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, i;

	priv = pool->pool_device->dma_priv;
	for (i = 0; i < count; i++) {
		obj = uma_zalloc(linux_dma_obj_zone, flags);
		if (obj == NULL)
			break;

		error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
		    BUS_DMA_NOWAIT, &obj->dmamap);
		if (error != 0) {
			uma_zfree(linux_dma_obj_zone, obj);
			break;
		}

		store[i] = obj;
	}

	return (i);
}

static void
dma_pool_obj_release(void *arg, void **store, int count)
{
	struct dma_pool *pool = arg;
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int i;

	priv = pool->pool_device->dma_priv;
	for (i = 0; i < count; i++) {
		obj = store[i];
		bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
		uma_zfree(linux_dma_obj_zone, obj);
	}
}

struct dma_pool *
linux_dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
{
	struct linux_dma_priv *priv;
	struct dma_pool *pool;

	priv = dev->dma_priv;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	pool->pool_device = dev;
	pool->pool_entry_size = size;

	if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    align, boundary,		/* alignment, boundary */
	    priv->dma_mask,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &pool->pool_dmat)) {
		kfree(pool);
		return (NULL);
	}

	pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,
	    dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,
	    dma_pool_obj_release, pool, 0);

	mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF);
	pctrie_init(&pool->pool_ptree);

	return (pool);
}

void
linux_dma_pool_destroy(struct dma_pool *pool)
{

	uma_zdestroy(pool->pool_zone);
	bus_dma_tag_destroy(pool->pool_dmat);
	mtx_destroy(&pool->pool_lock);
	kfree(pool);
}

void *
linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    dma_addr_t *handle)
{
	struct linux_dma_obj *obj;

	obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags);
	if (obj == NULL)
		return (NULL);

	DMA_POOL_LOCK(pool);
	if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
		DMA_POOL_UNLOCK(pool);
		uma_zfree_arg(pool->pool_zone, obj, pool);
		return (NULL);
	}
	DMA_POOL_UNLOCK(pool);

	*handle = obj->dma_addr;
	return (obj->vaddr);
}

void
linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
{
	struct linux_dma_obj *obj;

	DMA_POOL_LOCK(pool);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
	if (obj == NULL) {
		DMA_POOL_UNLOCK(pool);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
	DMA_POOL_UNLOCK(pool);

	uma_zfree_arg(pool->pool_zone, obj, pool);
}