/*-
 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/pciio.h>
#include <sys/pctrie.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/stdarg.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <dev/pci/pci_iov.h>

#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/compat.h>

static device_probe_t linux_pci_probe;
static device_attach_t linux_pci_attach;
static device_detach_t linux_pci_detach;
static device_suspend_t linux_pci_suspend;
static device_resume_t linux_pci_resume;
static device_shutdown_t linux_pci_shutdown;
static pci_iov_init_t linux_pci_iov_init;
static pci_iov_uninit_t linux_pci_iov_uninit;
static pci_iov_add_vf_t linux_pci_iov_add_vf;

static device_method_t pci_methods[] = {
	DEVMETHOD(device_probe, linux_pci_probe),
	DEVMETHOD(device_attach, linux_pci_attach),
	DEVMETHOD(device_detach, linux_pci_detach),
	DEVMETHOD(device_suspend, linux_pci_suspend),
	DEVMETHOD(device_resume, linux_pci_resume),
	DEVMETHOD(device_shutdown, linux_pci_shutdown),
	DEVMETHOD(pci_iov_init, linux_pci_iov_init),
	DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf),
	DEVMETHOD_END
};

struct linux_dma_priv {
	uint64_t	dma_mask;
	struct mtx	lock;
	bus_dma_tag_t	dmat;
	struct pctrie	ptree;
};
#define	DMA_PRIV_LOCK(priv)	mtx_lock(&(priv)->lock)
#define	DMA_PRIV_UNLOCK(priv)	mtx_unlock(&(priv)->lock)
static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;
	int error;

	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);
	pdev->dev.dma_priv = priv;

	mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);

	pctrie_init(&priv->ptree);

	/* create a default DMA tag */
	error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
	if (error) {
		mtx_destroy(&priv->lock);
		free(priv, M_DEVBUF);
		pdev->dev.dma_priv = NULL;
	}
	return (error);
}

static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;

	priv = pdev->dev.dma_priv;
	if (priv->dmat)
		bus_dma_tag_destroy(priv->dmat);
	mtx_destroy(&priv->lock);
	free(priv, M_DEVBUF);
	pdev->dev.dma_priv = NULL;
	return (0);
}

int
linux_dma_tag_init(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat) {
		if (priv->dma_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat);
	}

	priv->dma_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat);
	return (-error);
}

static struct pci_driver *
linux_pci_find(device_t dev, const struct pci_device_id **idp)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;
	uint16_t subdevice;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);
	subvendor = pci_get_subvendor(dev);
	subdevice = pci_get_subdevice(dev);

	spin_lock(&pci_lock);
	list_for_each_entry(pdrv, &pci_drivers, links) {
		for (id = pdrv->id_table; id->vendor != 0; id++) {
			if (vendor == id->vendor &&
			    (PCI_ANY_ID == id->device || device == id->device) &&
			    (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) &&
			    (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) {
				*idp = id;
				spin_unlock(&pci_lock);
				return (pdrv);
			}
		}
	}
	spin_unlock(&pci_lock);
	return (NULL);
}

static int
linux_pci_probe(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;

	if ((pdrv = linux_pci_find(dev, &id)) == NULL)
		return (ENXIO);
	if (device_get_driver(dev) != &pdrv->bsddriver)
		return (ENXIO);
	device_set_desc(dev, pdrv->name);
	return (0);
}

static int
linux_pci_attach(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	struct pci_dev *pdev;

	pdrv = linux_pci_find(dev, &id);
	pdev = device_get_softc(dev);

	MPASS(pdrv != NULL);
	MPASS(pdev != NULL);

	return (linux_pci_attach_device(dev, pdrv, id, pdev));
}
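/*
 * Illustrative sketch (not compiled): the shape of a consumer's ID table
 * as linux_pci_find() above expects it.  Matching requires an exact
 * vendor ID; PCI_ANY_ID acts as a wildcard for the device, subvendor and
 * subdevice fields, and a zero vendor entry terminates the scan.  The
 * "mydrv" identifiers and the ID values are hypothetical.
 */
#if 0
static const struct pci_device_id mydrv_id_table[] = {
	/* exact vendor/device match, any subsystem */
	{ .vendor = 0x15b3, .device = 0x1013,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID },
	/* any device from one vendor */
	{ .vendor = 0x15b3, .device = PCI_ANY_ID,
	  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID },
	{ 0, }	/* terminator checked by linux_pci_find() */
};
#endif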
int
linux_pci_attach_device(device_t dev, struct pci_driver *pdrv,
    const struct pci_device_id *id, struct pci_dev *pdev)
{
	struct resource_list_entry *rle;
	struct pci_bus *pbus;
	struct pci_devinfo *dinfo;
	device_t parent;
	int error;

	linux_set_current(curthread);

	if (pdrv != NULL && pdrv->isdrm) {
		parent = device_get_parent(dev);
		dinfo = device_get_ivars(parent);
		device_set_ivars(dev, dinfo);
	} else {
		dinfo = device_get_ivars(dev);
	}

	pdev->dev.parent = &linux_root_device;
	pdev->dev.bsddev = dev;
	INIT_LIST_HEAD(&pdev->dev.irqents);
	pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
	pdev->device = dinfo->cfg.device;
	pdev->vendor = dinfo->cfg.vendor;
	pdev->subsystem_vendor = dinfo->cfg.subvendor;
	pdev->subsystem_device = dinfo->cfg.subdevice;
	pdev->class = pci_get_class(dev);
	pdev->revision = pci_get_revid(dev);
	pdev->pdrv = pdrv;
	kobject_init(&pdev->dev.kobj, &linux_dev_ktype);
	kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev));
	kobject_add(&pdev->dev.kobj, &linux_root_device.kobj,
	    kobject_name(&pdev->dev.kobj));
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0);
	if (rle != NULL)
		pdev->dev.irq = rle->start;
	else
		pdev->dev.irq = LINUX_IRQ_INVALID;
	pdev->irq = pdev->dev.irq;
	error = linux_pdev_dma_init(pdev);
	if (error)
		goto out_dma_init;

	TAILQ_INIT(&pdev->mmio);
	pbus = malloc(sizeof(*pbus), M_DEVBUF, M_WAITOK | M_ZERO);
	pbus->self = pdev;
	pbus->number = pci_get_bus(dev);
	pbus->domain = pci_get_domain(dev);
	pdev->bus = pbus;

	spin_lock(&pci_lock);
	list_add(&pdev->links, &pci_devices);
	spin_unlock(&pci_lock);

	if (pdrv != NULL) {
		error = pdrv->probe(pdev, id);
		if (error)
			goto out_probe;
	}
	return (0);

out_probe:
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);
out_dma_init:
	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);
	return (-error);
}

static int
linux_pci_detach(device_t dev)
{
	struct pci_dev *pdev;

	pdev = device_get_softc(dev);

	MPASS(pdev != NULL);

	device_set_desc(dev, NULL);

	return (linux_pci_detach_device(pdev));
}

int
linux_pci_detach_device(struct pci_dev *pdev)
{

	linux_set_current(curthread);

	if (pdev->pdrv != NULL)
		pdev->pdrv->remove(pdev);

	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);

	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);

	return (0);
}

static int
linux_pci_suspend(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pm_message pm = { };
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->suspend != NULL)
		error = -pdev->pdrv->suspend(pdev, pm);
	else if (pmops != NULL && pmops->suspend != NULL) {
		error = -pmops->suspend(&pdev->dev);
		if (error == 0 && pmops->suspend_late != NULL)
			error = -pmops->suspend_late(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_resume(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->resume != NULL)
		error = -pdev->pdrv->resume(pdev);
	else if (pmops != NULL && pmops->resume != NULL) {
		if (pmops->resume_early != NULL)
			error = -pmops->resume_early(&pdev->dev);
		if (error == 0 && pmops->resume != NULL)
			error = -pmops->resume(&pdev->dev);
	}
	return (error);
}
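/*
 * Illustrative sketch (not compiled): a pm-ops-only driver as handled by
 * linux_pci_suspend()/linux_pci_resume() above.  The legacy
 * pdrv->suspend/resume entry points take precedence; otherwise the order
 * is suspend() then suspend_late() on the way down, and resume_early()
 * then resume() on the way up, with the later call skipped on error.
 * All "mydrv" names are hypothetical.
 */
#if 0
static int mydrv_suspend(struct device *dev) { return (0); }
static int mydrv_suspend_late(struct device *dev) { return (0); }
static int mydrv_resume_early(struct device *dev) { return (0); }
static int mydrv_resume(struct device *dev) { return (0); }

static const struct dev_pm_ops mydrv_pm_ops = {
	.suspend	= mydrv_suspend,	/* called first on suspend */
	.suspend_late	= mydrv_suspend_late,	/* called if suspend succeeded */
	.resume_early	= mydrv_resume_early,	/* called first on resume */
	.resume		= mydrv_resume,		/* called if resume_early succeeded */
};
/* hooked up through the driver's .driver.pm = &mydrv_pm_ops */
#endif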
static int
linux_pci_shutdown(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->shutdown != NULL)
		pdev->pdrv->shutdown(pdev);
	return (0);
}

static int
linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_init != NULL)
		error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config);
	else
		error = EINVAL;
	return (error);
}

static void
linux_pci_iov_uninit(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_uninit != NULL)
		pdev->pdrv->bsd_iov_uninit(dev);
}

static int
linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_add_vf != NULL)
		error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config);
	else
		error = EINVAL;
	return (error);
}

static int
_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
{
	int error;

	linux_set_current(curthread);
	spin_lock(&pci_lock);
	list_add(&pdrv->links, &pci_drivers);
	spin_unlock(&pci_lock);
	pdrv->bsddriver.name = pdrv->name;
	pdrv->bsddriver.methods = pci_methods;
	pdrv->bsddriver.size = sizeof(struct pci_dev);

	mtx_lock(&Giant);
	error = devclass_add_driver(dc, &pdrv->bsddriver,
	    BUS_PASS_DEFAULT, &pdrv->bsdclass);
	mtx_unlock(&Giant);
	return (-error);
}

int
linux_pci_register_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_find("pci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = false;
	return (_linux_pci_register_driver(pdrv, dc));
}

unsigned long
pci_resource_start(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;
	rman_res_t newstart;
	device_t dev;

	if ((rle = linux_pci_get_bar(pdev, bar)) == NULL)
		return (0);
	dev = pci_find_dbsf(pdev->bus->domain, pdev->bus->number,
	    PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
	MPASS(dev != NULL);
	if (BUS_TRANSLATE_RESOURCE(dev, rle->type, rle->start, &newstart)) {
		device_printf(pdev->dev.bsddev, "translate of %#jx failed\n",
		    (uintmax_t)rle->start);
		return (0);
	}
	return (newstart);
}

unsigned long
pci_resource_len(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;

	if ((rle = linux_pci_get_bar(pdev, bar)) == NULL)
		return (0);
	return (rle->count);
}

int
linux_pci_register_drm_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_create("vgapci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = true;
	pdrv->name = "drmn";
	return (_linux_pci_register_driver(pdrv, dc));
}

void
linux_pci_unregister_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("pci");

	spin_lock(&pci_lock);
	list_del(&pdrv->links);
	spin_unlock(&pci_lock);
	mtx_lock(&Giant);
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	mtx_unlock(&Giant);
}
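/*
 * Illustrative sketch (not compiled): a minimal driver going through the
 * registration path above.  linux_pci_register_driver() adds the driver
 * to pci_drivers and wraps it in a newbus driver backed by pci_methods,
 * so probe/remove are later invoked from linux_pci_attach_device() and
 * linux_pci_detach_device().  The Linux-style pci_register_driver()
 * wrapper in the LinuxKPI headers is assumed to land here.  All "mydrv"
 * names (and mydrv_id_table) are hypothetical.
 */
#if 0
static int
mydrv_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	return (0);		/* nonzero errno aborts the attach */
}

static void
mydrv_remove(struct pci_dev *pdev)
{
}

static struct pci_driver mydrv_driver = {
	.name		= "mydrv",
	.id_table	= mydrv_id_table,
	.probe		= mydrv_probe,
	.remove		= mydrv_remove,
};

/* on load:   error = linux_pci_register_driver(&mydrv_driver);	*/
/* on unload: linux_pci_unregister_driver(&mydrv_driver);	*/
#endif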
529 { 530 devclass_t bus; 531 532 bus = devclass_find("vgapci"); 533 534 spin_lock(&pci_lock); 535 list_del(&pdrv->links); 536 spin_unlock(&pci_lock); 537 mtx_lock(&Giant); 538 if (bus != NULL) 539 devclass_delete_driver(bus, &pdrv->bsddriver); 540 mtx_unlock(&Giant); 541 } 542 543 CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t)); 544 545 struct linux_dma_obj { 546 void *vaddr; 547 uint64_t dma_addr; 548 bus_dmamap_t dmamap; 549 }; 550 551 static uma_zone_t linux_dma_trie_zone; 552 static uma_zone_t linux_dma_obj_zone; 553 554 static void 555 linux_dma_init(void *arg) 556 { 557 558 linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie", 559 pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL, 560 UMA_ALIGN_PTR, 0); 561 linux_dma_obj_zone = uma_zcreate("linux_dma_object", 562 sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL, 563 UMA_ALIGN_PTR, 0); 564 565 } 566 SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL); 567 568 static void 569 linux_dma_uninit(void *arg) 570 { 571 572 uma_zdestroy(linux_dma_obj_zone); 573 uma_zdestroy(linux_dma_trie_zone); 574 } 575 SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL); 576 577 static void * 578 linux_dma_trie_alloc(struct pctrie *ptree) 579 { 580 581 return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT)); 582 } 583 584 static void 585 linux_dma_trie_free(struct pctrie *ptree, void *node) 586 { 587 588 uma_zfree(linux_dma_trie_zone, node); 589 } 590 591 592 PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc, 593 linux_dma_trie_free); 594 595 void * 596 linux_dma_alloc_coherent(struct device *dev, size_t size, 597 dma_addr_t *dma_handle, gfp_t flag) 598 { 599 struct linux_dma_priv *priv; 600 vm_paddr_t high; 601 size_t align; 602 void *mem; 603 604 if (dev == NULL || dev->dma_priv == NULL) { 605 *dma_handle = 0; 606 return (NULL); 607 } 608 priv = dev->dma_priv; 609 if (priv->dma_mask) 610 high = priv->dma_mask; 611 else if (flag & GFP_DMA32) 612 high = BUS_SPACE_MAXADDR_32BIT; 613 else 614 high = BUS_SPACE_MAXADDR; 615 align = PAGE_SIZE << get_order(size); 616 mem = (void *)kmem_alloc_contig(size, flag, 0, high, align, 0, 617 VM_MEMATTR_DEFAULT); 618 if (mem != NULL) { 619 *dma_handle = linux_dma_map_phys(dev, vtophys(mem), size); 620 if (*dma_handle == 0) { 621 kmem_free((vm_offset_t)mem, size); 622 mem = NULL; 623 } 624 } else { 625 *dma_handle = 0; 626 } 627 return (mem); 628 } 629 630 #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) 631 dma_addr_t 632 linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len) 633 { 634 struct linux_dma_priv *priv; 635 struct linux_dma_obj *obj; 636 int error, nseg; 637 bus_dma_segment_t seg; 638 639 priv = dev->dma_priv; 640 641 /* 642 * If the resultant mapping will be entirely 1:1 with the 643 * physical address, short-circuit the remainder of the 644 * bus_dma API. This avoids tracking collisions in the pctrie 645 * with the additional benefit of reducing overhead. 
#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	/*
	 * If the resultant mapping will be entirely 1:1 with the
	 * physical address, short-circuit the remainder of the
	 * bus_dma API.  This avoids tracking collisions in the pctrie
	 * with the additional benefit of reducing overhead.
	 */
	if (bus_dma_id_mapped(priv->dmat, phys, len))
		return (phys);

	obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT);
	if (obj == NULL) {
		return (0);
	}

	DMA_PRIV_LOCK(priv);
	if (bus_dmamap_create(priv->dmat, 0, &obj->dmamap) != 0) {
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	nseg = -1;
	if (_bus_dmamap_load_phys(priv->dmat, obj->dmamap, phys, len,
	    BUS_DMA_NOWAIT, &seg, &nseg) != 0) {
		bus_dmamap_destroy(priv->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
	if (error != 0) {
		bus_dmamap_unload(priv->dmat, obj->dmamap);
		bus_dmamap_destroy(priv->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}
	DMA_PRIV_UNLOCK(priv);
	return (obj->dma_addr);
}
#else
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	return (phys);
}
#endif

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);
	bus_dmamap_unload(priv->dmat, obj->dmamap);
	bus_dmamap_destroy(priv->dmat, obj->dmamap);
	DMA_PRIV_UNLOCK(priv);

	uma_zfree(linux_dma_obj_zone, obj);
}
#else
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
}
#endif

int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct linux_dma_priv *priv;
	struct scatterlist *sg;
	int i, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	/* create common DMA map in the first S/G entry */
	if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
		DMA_PRIV_UNLOCK(priv);
		return (0);
	}

	/* load all S/G list entries */
	for_each_sg(sgl, sg, nents, i) {
		nseg = -1;
		if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
		    sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
		    &seg, &nseg) != 0) {
			bus_dmamap_unload(priv->dmat, sgl->dma_map);
			bus_dmamap_destroy(priv->dmat, sgl->dma_map);
			DMA_PRIV_UNLOCK(priv);
			return (0);
		}
		KASSERT(nseg == 0,
		    ("More than one segment (nseg=%d)", nseg + 1));

		sg_dma_address(sg) = seg.ds_addr;
	}
	DMA_PRIV_UNLOCK(priv);

	return (nents);
}

void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);
	bus_dmamap_unload(priv->dmat, sgl->dma_map);
	bus_dmamap_destroy(priv->dmat, sgl->dma_map);
	DMA_PRIV_UNLOCK(priv);
}
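/*
 * Illustrative sketch (not compiled): mapping a buffer through
 * linux_dma_map_sg_attrs() above.  The whole list shares the single
 * busdma map stored in the first scatterlist entry, so it must be
 * unmapped as one unit with the same sgl pointer.  The mydrv_map_buf()
 * wrapper and its "buf"/"len" arguments are hypothetical.
 */
#if 0
static int
mydrv_map_buf(struct device *dev, void *buf, unsigned int len)
{
	struct scatterlist sg;

	sg_init_one(&sg, buf, len);
	if (linux_dma_map_sg_attrs(dev, &sg, 1, DMA_TO_DEVICE, NULL) == 0)
		return (-ENOMEM);
	/* ... hand sg_dma_address(&sg) to the device ... */
	linux_dma_unmap_sg_attrs(dev, &sg, 1, DMA_TO_DEVICE, NULL);
	return (0);
}
#endif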
struct dma_pool {
	struct device	*pool_device;
	uma_zone_t	pool_zone;
	struct mtx	pool_lock;
	bus_dma_tag_t	pool_dmat;
	size_t		pool_entry_size;
	struct pctrie	pool_ptree;
};

#define	DMA_POOL_LOCK(pool)	mtx_lock(&(pool)->pool_lock)
#define	DMA_POOL_UNLOCK(pool)	mtx_unlock(&(pool)->pool_lock)

static inline int
dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;
	int error, nseg;
	bus_dma_segment_t seg;

	nseg = -1;
	DMA_POOL_LOCK(pool);
	error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
	    vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
	    &seg, &nseg);
	DMA_POOL_UNLOCK(pool);
	if (error != 0) {
		return (error);
	}
	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	return (0);
}

static void
dma_pool_obj_dtor(void *mem, int size, void *arg)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;

	DMA_POOL_LOCK(pool);
	bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
	DMA_POOL_UNLOCK(pool);
}

static int
dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
	struct dma_pool *pool = arg;
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, i;

	priv = pool->pool_device->dma_priv;
	for (i = 0; i < count; i++) {
		obj = uma_zalloc(linux_dma_obj_zone, flags);
		if (obj == NULL)
			break;

		error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
		    BUS_DMA_NOWAIT, &obj->dmamap);
		if (error != 0) {
			uma_zfree(linux_dma_obj_zone, obj);
			break;
		}

		store[i] = obj;
	}

	return (i);
}

static void
dma_pool_obj_release(void *arg, void **store, int count)
{
	struct dma_pool *pool = arg;
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int i;

	priv = pool->pool_device->dma_priv;
	for (i = 0; i < count; i++) {
		obj = store[i];
		bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
		uma_zfree(linux_dma_obj_zone, obj);
	}
}

struct dma_pool *
linux_dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
{
	struct linux_dma_priv *priv;
	struct dma_pool *pool;

	priv = dev->dma_priv;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	pool->pool_device = dev;
	pool->pool_entry_size = size;

	if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    align, boundary,		/* alignment, boundary */
	    priv->dma_mask,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &pool->pool_dmat)) {
		kfree(pool);
		return (NULL);
	}

	pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,
	    dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,
	    dma_pool_obj_release, pool, 0);

	mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF);
	pctrie_init(&pool->pool_ptree);

	return (pool);
}

void
linux_dma_pool_destroy(struct dma_pool *pool)
{

	uma_zdestroy(pool->pool_zone);
	bus_dma_tag_destroy(pool->pool_dmat);
	mtx_destroy(&pool->pool_lock);
	kfree(pool);
}
void *
linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    dma_addr_t *handle)
{
	struct linux_dma_obj *obj;

	obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags);
	if (obj == NULL)
		return (NULL);

	DMA_POOL_LOCK(pool);
	if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
		DMA_POOL_UNLOCK(pool);
		uma_zfree_arg(pool->pool_zone, obj, pool);
		return (NULL);
	}
	DMA_POOL_UNLOCK(pool);

	*handle = obj->dma_addr;
	return (obj->vaddr);
}

void
linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
{
	struct linux_dma_obj *obj;

	DMA_POOL_LOCK(pool);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
	if (obj == NULL) {
		DMA_POOL_UNLOCK(pool);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
	DMA_POOL_UNLOCK(pool);

	uma_zfree_arg(pool->pool_zone, obj, pool);
}
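/*
 * Illustrative sketch (not compiled): the dma_pool life cycle implemented
 * above.  Each allocation hands back a fixed-size, pre-mapped buffer from
 * the backing UMA cache together with its bus address; the Linux-style
 * dma_pool_*() names in the LinuxKPI headers are assumed to resolve to
 * these functions.  The "mydrv" names and sizes are hypothetical.
 */
#if 0
static int
mydrv_pool_demo(struct pci_dev *pdev)
{
	struct dma_pool *pool;
	dma_addr_t desc_bus;
	void *desc;

	pool = linux_dma_pool_create("mydrv_desc", &pdev->dev,
	    64 /* size */, 64 /* align */, 0 /* boundary */);
	if (pool == NULL)
		return (-ENOMEM);
	desc = linux_dma_pool_alloc(pool, GFP_KERNEL, &desc_bus);
	if (desc != NULL)
		linux_dma_pool_free(pool, desc, desc_bus);
	linux_dma_pool_destroy(pool);
	return (0);
}
#endif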