/*-
 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/pciio.h>
#include <sys/pctrie.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/stdarg.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <dev/pci/pci_iov.h>

#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/compat.h>

static device_probe_t linux_pci_probe;
static device_attach_t linux_pci_attach;
static device_detach_t linux_pci_detach;
static device_suspend_t linux_pci_suspend;
static device_resume_t linux_pci_resume;
static device_shutdown_t linux_pci_shutdown;
static pci_iov_init_t linux_pci_iov_init;
static pci_iov_uninit_t linux_pci_iov_uninit;
static pci_iov_add_vf_t linux_pci_iov_add_vf;

static device_method_t pci_methods[] = {
	DEVMETHOD(device_probe, linux_pci_probe),
	DEVMETHOD(device_attach, linux_pci_attach),
	DEVMETHOD(device_detach, linux_pci_detach),
	DEVMETHOD(device_suspend, linux_pci_suspend),
	DEVMETHOD(device_resume, linux_pci_resume),
	DEVMETHOD(device_shutdown, linux_pci_shutdown),
	DEVMETHOD(pci_iov_init, linux_pci_iov_init),
	DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf),
	DEVMETHOD_END
};

struct linux_dma_priv {
	uint64_t	dma_mask;
	struct mtx	lock;
	bus_dma_tag_t	dmat;
	struct pctrie	ptree;
};
#define	DMA_PRIV_LOCK(priv)	mtx_lock(&(priv)->lock)
#define	DMA_PRIV_UNLOCK(priv)	mtx_unlock(&(priv)->lock)
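
/*
 * Per-device DMA bookkeeping.  linux_pdev_dma_init() allocates the
 * private structure, initializes its lock and pctrie, and creates a
 * default 64-bit DMA tag; linux_dma_tag_init() is called again when a
 * driver changes the device's DMA mask.
 */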
static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;
	int error;

	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);
	pdev->dev.dma_priv = priv;

	mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);

	pctrie_init(&priv->ptree);

	/* create a default DMA tag */
	error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
	if (error) {
		mtx_destroy(&priv->lock);
		free(priv, M_DEVBUF);
		pdev->dev.dma_priv = NULL;
	}
	return (error);
}

static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;

	priv = pdev->dev.dma_priv;
	if (priv->dmat)
		bus_dma_tag_destroy(priv->dmat);
	mtx_destroy(&priv->lock);
	free(priv, M_DEVBUF);
	pdev->dev.dma_priv = NULL;
	return (0);
}

int
linux_dma_tag_init(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat) {
		if (priv->dma_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat);
	}

	priv->dma_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat);
	return (-error);
}

/*
 * Find the registered Linux driver whose ID table matches the
 * vendor/device/subsystem IDs of "dev", returning the matching table
 * entry through "idp".
 */
static struct pci_driver *
linux_pci_find(device_t dev, const struct pci_device_id **idp)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;
	uint16_t subdevice;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);
	subvendor = pci_get_subvendor(dev);
	subdevice = pci_get_subdevice(dev);

	spin_lock(&pci_lock);
	list_for_each_entry(pdrv, &pci_drivers, links) {
		for (id = pdrv->id_table; id->vendor != 0; id++) {
			if (vendor == id->vendor &&
			    (PCI_ANY_ID == id->device || device == id->device) &&
			    (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) &&
			    (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) {
				*idp = id;
				spin_unlock(&pci_lock);
				return (pdrv);
			}
		}
	}
	spin_unlock(&pci_lock);
	return (NULL);
}

static int
linux_pci_probe(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;

	if ((pdrv = linux_pci_find(dev, &id)) == NULL)
		return (ENXIO);
	if (device_get_driver(dev) != &pdrv->bsddriver)
		return (ENXIO);
	device_set_desc(dev, pdrv->name);
	return (0);
}
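
/*
 * Attach glue: translate the FreeBSD device into a struct pci_dev,
 * hook it into the kobject hierarchy and the global pci_devices list,
 * set up the DMA state, and finally invoke the Linux driver's probe()
 * callback.
 */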
static int
linux_pci_attach(device_t dev)
{
	struct resource_list_entry *rle;
	struct pci_bus *pbus;
	struct pci_dev *pdev;
	struct pci_devinfo *dinfo;
	struct pci_driver *pdrv;
	const struct pci_device_id *id;
	device_t parent;
	int error;

	linux_set_current(curthread);

	pdrv = linux_pci_find(dev, &id);
	pdev = device_get_softc(dev);

	parent = device_get_parent(dev);
	if (pdrv->isdrm) {
		dinfo = device_get_ivars(parent);
		device_set_ivars(dev, dinfo);
	} else {
		dinfo = device_get_ivars(dev);
	}

	pdev->dev.parent = &linux_root_device;
	pdev->dev.bsddev = dev;
	INIT_LIST_HEAD(&pdev->dev.irqents);
	pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
	pdev->device = dinfo->cfg.device;
	pdev->vendor = dinfo->cfg.vendor;
	pdev->subsystem_vendor = dinfo->cfg.subvendor;
	pdev->subsystem_device = dinfo->cfg.subdevice;
	pdev->class = pci_get_class(dev);
	pdev->revision = pci_get_revid(dev);
	pdev->pdrv = pdrv;
	kobject_init(&pdev->dev.kobj, &linux_dev_ktype);
	kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev));
	kobject_add(&pdev->dev.kobj, &linux_root_device.kobj,
	    kobject_name(&pdev->dev.kobj));
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0);
	if (rle != NULL)
		pdev->dev.irq = rle->start;
	else
		pdev->dev.irq = LINUX_IRQ_INVALID;
	pdev->irq = pdev->dev.irq;
	error = linux_pdev_dma_init(pdev);
	if (error)
		goto out_dma_init;

	pbus = malloc(sizeof(*pbus), M_DEVBUF, M_WAITOK | M_ZERO);
	pbus->self = pdev;
	pbus->number = pci_get_bus(dev);
	pbus->domain = pci_get_domain(dev);
	pdev->bus = pbus;

	spin_lock(&pci_lock);
	list_add(&pdev->links, &pci_devices);
	spin_unlock(&pci_lock);

	error = pdrv->probe(pdev, id);
	if (error)
		goto out_probe;
	return (0);

out_probe:
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);
out_dma_init:
	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);
	return (-error);
}

static int
linux_pci_detach(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	pdev->pdrv->remove(pdev);

	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);

	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	device_set_desc(dev, NULL);
	put_device(&pdev->dev);

	return (0);
}

static int
linux_pci_suspend(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pm_message pm = { };
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->suspend != NULL)
		error = -pdev->pdrv->suspend(pdev, pm);
	else if (pmops != NULL && pmops->suspend != NULL) {
		error = -pmops->suspend(&pdev->dev);
		if (error == 0 && pmops->suspend_late != NULL)
			error = -pmops->suspend_late(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_resume(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->resume != NULL)
		error = -pdev->pdrv->resume(pdev);
	else if (pmops != NULL && pmops->resume != NULL) {
		if (pmops->resume_early != NULL)
			error = -pmops->resume_early(&pdev->dev);
		if (error == 0 && pmops->resume != NULL)
			error = -pmops->resume(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_shutdown(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->shutdown != NULL)
		pdev->pdrv->shutdown(pdev);
	return (0);
}
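
/*
 * SR-IOV glue: forward the FreeBSD pci_iov_init/uninit/add_vf bus
 * methods to the optional bsd_iov_* callbacks supplied by the Linux
 * driver, if any.
 */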
static int
linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_init != NULL)
		error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config);
	else
		error = EINVAL;
	return (error);
}

static void
linux_pci_iov_uninit(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_uninit != NULL)
		pdev->pdrv->bsd_iov_uninit(dev);
}

static int
linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_add_vf != NULL)
		error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config);
	else
		error = EINVAL;
	return (error);
}

static int
_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
{
	int error;

	linux_set_current(curthread);
	spin_lock(&pci_lock);
	list_add(&pdrv->links, &pci_drivers);
	spin_unlock(&pci_lock);
	pdrv->bsddriver.name = pdrv->name;
	pdrv->bsddriver.methods = pci_methods;
	pdrv->bsddriver.size = sizeof(struct pci_dev);

	mtx_lock(&Giant);
	error = devclass_add_driver(dc, &pdrv->bsddriver,
	    BUS_PASS_DEFAULT, &pdrv->bsdclass);
	mtx_unlock(&Giant);
	return (-error);
}

int
linux_pci_register_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_find("pci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = false;
	return (_linux_pci_register_driver(pdrv, dc));
}

unsigned long
pci_resource_start(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;
	rman_res_t newstart;
	device_t dev;

	if ((rle = linux_pci_get_bar(pdev, bar)) == NULL)
		return (0);
	dev = pci_find_dbsf(pdev->bus->domain, pdev->bus->number,
	    PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
	MPASS(dev != NULL);
	if (BUS_TRANSLATE_RESOURCE(dev, rle->type, rle->start, &newstart)) {
		device_printf(pdev->dev.bsddev, "translate of %#jx failed\n",
		    (uintmax_t)rle->start);
		return (0);
	}
	return (newstart);
}

unsigned long
pci_resource_len(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;

	if ((rle = linux_pci_get_bar(pdev, bar)) == NULL)
		return (0);
	return (rle->count);
}

int
linux_pci_register_drm_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_create("vgapci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = true;
	pdrv->name = "drmn";
	return (_linux_pci_register_driver(pdrv, dc));
}

void
linux_pci_unregister_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("pci");

	spin_lock(&pci_lock);
	list_del(&pdrv->links);
	spin_unlock(&pci_lock);
	mtx_lock(&Giant);
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	mtx_unlock(&Giant);
}

void
linux_pci_unregister_drm_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("vgapci");

	spin_lock(&pci_lock);
	list_del(&pdrv->links);
	spin_unlock(&pci_lock);
	mtx_lock(&Giant);
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	mtx_unlock(&Giant);
}
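
/*
 * Emulation of the Linux DMA mapping API on top of bus_dma(9).
 * Mappings that are not 1:1 with their physical address are tracked
 * in a per-device pctrie keyed by bus address, so that
 * linux_dma_unmap() can recover the corresponding bus_dmamap.
 */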
CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));

struct linux_dma_obj {
	void		*vaddr;
	uint64_t	dma_addr;
	bus_dmamap_t	dmamap;
};

static uma_zone_t linux_dma_trie_zone;
static uma_zone_t linux_dma_obj_zone;

static void
linux_dma_init(void *arg)
{

	linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
	    pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
	    UMA_ALIGN_PTR, 0);
	linux_dma_obj_zone = uma_zcreate("linux_dma_object",
	    sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
}
SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);

static void
linux_dma_uninit(void *arg)
{

	uma_zdestroy(linux_dma_obj_zone);
	uma_zdestroy(linux_dma_trie_zone);
}
SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);

static void *
linux_dma_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT));
}

static void
linux_dma_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(linux_dma_trie_zone, node);
}

PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
    linux_dma_trie_free);

void *
linux_dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t flag)
{
	struct linux_dma_priv *priv;
	vm_paddr_t high;
	size_t align;
	void *mem;

	if (dev == NULL || dev->dma_priv == NULL) {
		*dma_handle = 0;
		return (NULL);
	}
	priv = dev->dma_priv;
	if (priv->dma_mask)
		high = priv->dma_mask;
	else if (flag & GFP_DMA32)
		high = BUS_SPACE_MAXADDR_32BIT;
	else
		high = BUS_SPACE_MAXADDR;
	align = PAGE_SIZE << get_order(size);
	mem = (void *)kmem_alloc_contig(size, flag, 0, high, align, 0,
	    VM_MEMATTR_DEFAULT);
	if (mem != NULL) {
		*dma_handle = linux_dma_map_phys(dev, vtophys(mem), size);
		if (*dma_handle == 0) {
			kmem_free((vm_offset_t)mem, size);
			mem = NULL;
		}
	} else {
		*dma_handle = 0;
	}
	return (mem);
}

/*
 * linux_dma_map_phys() backs the Linux single-buffer mapping calls;
 * the linuxkpi headers reduce, e.g., dma_map_single(dev, ptr, len, dir)
 * to linux_dma_map_phys(dev, vtophys(ptr), len).  On the architectures
 * listed below the mapping may be rewritten by bus_dma(9); elsewhere
 * the physical address is returned unchanged.
 */
#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	/*
	 * If the resultant mapping will be entirely 1:1 with the
	 * physical address, short-circuit the remainder of the
	 * bus_dma API.  This avoids tracking collisions in the pctrie
	 * with the additional benefit of reducing overhead.
	 */
	if (bus_dma_id_mapped(priv->dmat, phys, len))
		return (phys);

	obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT);
	if (obj == NULL) {
		return (0);
	}

	DMA_PRIV_LOCK(priv);
	if (bus_dmamap_create(priv->dmat, 0, &obj->dmamap) != 0) {
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	nseg = -1;
	if (_bus_dmamap_load_phys(priv->dmat, obj->dmamap, phys, len,
	    BUS_DMA_NOWAIT, &seg, &nseg) != 0) {
		bus_dmamap_destroy(priv->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
	if (error != 0) {
		bus_dmamap_unload(priv->dmat, obj->dmamap);
		bus_dmamap_destroy(priv->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}
	DMA_PRIV_UNLOCK(priv);
	return (obj->dma_addr);
}
#else
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	return (phys);
}
#endif

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);
	bus_dmamap_unload(priv->dmat, obj->dmamap);
	bus_dmamap_destroy(priv->dmat, obj->dmamap);
	DMA_PRIV_UNLOCK(priv);

	uma_zfree(linux_dma_obj_zone, obj);
}
#else
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
}
#endif

int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct linux_dma_priv *priv;
	struct scatterlist *sg;
	int i, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	/* create common DMA map in the first S/G entry */
	if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
		DMA_PRIV_UNLOCK(priv);
		return (0);
	}

	/* load all S/G list entries */
	for_each_sg(sgl, sg, nents, i) {
		nseg = -1;
		if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
		    sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
		    &seg, &nseg) != 0) {
			bus_dmamap_unload(priv->dmat, sgl->dma_map);
			bus_dmamap_destroy(priv->dmat, sgl->dma_map);
			DMA_PRIV_UNLOCK(priv);
			return (0);
		}
		KASSERT(nseg == 0,
		    ("More than one segment (nseg=%d)", nseg + 1));

		sg_dma_address(sg) = seg.ds_addr;
	}
	DMA_PRIV_UNLOCK(priv);

	return (nents);
}

void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);
	bus_dmamap_unload(priv->dmat, sgl->dma_map);
	bus_dmamap_destroy(priv->dmat, sgl->dma_map);
	DMA_PRIV_UNLOCK(priv);
}
/*
 * dma_pool emulation: each pool is a UMA cache zone whose import and
 * release callbacks allocate and free DMA-able buffers with
 * bus_dmamem_alloc()/bus_dmamem_free(), while the ctor and dtor load
 * and unload the bus_dma map.  Live allocations are tracked in a
 * pctrie keyed by bus address so dma_pool_free() can find the object
 * backing a given handle.
 */
struct dma_pool {
	struct device	*pool_device;
	uma_zone_t	pool_zone;
	struct mtx	pool_lock;
	bus_dma_tag_t	pool_dmat;
	size_t		pool_entry_size;
	struct pctrie	pool_ptree;
};

#define	DMA_POOL_LOCK(pool)	mtx_lock(&(pool)->pool_lock)
#define	DMA_POOL_UNLOCK(pool)	mtx_unlock(&(pool)->pool_lock)

static inline int
dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;
	int error, nseg;
	bus_dma_segment_t seg;

	nseg = -1;
	DMA_POOL_LOCK(pool);
	error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
	    vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
	    &seg, &nseg);
	DMA_POOL_UNLOCK(pool);
	if (error != 0) {
		return (error);
	}
	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	return (0);
}

static void
dma_pool_obj_dtor(void *mem, int size, void *arg)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;

	DMA_POOL_LOCK(pool);
	bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
	DMA_POOL_UNLOCK(pool);
}

static int
dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
	struct dma_pool *pool = arg;
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, i;

	priv = pool->pool_device->dma_priv;
	for (i = 0; i < count; i++) {
		obj = uma_zalloc(linux_dma_obj_zone, flags);
		if (obj == NULL)
			break;

		error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
		    BUS_DMA_NOWAIT, &obj->dmamap);
		if (error != 0) {
			uma_zfree(linux_dma_obj_zone, obj);
			break;
		}

		store[i] = obj;
	}

	return (i);
}

static void
dma_pool_obj_release(void *arg, void **store, int count)
{
	struct dma_pool *pool = arg;
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int i;

	priv = pool->pool_device->dma_priv;
	for (i = 0; i < count; i++) {
		obj = store[i];
		bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
		uma_zfree(linux_dma_obj_zone, obj);
	}
}

struct dma_pool *
linux_dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
{
	struct linux_dma_priv *priv;
	struct dma_pool *pool;

	priv = dev->dma_priv;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	pool->pool_device = dev;
	pool->pool_entry_size = size;

	if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    align, boundary,		/* alignment, boundary */
	    priv->dma_mask,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &pool->pool_dmat)) {
		kfree(pool);
		return (NULL);
	}

	pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,
	    dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,
	    dma_pool_obj_release, pool, 0);

	mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF);
	pctrie_init(&pool->pool_ptree);

	return (pool);
}

void
linux_dma_pool_destroy(struct dma_pool *pool)
{

	uma_zdestroy(pool->pool_zone);
	bus_dma_tag_destroy(pool->pool_dmat);
	mtx_destroy(&pool->pool_lock);
	kfree(pool);
}
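
/*
 * A minimal usage sketch, assuming the usual linuxkpi dma_pool_*
 * wrappers resolve to the linux_dma_pool_*() functions below:
 *
 *	pool = dma_pool_create("descs", &pdev->dev, 64, 64, 0);
 *	desc = dma_pool_alloc(pool, GFP_KERNEL, &busaddr);
 *	...
 *	dma_pool_free(pool, desc, busaddr);
 *	dma_pool_destroy(pool);
 */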
void *
linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    dma_addr_t *handle)
{
	struct linux_dma_obj *obj;

	obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags);
	if (obj == NULL)
		return (NULL);

	DMA_POOL_LOCK(pool);
	if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
		DMA_POOL_UNLOCK(pool);
		uma_zfree_arg(pool->pool_zone, obj, pool);
		return (NULL);
	}
	DMA_POOL_UNLOCK(pool);

	*handle = obj->dma_addr;
	return (obj->vaddr);
}

void
linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
{
	struct linux_dma_obj *obj;

	DMA_POOL_LOCK(pool);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
	if (obj == NULL) {
		DMA_POOL_UNLOCK(pool);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
	DMA_POOL_UNLOCK(pool);

	uma_zfree_arg(pool->pool_zone, obj, pool);
}