/*-
 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/pctrie.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/stdarg.h>

#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/compat.h>

static device_probe_t linux_pci_probe;
static device_attach_t linux_pci_attach;
static device_detach_t linux_pci_detach;
static device_suspend_t linux_pci_suspend;
static device_resume_t linux_pci_resume;
static device_shutdown_t linux_pci_shutdown;

static device_method_t pci_methods[] = {
	DEVMETHOD(device_probe, linux_pci_probe),
	DEVMETHOD(device_attach, linux_pci_attach),
	DEVMETHOD(device_detach, linux_pci_detach),
	DEVMETHOD(device_suspend, linux_pci_suspend),
	DEVMETHOD(device_resume, linux_pci_resume),
	DEVMETHOD(device_shutdown, linux_pci_shutdown),
	DEVMETHOD_END
};

struct linux_dma_priv {
	uint64_t	dma_mask;
	struct mtx	lock;
	bus_dma_tag_t	dmat;
	struct pctrie	ptree;
};
#define	DMA_PRIV_LOCK(priv) mtx_lock(&(priv)->lock)
#define	DMA_PRIV_UNLOCK(priv) mtx_unlock(&(priv)->lock)

static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;
	int error;

	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);
	pdev->dev.dma_priv = priv;

	mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);

	pctrie_init(&priv->ptree);

	/* create a default DMA tag */
	error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
	if (error) {
		mtx_destroy(&priv->lock);
		free(priv, M_DEVBUF);
		pdev->dev.dma_priv = NULL;
	}
	return (error);
}
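
/* Tear down the per-device DMA state created by linux_pdev_dma_init(). */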
static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;

	priv = pdev->dev.dma_priv;
	if (priv->dmat)
		bus_dma_tag_destroy(priv->dmat);
	mtx_destroy(&priv->lock);
	free(priv, M_DEVBUF);
	pdev->dev.dma_priv = NULL;
	return (0);
}

int
linux_dma_tag_init(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat) {
		if (priv->dma_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat);
	}

	priv->dma_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat);
	return (-error);
}

static struct pci_driver *
linux_pci_find(device_t dev, const struct pci_device_id **idp)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;
	uint16_t subdevice;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);
	subvendor = pci_get_subvendor(dev);
	subdevice = pci_get_subdevice(dev);

	spin_lock(&pci_lock);
	list_for_each_entry(pdrv, &pci_drivers, links) {
		for (id = pdrv->id_table; id->vendor != 0; id++) {
			if (vendor == id->vendor &&
			    (PCI_ANY_ID == id->device || device == id->device) &&
			    (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) &&
			    (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) {
				*idp = id;
				spin_unlock(&pci_lock);
				return (pdrv);
			}
		}
	}
	spin_unlock(&pci_lock);
	return (NULL);
}

static int
linux_pci_probe(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;

	if ((pdrv = linux_pci_find(dev, &id)) == NULL)
		return (ENXIO);
	if (device_get_driver(dev) != &pdrv->bsddriver)
		return (ENXIO);
	device_set_desc(dev, pdrv->name);
	return (0);
}

static int
linux_pci_attach(device_t dev)
{
	struct resource_list_entry *rle;
	struct pci_bus *pbus;
	struct pci_dev *pdev;
	struct pci_devinfo *dinfo;
	struct pci_driver *pdrv;
	const struct pci_device_id *id;
	device_t parent;
	int error;

	linux_set_current(curthread);

	pdrv = linux_pci_find(dev, &id);
	pdev = device_get_softc(dev);

	parent = device_get_parent(dev);
	if (pdrv->isdrm) {
		dinfo = device_get_ivars(parent);
		device_set_ivars(dev, dinfo);
	} else {
		dinfo = device_get_ivars(dev);
	}

	pdev->dev.parent = &linux_root_device;
	pdev->dev.bsddev = dev;
	INIT_LIST_HEAD(&pdev->dev.irqents);
	pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
	pdev->device = dinfo->cfg.device;
	pdev->vendor = dinfo->cfg.vendor;
	pdev->subsystem_vendor = dinfo->cfg.subvendor;
	pdev->subsystem_device = dinfo->cfg.subdevice;
	pdev->class = pci_get_class(dev);
	pdev->revision = pci_get_revid(dev);
	pdev->pdrv = pdrv;
	kobject_init(&pdev->dev.kobj, &linux_dev_ktype);
	kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev));
	kobject_add(&pdev->dev.kobj, &linux_root_device.kobj,
	    kobject_name(&pdev->dev.kobj));
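	/* Mirror the bus-assigned interrupt line into the Linux view. */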
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0);
	if (rle != NULL)
		pdev->dev.irq = rle->start;
	else
		pdev->dev.irq = LINUX_IRQ_INVALID;
	pdev->irq = pdev->dev.irq;
	error = linux_pdev_dma_init(pdev);
	if (error)
		goto out_dma_init;

	pbus = malloc(sizeof(*pbus), M_DEVBUF, M_WAITOK | M_ZERO);
	pbus->self = pdev;
	pbus->number = pci_get_bus(dev);
	pbus->domain = pci_get_domain(dev);
	pdev->bus = pbus;

	spin_lock(&pci_lock);
	list_add(&pdev->links, &pci_devices);
	spin_unlock(&pci_lock);

	error = pdrv->probe(pdev, id);
	if (error)
		goto out_probe;
	return (0);

out_probe:
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);
	/* The device was only added to pci_devices after DMA init. */
	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
out_dma_init:
	put_device(&pdev->dev);
	return (-error);
}

static int
linux_pci_detach(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	pdev->pdrv->remove(pdev);

	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);

	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	device_set_desc(dev, NULL);
	put_device(&pdev->dev);

	return (0);
}

static int
linux_pci_suspend(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pm_message pm = { };
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->suspend != NULL)
		error = -pdev->pdrv->suspend(pdev, pm);
	else if (pmops != NULL && pmops->suspend != NULL) {
		error = -pmops->suspend(&pdev->dev);
		if (error == 0 && pmops->suspend_late != NULL)
			error = -pmops->suspend_late(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_resume(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->resume != NULL)
		error = -pdev->pdrv->resume(pdev);
	else if (pmops != NULL && pmops->resume != NULL) {
		if (pmops->resume_early != NULL)
			error = -pmops->resume_early(&pdev->dev);
		if (error == 0 && pmops->resume != NULL)
			error = -pmops->resume(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_shutdown(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->shutdown != NULL)
		pdev->pdrv->shutdown(pdev);
	return (0);
}

static int
_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
{
	int error;

	linux_set_current(curthread);
	spin_lock(&pci_lock);
	list_add(&pdrv->links, &pci_drivers);
	spin_unlock(&pci_lock);
	pdrv->bsddriver.name = pdrv->name;
	pdrv->bsddriver.methods = pci_methods;
	pdrv->bsddriver.size = sizeof(struct pci_dev);

	mtx_lock(&Giant);
	error = devclass_add_driver(dc, &pdrv->bsddriver,
	    BUS_PASS_DEFAULT, &pdrv->bsdclass);
	mtx_unlock(&Giant);
	return (-error);
}

int
linux_pci_register_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_find("pci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = false;
	return (_linux_pci_register_driver(pdrv, dc));
}
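
/*
 * Return the host-visible start address of a BAR, translating the raw
 * resource address where the bus requires it (e.g. behind a translating
 * bridge).
 */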
unsigned long
pci_resource_start(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;
	rman_res_t newstart;
	device_t dev;

	if ((rle = linux_pci_get_bar(pdev, bar)) == NULL)
		return (0);
	dev = pci_find_dbsf(pdev->bus->domain, pdev->bus->number,
	    PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
	MPASS(dev != NULL);
	if (BUS_TRANSLATE_RESOURCE(dev, rle->type, rle->start, &newstart)) {
		device_printf(pdev->dev.bsddev, "translate of %#jx failed\n",
		    (uintmax_t)rle->start);
		return (0);
	}
	return (newstart);
}

unsigned long
pci_resource_len(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;

	if ((rle = linux_pci_get_bar(pdev, bar)) == NULL)
		return (0);
	return (rle->count);
}

int
linux_pci_register_drm_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_create("vgapci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = true;
	pdrv->name = "drmn";
	return (_linux_pci_register_driver(pdrv, dc));
}

void
linux_pci_unregister_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("pci");

	spin_lock(&pci_lock);
	list_del(&pdrv->links);
	spin_unlock(&pci_lock);
	mtx_lock(&Giant);
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	mtx_unlock(&Giant);
}

void
linux_pci_unregister_drm_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("vgapci");

	spin_lock(&pci_lock);
	list_del(&pdrv->links);
	spin_unlock(&pci_lock);
	mtx_lock(&Giant);
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	mtx_unlock(&Giant);
}

CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));

struct linux_dma_obj {
	void		*vaddr;
	uint64_t	dma_addr;
	bus_dmamap_t	dmamap;
};

static uma_zone_t linux_dma_trie_zone;
static uma_zone_t linux_dma_obj_zone;

static void
linux_dma_init(void *arg)
{

	linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
	    pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
	    UMA_ALIGN_PTR, 0);
	linux_dma_obj_zone = uma_zcreate("linux_dma_object",
	    sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
}
SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);

static void
linux_dma_uninit(void *arg)
{

	uma_zdestroy(linux_dma_obj_zone);
	uma_zdestroy(linux_dma_trie_zone);
}
SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);

static void *
linux_dma_trie_alloc(struct pctrie *ptree)
{

	/*
	 * Trie nodes are allocated from LINUX_DMA_PCTRIE_INSERT() while
	 * the DMA priv or pool mutex is held, so this must not sleep.
	 */
	return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT));
}

static void
linux_dma_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(linux_dma_trie_zone, node);
}

PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
    linux_dma_trie_free);

void *
linux_dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t flag)
{
	struct linux_dma_priv *priv;
	vm_paddr_t high;
	size_t align;
	void *mem;

	if (dev == NULL || dev->dma_priv == NULL) {
		*dma_handle = 0;
		return (NULL);
	}
	priv = dev->dma_priv;
	if (priv->dma_mask)
		high = priv->dma_mask;
	else if (flag & GFP_DMA32)
		high = BUS_SPACE_MAXADDR_32BIT;
	else
		high = BUS_SPACE_MAXADDR;
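	/*
	 * Allocate a physically contiguous buffer below "high", aligned
	 * to the allocation's power-of-two order.
	 */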
	align = PAGE_SIZE << get_order(size);
	mem = (void *)kmem_alloc_contig(size, flag, 0, high, align, 0,
	    VM_MEMATTR_DEFAULT);
	if (mem != NULL) {
		*dma_handle = linux_dma_map_phys(dev, vtophys(mem), size);
		if (*dma_handle == 0) {
			kmem_free((vm_offset_t)mem, size);
			mem = NULL;
		}
	} else {
		*dma_handle = 0;
	}
	return (mem);
}

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	/*
	 * If the resultant mapping will be entirely 1:1 with the
	 * physical address, short-circuit the remainder of the
	 * bus_dma API.  This avoids tracking collisions in the pctrie
	 * with the additional benefit of reducing overhead.
	 */
	if (bus_dma_id_mapped(priv->dmat, phys, len))
		return (phys);

	obj = uma_zalloc(linux_dma_obj_zone, 0);
	if (obj == NULL)
		return (0);

	DMA_PRIV_LOCK(priv);
	if (bus_dmamap_create(priv->dmat, 0, &obj->dmamap) != 0) {
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	nseg = -1;
	if (_bus_dmamap_load_phys(priv->dmat, obj->dmamap, phys, len,
	    BUS_DMA_NOWAIT, &seg, &nseg) != 0) {
		bus_dmamap_destroy(priv->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	KASSERT(nseg == 0, ("More than one segment (nseg=%d)", nseg + 1));
	obj->dma_addr = seg.ds_addr;

	error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
	if (error != 0) {
		bus_dmamap_unload(priv->dmat, obj->dmamap);
		bus_dmamap_destroy(priv->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}
	DMA_PRIV_UNLOCK(priv);
	return (obj->dma_addr);
}
#else
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	return (phys);
}
#endif

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	/*
	 * An empty tree means no dmamap-backed mappings exist; the
	 * address came from the 1:1 path in linux_dma_map_phys().
	 */
	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);
	bus_dmamap_unload(priv->dmat, obj->dmamap);
	bus_dmamap_destroy(priv->dmat, obj->dmamap);
	DMA_PRIV_UNLOCK(priv);

	uma_zfree(linux_dma_obj_zone, obj);
}
#else
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
}
#endif

int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct linux_dma_priv *priv;
	struct scatterlist *sg;
	int i, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	/* create common DMA map in the first S/G entry */
	if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
		DMA_PRIV_UNLOCK(priv);
		return (0);
	}

	/* load all S/G list entries */
	for_each_sg(sgl, sg, nents, i) {
		nseg = -1;
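		/*
		 * Load this entry's physical range into the shared map;
		 * the tag's nsegments limit of 1 guarantees one segment.
		 */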
		if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
		    sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
		    &seg, &nseg) != 0) {
			bus_dmamap_unload(priv->dmat, sgl->dma_map);
			bus_dmamap_destroy(priv->dmat, sgl->dma_map);
			DMA_PRIV_UNLOCK(priv);
			return (0);
		}
		KASSERT(nseg == 0,
		    ("More than one segment (nseg=%d)", nseg + 1));

		sg_dma_address(sg) = seg.ds_addr;
	}
	DMA_PRIV_UNLOCK(priv);

	return (nents);
}

void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);
	bus_dmamap_unload(priv->dmat, sgl->dma_map);
	bus_dmamap_destroy(priv->dmat, sgl->dma_map);
	DMA_PRIV_UNLOCK(priv);
}

struct dma_pool {
	struct device	*pool_device;
	uma_zone_t	pool_zone;
	struct mtx	pool_lock;
	bus_dma_tag_t	pool_dmat;
	size_t		pool_entry_size;
	struct pctrie	pool_ptree;
};

#define	DMA_POOL_LOCK(pool) mtx_lock(&(pool)->pool_lock)
#define	DMA_POOL_UNLOCK(pool) mtx_unlock(&(pool)->pool_lock)

static inline int
dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;
	int error, nseg;
	bus_dma_segment_t seg;

	nseg = -1;
	DMA_POOL_LOCK(pool);
	error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
	    vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
	    &seg, &nseg);
	DMA_POOL_UNLOCK(pool);
	if (error != 0) {
		return (error);
	}
	KASSERT(nseg == 0, ("More than one segment (nseg=%d)", nseg + 1));
	obj->dma_addr = seg.ds_addr;

	return (0);
}

static void
dma_pool_obj_dtor(void *mem, int size, void *arg)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;

	DMA_POOL_LOCK(pool);
	bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
	DMA_POOL_UNLOCK(pool);
}

static int
dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
	struct dma_pool *pool = arg;
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, i;

	priv = pool->pool_device->dma_priv;
	for (i = 0; i < count; i++) {
		obj = uma_zalloc(linux_dma_obj_zone, flags);
		if (obj == NULL)
			break;

		error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
		    BUS_DMA_NOWAIT, &obj->dmamap);
		if (error != 0) {
			uma_zfree(linux_dma_obj_zone, obj);
			break;
		}

		store[i] = obj;
	}

	return (i);
}

static void
dma_pool_obj_release(void *arg, void **store, int count)
{
	struct dma_pool *pool = arg;
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int i;

	priv = pool->pool_device->dma_priv;
	for (i = 0; i < count; i++) {
		obj = store[i];
		bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
		uma_zfree(linux_dma_obj_zone, obj);
	}
}

struct dma_pool *
linux_dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
{
	struct linux_dma_priv *priv;
	struct dma_pool *pool;

	priv = dev->dma_priv;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	pool->pool_device = dev;
	pool->pool_entry_size = size;

	if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    align, boundary,		/* alignment, boundary */
	    priv->dma_mask,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &pool->pool_dmat)) {
		kfree(pool);
		return (NULL);
	}
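
	/*
	 * Back the pool with a UMA cache zone: import/release allocate
	 * and free DMA-able buffers, while ctor/dtor load and unload the
	 * DMA map each time an object leaves or re-enters the cache.
	 */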
	pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,
	    dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,
	    dma_pool_obj_release, pool, 0);

	mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF);
	pctrie_init(&pool->pool_ptree);

	return (pool);
}

void
linux_dma_pool_destroy(struct dma_pool *pool)
{

	uma_zdestroy(pool->pool_zone);
	bus_dma_tag_destroy(pool->pool_dmat);
	mtx_destroy(&pool->pool_lock);
	kfree(pool);
}

void *
linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    dma_addr_t *handle)
{
	struct linux_dma_obj *obj;

	obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags);
	if (obj == NULL)
		return (NULL);

	DMA_POOL_LOCK(pool);
	if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
		DMA_POOL_UNLOCK(pool);
		uma_zfree_arg(pool->pool_zone, obj, pool);
		return (NULL);
	}
	DMA_POOL_UNLOCK(pool);

	*handle = obj->dma_addr;
	return (obj->vaddr);
}

void
linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
{
	struct linux_dma_obj *obj;

	DMA_POOL_LOCK(pool);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
	if (obj == NULL) {
		DMA_POOL_UNLOCK(pool);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
	DMA_POOL_UNLOCK(pool);

	uma_zfree_arg(pool->pool_zone, obj, pool);
}