/*-
 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/pctrie.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/stdarg.h>

#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/compat.h>

static device_probe_t linux_pci_probe;
static device_attach_t linux_pci_attach;
static device_detach_t linux_pci_detach;
static device_suspend_t linux_pci_suspend;
static device_resume_t linux_pci_resume;
static device_shutdown_t linux_pci_shutdown;

static device_method_t pci_methods[] = {
	DEVMETHOD(device_probe, linux_pci_probe),
	DEVMETHOD(device_attach, linux_pci_attach),
	DEVMETHOD(device_detach, linux_pci_detach),
	DEVMETHOD(device_suspend, linux_pci_suspend),
	DEVMETHOD(device_resume, linux_pci_resume),
	DEVMETHOD(device_shutdown, linux_pci_shutdown),
	DEVMETHOD_END
};

/*
 * Per-device DMA state: the busdma tag currently in use, the DMA mask
 * it was created with, and a pctrie tracking outstanding mappings.
 */
struct linux_dma_priv {
	uint64_t	dma_mask;
	struct mtx	lock;
	bus_dma_tag_t	dmat;
	struct pctrie	ptree;
};
#define	DMA_PRIV_LOCK(priv)	mtx_lock(&(priv)->lock)
#define	DMA_PRIV_UNLOCK(priv)	mtx_unlock(&(priv)->lock)

static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;
	int error;

	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);
	pdev->dev.dma_priv = priv;

	mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);

	pctrie_init(&priv->ptree);

	/* create a default DMA tag */
	error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
	if (error) {
		mtx_destroy(&priv->lock);
		free(priv, M_DEVBUF);
		pdev->dev.dma_priv = NULL;
	}
	return (error);
}

static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;

	priv = pdev->dev.dma_priv;
	if (priv->dmat)
		bus_dma_tag_destroy(priv->dmat);
	mtx_destroy(&priv->lock);
	free(priv, M_DEVBUF);
	pdev->dev.dma_priv = NULL;
	return (0);
}

/*
 * Create (or replace) the device's busdma tag so that it honors the
 * requested DMA mask.  Follows the Linux convention of returning zero
 * on success or a negative errno value on failure.
 */
int
linux_dma_tag_init(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat) {
		if (priv->dma_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat);
	}

	priv->dma_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat);
	return (-error);
}

/* Find the registered Linux pci_driver whose ID table matches this device. */
static struct pci_driver *
linux_pci_find(device_t dev, const struct pci_device_id **idp)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;
	uint16_t subdevice;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);
	subvendor = pci_get_subvendor(dev);
	subdevice = pci_get_subdevice(dev);

	spin_lock(&pci_lock);
	list_for_each_entry(pdrv, &pci_drivers, links) {
		for (id = pdrv->id_table; id->vendor != 0; id++) {
			if (vendor == id->vendor &&
			    (PCI_ANY_ID == id->device || device == id->device) &&
			    (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) &&
			    (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) {
				*idp = id;
				spin_unlock(&pci_lock);
				return (pdrv);
			}
		}
	}
	spin_unlock(&pci_lock);
	return (NULL);
}

static int
linux_pci_probe(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;

	if ((pdrv = linux_pci_find(dev, &id)) == NULL)
		return (ENXIO);
	if (device_get_driver(dev) != &pdrv->bsddriver)
		return (ENXIO);
	device_set_desc(dev, pdrv->name);
	return (0);
}

static int
linux_pci_attach(device_t dev)
{
	struct resource_list_entry *rle;
	struct pci_bus *pbus;
	struct pci_dev *pdev;
	struct pci_devinfo *dinfo;
	struct pci_driver *pdrv;
	const struct pci_device_id *id;
	device_t parent;
	devclass_t devclass;
	int error;

	linux_set_current(curthread);

	pdrv = linux_pci_find(dev, &id);
	pdev = device_get_softc(dev);

	parent = device_get_parent(dev);
	devclass = device_get_devclass(parent);
	if (pdrv->isdrm) {
		dinfo = device_get_ivars(parent);
		device_set_ivars(dev, dinfo);
	} else {
		dinfo = device_get_ivars(dev);
	}

	pdev->dev.parent = &linux_root_device;
	pdev->dev.bsddev = dev;
	INIT_LIST_HEAD(&pdev->dev.irqents);
	pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
	pdev->device = dinfo->cfg.device;
	pdev->vendor = dinfo->cfg.vendor;
	pdev->subsystem_vendor = dinfo->cfg.subvendor;
	pdev->subsystem_device = dinfo->cfg.subdevice;
	pdev->class = pci_get_class(dev);
	pdev->revision = pci_get_revid(dev);
	pdev->pdrv = pdrv;
	kobject_init(&pdev->dev.kobj, &linux_dev_ktype);
	kobject_set_name(&pdev->dev.kobj,
	    device_get_nameunit(dev));
	kobject_add(&pdev->dev.kobj, &linux_root_device.kobj,
	    kobject_name(&pdev->dev.kobj));
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0);
	if (rle != NULL)
		pdev->dev.irq = rle->start;
	else
		pdev->dev.irq = LINUX_IRQ_INVALID;
	pdev->irq = pdev->dev.irq;
	error = linux_pdev_dma_init(pdev);
	if (error)
		goto out_dma_init;

	pbus = malloc(sizeof(*pbus), M_DEVBUF, M_WAITOK | M_ZERO);
	pbus->self = pdev;
	pbus->number = pci_get_bus(dev);
	pdev->bus = pbus;

	spin_lock(&pci_lock);
	list_add(&pdev->links, &pci_devices);
	spin_unlock(&pci_lock);

	error = pdrv->probe(pdev, id);
	if (error)
		goto out_probe;
	return (0);

out_probe:
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);
out_dma_init:
	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);
	return (-error);
}

static int
linux_pci_detach(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	pdev->pdrv->remove(pdev);

	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);

	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	device_set_desc(dev, NULL);
	put_device(&pdev->dev);

	return (0);
}

static int
linux_pci_suspend(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pm_message pm = { };
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->suspend != NULL)
		error = -pdev->pdrv->suspend(pdev, pm);
	else if (pmops != NULL && pmops->suspend != NULL) {
		error = -pmops->suspend(&pdev->dev);
		if (error == 0 && pmops->suspend_late != NULL)
			error = -pmops->suspend_late(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_resume(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->resume != NULL)
		error = -pdev->pdrv->resume(pdev);
	else if (pmops != NULL && pmops->resume != NULL) {
		if (pmops->resume_early != NULL)
			error = -pmops->resume_early(&pdev->dev);
		if (error == 0 && pmops->resume != NULL)
			error = -pmops->resume(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_shutdown(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->shutdown != NULL)
		pdev->pdrv->shutdown(pdev);
	return (0);
}

static int
_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
{
	int error;

	linux_set_current(curthread);
	spin_lock(&pci_lock);
	list_add(&pdrv->links, &pci_drivers);
	spin_unlock(&pci_lock);
	pdrv->bsddriver.name = pdrv->name;
	pdrv->bsddriver.methods = pci_methods;
	pdrv->bsddriver.size = sizeof(struct pci_dev);

	mtx_lock(&Giant);
	error = devclass_add_driver(dc, &pdrv->bsddriver,
	    BUS_PASS_DEFAULT, &pdrv->bsdclass);
	mtx_unlock(&Giant);
	return (-error);
}

int
linux_pci_register_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_find("pci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = false;
	return (_linux_pci_register_driver(pdrv, dc));
}

int
linux_pci_register_drm_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_create("vgapci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = true;
	pdrv->name = "drmn";
	return (_linux_pci_register_driver(pdrv, dc));
}

void
linux_pci_unregister_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("pci");

	spin_lock(&pci_lock);
	list_del(&pdrv->links);
	spin_unlock(&pci_lock);
	mtx_lock(&Giant);
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	mtx_unlock(&Giant);
}

void
linux_pci_unregister_drm_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("vgapci");

	spin_lock(&pci_lock);
	list_del(&pdrv->links);
	spin_unlock(&pci_lock);
	mtx_lock(&Giant);
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	mtx_unlock(&Giant);
}

CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));

struct linux_dma_obj {
	void		*vaddr;
	uint64_t	dma_addr;
	bus_dmamap_t	dmamap;
};

static uma_zone_t linux_dma_trie_zone;
static uma_zone_t linux_dma_obj_zone;

static void
linux_dma_init(void *arg)
{

	linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
	    pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
	    UMA_ALIGN_PTR, 0);
	linux_dma_obj_zone = uma_zcreate("linux_dma_object",
	    sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
}
SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);

static void
linux_dma_uninit(void *arg)
{

	uma_zdestroy(linux_dma_obj_zone);
	uma_zdestroy(linux_dma_trie_zone);
}
SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);

static void *
linux_dma_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(linux_dma_trie_zone, 0));
}

static void
linux_dma_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(linux_dma_trie_zone, node);
}

/* Generate the LINUX_DMA_PCTRIE_*() helpers, keyed by the dma_addr field. */
PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
    linux_dma_trie_free);

void *
linux_dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t flag)
{
	struct linux_dma_priv *priv;
	vm_paddr_t high;
	size_t align;
	void *mem;

	if (dev == NULL || dev->dma_priv == NULL) {
		*dma_handle = 0;
		return (NULL);
	}
	priv = dev->dma_priv;
	if (priv->dma_mask)
		high = priv->dma_mask;
	else if (flag & GFP_DMA32)
		high = BUS_SPACE_MAXADDR_32BIT;
	else
		high = BUS_SPACE_MAXADDR;
	align = PAGE_SIZE << get_order(size);
	mem = (void *)kmem_alloc_contig(size, flag, 0, high, align, 0,
	    VM_MEMATTR_DEFAULT);
	if (mem != NULL) {
		*dma_handle = linux_dma_map_phys(dev, vtophys(mem), size);
		if (*dma_handle == 0) {
			kmem_free((vm_offset_t)mem, size);
			mem = NULL;
		}
	} else {
		*dma_handle = 0;
	}
	return (mem);
}

dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	obj = uma_zalloc(linux_dma_obj_zone, 0);

	DMA_PRIV_LOCK(priv);
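	/* Create a map for this tag and load the physical range as one segment. */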
	if (bus_dmamap_create(priv->dmat, 0, &obj->dmamap) != 0) {
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	/* nseg holds the index of the last segment filled in; -1 means none yet. */
	nseg = -1;
	if (_bus_dmamap_load_phys(priv->dmat, obj->dmamap, phys, len,
	    BUS_DMA_NOWAIT, &seg, &nseg) != 0) {
		bus_dmamap_destroy(priv->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
	if (error != 0) {
		bus_dmamap_unload(priv->dmat, obj->dmamap);
		bus_dmamap_destroy(priv->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}
	DMA_PRIV_UNLOCK(priv);
	return (obj->dma_addr);
}

void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);
	bus_dmamap_unload(priv->dmat, obj->dmamap);
	bus_dmamap_destroy(priv->dmat, obj->dmamap);
	DMA_PRIV_UNLOCK(priv);

	uma_zfree(linux_dma_obj_zone, obj);
}

int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct linux_dma_priv *priv;
	struct scatterlist *sg;
	int i, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	/* create common DMA map in the first S/G entry */
	if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
		DMA_PRIV_UNLOCK(priv);
		return (0);
	}

	/* load all S/G list entries */
	for_each_sg(sgl, sg, nents, i) {
		nseg = -1;
		if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
		    sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
		    &seg, &nseg) != 0) {
			bus_dmamap_unload(priv->dmat, sgl->dma_map);
			bus_dmamap_destroy(priv->dmat, sgl->dma_map);
			DMA_PRIV_UNLOCK(priv);
			return (0);
		}
		KASSERT(nseg == 0,
		    ("More than one segment (nseg=%d)", nseg + 1));

		sg_dma_address(sg) = seg.ds_addr;
	}
	DMA_PRIV_UNLOCK(priv);

	return (nents);
}

void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);
	bus_dmamap_unload(priv->dmat, sgl->dma_map);
	bus_dmamap_destroy(priv->dmat, sgl->dma_map);
	DMA_PRIV_UNLOCK(priv);
}

struct dma_pool {
	struct device  *pool_device;
	uma_zone_t	pool_zone;
	struct mtx	pool_lock;
	bus_dma_tag_t	pool_dmat;
	size_t		pool_entry_size;
	struct pctrie	pool_ptree;
};

#define	DMA_POOL_LOCK(pool)	mtx_lock(&(pool)->pool_lock)
#define	DMA_POOL_UNLOCK(pool)	mtx_unlock(&(pool)->pool_lock)

static inline int
dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;
	int error, nseg;
	bus_dma_segment_t seg;

	nseg = -1;
	DMA_POOL_LOCK(pool);
	error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
	    vtophys(obj->vaddr), pool->pool_entry_size,
	    BUS_DMA_NOWAIT, &seg, &nseg);
	DMA_POOL_UNLOCK(pool);
	if (error != 0) {
		return (error);
	}
	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	return (0);
}

static void
dma_pool_obj_dtor(void *mem, int size, void *arg)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;

	DMA_POOL_LOCK(pool);
	bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
	DMA_POOL_UNLOCK(pool);
}

/*
 * UMA cache import/release: allocate and free the DMA'able backing
 * memory for pool objects in bulk.
 */
static int
dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
	struct dma_pool *pool = arg;
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, i;

	priv = pool->pool_device->dma_priv;
	for (i = 0; i < count; i++) {
		obj = uma_zalloc(linux_dma_obj_zone, flags);
		if (obj == NULL)
			break;

		error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
		    BUS_DMA_NOWAIT, &obj->dmamap);
		if (error != 0) {
			uma_zfree(linux_dma_obj_zone, obj);
			break;
		}

		store[i] = obj;
	}

	return (i);
}

static void
dma_pool_obj_release(void *arg, void **store, int count)
{
	struct dma_pool *pool = arg;
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int i;

	priv = pool->pool_device->dma_priv;
	for (i = 0; i < count; i++) {
		obj = store[i];
		bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
		uma_zfree(linux_dma_obj_zone, obj);
	}
}

struct dma_pool *
linux_dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
{
	struct linux_dma_priv *priv;
	struct dma_pool *pool;

	priv = dev->dma_priv;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	pool->pool_device = dev;
	pool->pool_entry_size = size;

	if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    align, boundary,		/* alignment, boundary */
	    priv->dma_mask,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &pool->pool_dmat)) {
		kfree(pool);
		return (NULL);
	}

	pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,
	    dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,
	    dma_pool_obj_release, pool, 0);

	mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF);
	pctrie_init(&pool->pool_ptree);

	return (pool);
}

void
linux_dma_pool_destroy(struct dma_pool *pool)
{

	uma_zdestroy(pool->pool_zone);
	bus_dma_tag_destroy(pool->pool_dmat);
	mtx_destroy(&pool->pool_lock);
	kfree(pool);
}

void *
linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    dma_addr_t *handle)
{
	struct linux_dma_obj *obj;

	obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags);
	if (obj == NULL)
		return (NULL);

	DMA_POOL_LOCK(pool);
	if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
		DMA_POOL_UNLOCK(pool);
		uma_zfree_arg(pool->pool_zone, obj, pool);
		return (NULL);
	}
	DMA_POOL_UNLOCK(pool);

	*handle = obj->dma_addr;
	return (obj->vaddr);
}

void
linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
{
	struct linux_dma_obj *obj;

	DMA_POOL_LOCK(pool);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree,
	    dma_addr);
	if (obj == NULL) {
		DMA_POOL_UNLOCK(pool);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
	DMA_POOL_UNLOCK(pool);

	uma_zfree_arg(pool->pool_zone, obj, pool);
}
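
/*
 * Illustrative sketch (comment only, not compiled): a LinuxKPI consumer
 * is expected to reach the pool shims above through the usual Linux API
 * names, which <linux/dmapool.h> maps onto the linux_dma_pool_*()
 * functions.  Assuming a driver already holds a struct pci_dev *pdev,
 * the hypothetical "mypool" usage would look like:
 *
 *	struct dma_pool *pool;
 *	dma_addr_t busaddr;
 *	void *vaddr;
 *
 *	pool = dma_pool_create("mypool", &pdev->dev, 256, 16, 0);
 *	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &busaddr);
 *	...
 *	dma_pool_free(pool, vaddr, busaddr);
 *	dma_pool_destroy(pool);
 */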