/*-
 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/pctrie.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/stdarg.h>

#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/compat.h>

static device_probe_t linux_pci_probe;
static device_attach_t linux_pci_attach;
static device_detach_t linux_pci_detach;
static device_suspend_t linux_pci_suspend;
static device_resume_t linux_pci_resume;
static device_shutdown_t linux_pci_shutdown;

static device_method_t pci_methods[] = {
	DEVMETHOD(device_probe, linux_pci_probe),
	DEVMETHOD(device_attach, linux_pci_attach),
	DEVMETHOD(device_detach, linux_pci_detach),
	DEVMETHOD(device_suspend, linux_pci_suspend),
	DEVMETHOD(device_resume, linux_pci_resume),
	DEVMETHOD(device_shutdown, linux_pci_shutdown),
	DEVMETHOD_END
};

struct linux_dma_priv {
	uint64_t	dma_mask;
	struct mtx	lock;
	bus_dma_tag_t	dmat;
	struct pctrie	ptree;
};
#define	DMA_PRIV_LOCK(priv)	mtx_lock(&(priv)->lock)
#define	DMA_PRIV_UNLOCK(priv)	mtx_unlock(&(priv)->lock)

static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;
	int error;

	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);
	pdev->dev.dma_priv = priv;

	mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);

	pctrie_init(&priv->ptree);

	/* create a default DMA tag */
	error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
	if (error) {
		mtx_destroy(&priv->lock);
		free(priv, M_DEVBUF);
		pdev->dev.dma_priv = NULL;
	}
	return (error);
}
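
/*
 * Example (hypothetical driver code, not part of this file): the default
 * 64-bit tag created above is replaced when a driver narrows its DMA mask.
 * A LinuxKPI consumer limited to 32-bit addressing would typically do:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0)
 *		return (-ENODEV);
 *
 * which, in this compat layer, is expected to reach linux_dma_tag_init()
 * below and recreate priv->dmat with the narrower lowaddr bound.
 */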
static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;

	priv = pdev->dev.dma_priv;
	if (priv->dmat)
		bus_dma_tag_destroy(priv->dmat);
	mtx_destroy(&priv->lock);
	free(priv, M_DEVBUF);
	pdev->dev.dma_priv = NULL;
	return (0);
}

int
linux_dma_tag_init(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat) {
		if (priv->dma_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat);
	}

	priv->dma_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat);
	return (-error);
}

/*
 * Find the registered Linux pci_driver, if any, whose ID table matches
 * the given FreeBSD PCI device; also return the matching table entry.
 */
static struct pci_driver *
linux_pci_find(device_t dev, const struct pci_device_id **idp)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;
	uint16_t subdevice;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);
	subvendor = pci_get_subvendor(dev);
	subdevice = pci_get_subdevice(dev);

	spin_lock(&pci_lock);
	list_for_each_entry(pdrv, &pci_drivers, links) {
		for (id = pdrv->id_table; id->vendor != 0; id++) {
			if (vendor == id->vendor &&
			    (PCI_ANY_ID == id->device || device == id->device) &&
			    (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) &&
			    (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) {
				*idp = id;
				spin_unlock(&pci_lock);
				return (pdrv);
			}
		}
	}
	spin_unlock(&pci_lock);
	return (NULL);
}

static int
linux_pci_probe(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;

	if ((pdrv = linux_pci_find(dev, &id)) == NULL)
		return (ENXIO);
	if (device_get_driver(dev) != &pdrv->bsddriver)
		return (ENXIO);
	device_set_desc(dev, pdrv->name);
	return (0);
}

static int
linux_pci_attach(device_t dev)
{
	struct resource_list_entry *rle;
	struct pci_bus *pbus;
	struct pci_dev *pdev;
	struct pci_devinfo *dinfo;
	struct pci_driver *pdrv;
	const struct pci_device_id *id;
	device_t parent;
	devclass_t devclass;
	int error;

	linux_set_current(curthread);

	pdrv = linux_pci_find(dev, &id);
	pdev = device_get_softc(dev);

	parent = device_get_parent(dev);
	devclass = device_get_devclass(parent);
	if (pdrv->isdrm) {
		/* DRM drivers attach below vgapci; use the parent's PCI info */
		dinfo = device_get_ivars(parent);
		device_set_ivars(dev, dinfo);
	} else {
		dinfo = device_get_ivars(dev);
	}

	pdev->dev.parent = &linux_root_device;
	pdev->dev.bsddev = dev;
	INIT_LIST_HEAD(&pdev->dev.irqents);
	pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
	pdev->device = dinfo->cfg.device;
	pdev->vendor = dinfo->cfg.vendor;
	pdev->subsystem_vendor = dinfo->cfg.subvendor;
	pdev->subsystem_device = dinfo->cfg.subdevice;
	pdev->class = pci_get_class(dev);
	pdev->revision = pci_get_revid(dev);
	pdev->pdrv = pdrv;
	kobject_init(&pdev->dev.kobj, &linux_dev_ktype);
	kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev));
	kobject_add(&pdev->dev.kobj, &linux_root_device.kobj,
	    kobject_name(&pdev->dev.kobj));
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0);
	if (rle != NULL)
		pdev->dev.irq = rle->start;
	else
		pdev->dev.irq = LINUX_IRQ_INVALID;
	pdev->irq = pdev->dev.irq;
	error = linux_pdev_dma_init(pdev);
	if (error)
		goto out_dma_init;

	pbus = malloc(sizeof(*pbus), M_DEVBUF, M_WAITOK | M_ZERO);
	pbus->self = pdev;
	pbus->number = pci_get_bus(dev);
	pdev->bus = pbus;

	spin_lock(&pci_lock);
	list_add(&pdev->links, &pci_devices);
	spin_unlock(&pci_lock);

	error = pdrv->probe(pdev, id);
	if (error)
		goto out_probe;
	return (0);

out_probe:
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);
	/* only unlink here; the device was added to pci_devices after DMA init */
	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
out_dma_init:
	put_device(&pdev->dev);
	return (-error);
}
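
/*
 * Example (hypothetical, for illustration only): by the time a driver's
 * probe() callback runs, the attach glue above has populated the
 * Linux-side view of the device.  A made-up callback could rely on:
 *
 *	static int
 *	mydrv_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		printf("mydrv: %04x:%04x rev %d irq %d\n",
 *		    pdev->vendor, pdev->device, pdev->revision, pdev->irq);
 *		return (0);
 *	}
 */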
static int
linux_pci_detach(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	pdev->pdrv->remove(pdev);

	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);

	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	device_set_desc(dev, NULL);
	put_device(&pdev->dev);

	return (0);
}

/*
 * Prefer a driver's legacy suspend/resume callbacks; otherwise fall back
 * to its dev_pm_ops, approximating the Linux calling order.
 */
static int
linux_pci_suspend(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pm_message pm = { };
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->suspend != NULL)
		error = -pdev->pdrv->suspend(pdev, pm);
	else if (pmops != NULL && pmops->suspend != NULL) {
		error = -pmops->suspend(&pdev->dev);
		if (error == 0 && pmops->suspend_late != NULL)
			error = -pmops->suspend_late(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_resume(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->resume != NULL)
		error = -pdev->pdrv->resume(pdev);
	else if (pmops != NULL && pmops->resume != NULL) {
		if (pmops->resume_early != NULL)
			error = -pmops->resume_early(&pdev->dev);
		if (error == 0 && pmops->resume != NULL)
			error = -pmops->resume(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_shutdown(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->shutdown != NULL)
		pdev->pdrv->shutdown(pdev);
	return (0);
}

static int
_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
{
	int error;

	linux_set_current(curthread);
	spin_lock(&pci_lock);
	list_add(&pdrv->links, &pci_drivers);
	spin_unlock(&pci_lock);
	pdrv->bsddriver.name = pdrv->name;
	pdrv->bsddriver.methods = pci_methods;
	pdrv->bsddriver.size = sizeof(struct pci_dev);

	mtx_lock(&Giant);
	error = devclass_add_driver(dc, &pdrv->bsddriver,
	    BUS_PASS_DEFAULT, &pdrv->bsdclass);
	mtx_unlock(&Giant);
	return (-error);
}

int
linux_pci_register_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_find("pci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = false;
	return (_linux_pci_register_driver(pdrv, dc));
}

int
linux_pci_register_drm_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_create("vgapci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = true;
	pdrv->name = "drmn";
	return (_linux_pci_register_driver(pdrv, dc));
}
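
/*
 * Example (hypothetical, for illustration only): a minimal LinuxKPI PCI
 * driver registration that the probe/attach glue above would service.
 * The IDs, names and callbacks below are made up; note the zeroed
 * sentinel entry that terminates the table for linux_pci_find():
 *
 *	static const struct pci_device_id mydrv_ids[] = {
 *		{ .vendor = 0x15b3, .device = 0x1003,
 *		  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID },
 *		{ }
 *	};
 *
 *	static struct pci_driver mydrv_driver = {
 *		.name = "mydrv",
 *		.id_table = mydrv_ids,
 *		.probe = mydrv_probe,
 *		.remove = mydrv_remove,
 *	};
 *
 *	error = linux_pci_register_driver(&mydrv_driver);
 */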
void
linux_pci_unregister_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("pci");

	spin_lock(&pci_lock);
	list_del(&pdrv->links);
	spin_unlock(&pci_lock);
	mtx_lock(&Giant);
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	mtx_unlock(&Giant);
}

CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));

struct linux_dma_obj {
	void		*vaddr;
	uint64_t	dma_addr;
	bus_dmamap_t	dmamap;
};

static uma_zone_t linux_dma_trie_zone;
static uma_zone_t linux_dma_obj_zone;

static void
linux_dma_init(void *arg)
{

	linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
	    pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
	    UMA_ALIGN_PTR, 0);
	linux_dma_obj_zone = uma_zcreate("linux_dma_object",
	    sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
}
SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);

static void
linux_dma_uninit(void *arg)
{

	uma_zdestroy(linux_dma_obj_zone);
	uma_zdestroy(linux_dma_trie_zone);
}
SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);

static void *
linux_dma_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(linux_dma_trie_zone, 0));
}

static void
linux_dma_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(linux_dma_trie_zone, node);
}

PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
    linux_dma_trie_free);

void *
linux_dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t flag)
{
	struct linux_dma_priv *priv;
	vm_paddr_t high;
	size_t align;
	void *mem;

	if (dev == NULL || dev->dma_priv == NULL) {
		*dma_handle = 0;
		return (NULL);
	}
	priv = dev->dma_priv;
	if (priv->dma_mask)
		high = priv->dma_mask;
	else if (flag & GFP_DMA32)
		high = BUS_SPACE_MAXADDR_32BIT;
	else
		high = BUS_SPACE_MAXADDR;
	align = PAGE_SIZE << get_order(size);
	mem = (void *)kmem_alloc_contig(size, flag, 0, high, align, 0,
	    VM_MEMATTR_DEFAULT);
	if (mem != NULL) {
		*dma_handle = linux_dma_map_phys(dev, vtophys(mem), size);
		if (*dma_handle == 0) {
			kmem_free((vm_offset_t)mem, size);
			mem = NULL;
		}
	} else {
		*dma_handle = 0;
	}
	return (mem);
}

dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	obj = uma_zalloc(linux_dma_obj_zone, 0);

	DMA_PRIV_LOCK(priv);
	if (bus_dmamap_create(priv->dmat, 0, &obj->dmamap) != 0) {
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	nseg = -1;
	if (_bus_dmamap_load_phys(priv->dmat, obj->dmamap, phys, len,
	    BUS_DMA_NOWAIT, &seg, &nseg) != 0) {
		bus_dmamap_destroy(priv->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
	if (error != 0) {
		bus_dmamap_unload(priv->dmat, obj->dmamap);
		bus_dmamap_destroy(priv->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}
	DMA_PRIV_UNLOCK(priv);
	return (obj->dma_addr);
}
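
/*
 * Example (hypothetical driver code): the Linux streaming-DMA entry
 * points are expected to funnel into linux_dma_map_phys() above and
 * linux_dma_unmap() below, roughly as in:
 *
 *	dma_addr_t busaddr;
 *
 *	busaddr = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, busaddr))
 *		return (-ENOMEM);
 *	...
 *	dma_unmap_single(&pdev->dev, busaddr, len, DMA_TO_DEVICE);
 *
 * A returned handle of 0 signals failure, which is why the functions
 * here use 0 as their error value.
 */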
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);
	bus_dmamap_unload(priv->dmat, obj->dmamap);
	bus_dmamap_destroy(priv->dmat, obj->dmamap);
	DMA_PRIV_UNLOCK(priv);

	uma_zfree(linux_dma_obj_zone, obj);
}

int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct linux_dma_priv *priv;
	struct scatterlist *sg;
	int i, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	/* create common DMA map in the first S/G entry */
	if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
		DMA_PRIV_UNLOCK(priv);
		return (0);
	}

	/* load all S/G list entries */
	for_each_sg(sgl, sg, nents, i) {
		nseg = -1;
		if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
		    sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
		    &seg, &nseg) != 0) {
			bus_dmamap_unload(priv->dmat, sgl->dma_map);
			bus_dmamap_destroy(priv->dmat, sgl->dma_map);
			DMA_PRIV_UNLOCK(priv);
			return (0);
		}
		KASSERT(nseg == 0,
		    ("More than one segment (nseg=%d)", nseg + 1));

		sg_dma_address(sg) = seg.ds_addr;
	}
	DMA_PRIV_UNLOCK(priv);

	return (nents);
}

void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);
	bus_dmamap_unload(priv->dmat, sgl->dma_map);
	bus_dmamap_destroy(priv->dmat, sgl->dma_map);
	DMA_PRIV_UNLOCK(priv);
}
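
/*
 * Linux dma_pool emulation.
 *
 * Each pool is backed by a UMA cache zone: dma_pool_obj_import() and
 * dma_pool_obj_release() allocate and free the underlying DMA memory
 * via bus_dmamem_alloc()/bus_dmamem_free(), while the zone ctor/dtor
 * load and unload each object's bus address as it moves between the
 * cache and the consumer.  Live allocations are indexed by bus address
 * in pool_ptree so linux_dma_pool_free() can find the object again.
 */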
struct dma_pool {
	struct device	*pool_device;
	uma_zone_t	pool_zone;
	struct mtx	pool_lock;
	bus_dma_tag_t	pool_dmat;
	size_t		pool_entry_size;
	struct pctrie	pool_ptree;
};

#define	DMA_POOL_LOCK(pool)	mtx_lock(&(pool)->pool_lock)
#define	DMA_POOL_UNLOCK(pool)	mtx_unlock(&(pool)->pool_lock)

static inline int
dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;
	int error, nseg;
	bus_dma_segment_t seg;

	nseg = -1;
	DMA_POOL_LOCK(pool);
	error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
	    vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
	    &seg, &nseg);
	DMA_POOL_UNLOCK(pool);
	if (error != 0) {
		return (error);
	}
	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	return (0);
}

static void
dma_pool_obj_dtor(void *mem, int size, void *arg)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;

	DMA_POOL_LOCK(pool);
	bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
	DMA_POOL_UNLOCK(pool);
}

static int
dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
	struct dma_pool *pool = arg;
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, i;

	priv = pool->pool_device->dma_priv;
	for (i = 0; i < count; i++) {
		obj = uma_zalloc(linux_dma_obj_zone, flags);
		if (obj == NULL)
			break;

		error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
		    BUS_DMA_NOWAIT, &obj->dmamap);
		if (error != 0) {
			uma_zfree(linux_dma_obj_zone, obj);
			break;
		}

		store[i] = obj;
	}

	return (i);
}

static void
dma_pool_obj_release(void *arg, void **store, int count)
{
	struct dma_pool *pool = arg;
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int i;

	priv = pool->pool_device->dma_priv;
	for (i = 0; i < count; i++) {
		obj = store[i];
		bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
		uma_zfree(linux_dma_obj_zone, obj);
	}
}

struct dma_pool *
linux_dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
{
	struct linux_dma_priv *priv;
	struct dma_pool *pool;

	priv = dev->dma_priv;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	pool->pool_device = dev;
	pool->pool_entry_size = size;

	if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    align, boundary,		/* alignment, boundary */
	    priv->dma_mask,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &pool->pool_dmat)) {
		kfree(pool);
		return (NULL);
	}

	pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,
	    dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,
	    dma_pool_obj_release, pool, 0);

	mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF);
	pctrie_init(&pool->pool_ptree);

	return (pool);
}

void
linux_dma_pool_destroy(struct dma_pool *pool)
{

	uma_zdestroy(pool->pool_zone);
	bus_dma_tag_destroy(pool->pool_dmat);
	mtx_destroy(&pool->pool_lock);
	kfree(pool);
}

void *
linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    dma_addr_t *handle)
{
	struct linux_dma_obj *obj;

	obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags);
	if (obj == NULL)
		return (NULL);

	DMA_POOL_LOCK(pool);
	if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
		DMA_POOL_UNLOCK(pool);
		uma_zfree_arg(pool->pool_zone, obj, pool);
		return (NULL);
	}
	DMA_POOL_UNLOCK(pool);

	*handle = obj->dma_addr;
	return (obj->vaddr);
}

void
linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
{
	struct linux_dma_obj *obj;

	DMA_POOL_LOCK(pool);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
	if (obj == NULL) {
		DMA_POOL_UNLOCK(pool);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
	DMA_POOL_UNLOCK(pool);

	uma_zfree_arg(pool->pool_zone, obj, pool);
}
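
/*
 * Example (hypothetical driver code): the typical dma_pool life cycle as
 * a LinuxKPI consumer would exercise the functions above; the pool name
 * and sizes below are made up for illustration:
 *
 *	struct dma_pool *pool;
 *	dma_addr_t busaddr;
 *	void *desc;
 *
 *	pool = dma_pool_create("mydrv-desc", &pdev->dev, 64, 64, 0);
 *	desc = dma_pool_alloc(pool, GFP_KERNEL, &busaddr);
 *	...
 *	dma_pool_free(pool, desc, busaddr);
 *	dma_pool_destroy(pool);
 */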