/*-
 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/bus.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/pctrie.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/stdarg.h>

#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/compat.h>

static device_probe_t linux_pci_probe;
static device_attach_t linux_pci_attach;
static device_detach_t linux_pci_detach;
static device_suspend_t linux_pci_suspend;
static device_resume_t linux_pci_resume;
static device_shutdown_t linux_pci_shutdown;

static device_method_t pci_methods[] = {
	DEVMETHOD(device_probe, linux_pci_probe),
	DEVMETHOD(device_attach, linux_pci_attach),
	DEVMETHOD(device_detach, linux_pci_detach),
	DEVMETHOD(device_suspend, linux_pci_suspend),
	DEVMETHOD(device_resume, linux_pci_resume),
	DEVMETHOD(device_shutdown, linux_pci_shutdown),
	DEVMETHOD_END
};

struct linux_dma_priv {
	uint64_t	dma_mask;
	struct mtx	lock;
	bus_dma_tag_t	dmat;
	struct pctrie	ptree;
};
#define	DMA_PRIV_LOCK(priv)	mtx_lock(&(priv)->lock)
#define	DMA_PRIV_UNLOCK(priv)	mtx_unlock(&(priv)->lock)

static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;
	int error;

	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);
	pdev->dev.dma_priv = priv;

	mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);

	pctrie_init(&priv->ptree);

	/* create a default DMA tag */
	error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
	if (error) {
		mtx_destroy(&priv->lock);
		free(priv, M_DEVBUF);
		pdev->dev.dma_priv = NULL;
	}
	return (error);
}
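/*
 * Illustrative only: a driver needing a narrower mask than the 64-bit
 * default installed above would call dma_set_mask(), which the LinuxKPI
 * headers are assumed to route back into linux_dma_tag_init() so the
 * default tag is rebuilt:
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0)
 *		return (-ENODEV);
 */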
static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;

	priv = pdev->dev.dma_priv;
	if (priv->dmat)
		bus_dma_tag_destroy(priv->dmat);
	mtx_destroy(&priv->lock);
	free(priv, M_DEVBUF);
	pdev->dev.dma_priv = NULL;
	return (0);
}

int
linux_dma_tag_init(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat) {
		if (priv->dma_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat);
	}

	priv->dma_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat);
	return (-error);
}

static struct pci_driver *
linux_pci_find(device_t dev, const struct pci_device_id **idp)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;
	uint16_t subdevice;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);
	subvendor = pci_get_subvendor(dev);
	subdevice = pci_get_subdevice(dev);

	spin_lock(&pci_lock);
	list_for_each_entry(pdrv, &pci_drivers, links) {
		for (id = pdrv->id_table; id->vendor != 0; id++) {
			if (vendor == id->vendor &&
			    (PCI_ANY_ID == id->device || device == id->device) &&
			    (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) &&
			    (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) {
				*idp = id;
				spin_unlock(&pci_lock);
				return (pdrv);
			}
		}
	}
	spin_unlock(&pci_lock);
	return (NULL);
}

static int
linux_pci_probe(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;

	if ((pdrv = linux_pci_find(dev, &id)) == NULL)
		return (ENXIO);
	if (device_get_driver(dev) != &pdrv->bsddriver)
		return (ENXIO);
	device_set_desc(dev, pdrv->name);
	return (0);
}
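/*
 * Note: linux_pci_find() depends on each driver's id_table being
 * terminated by an all-zero sentinel entry (id->vendor == 0), matching
 * the Linux convention.  An illustrative table (hypothetical IDs):
 *
 *	static const struct pci_device_id foo_ids[] = {
 *		{ .vendor = 0x15b3, .device = 0x1003,
 *		  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID },
 *		{ 0, }
 *	};
 */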
static int
linux_pci_attach(device_t dev)
{
	struct resource_list_entry *rle;
	struct pci_bus *pbus;
	struct pci_dev *pdev;
	struct pci_devinfo *dinfo;
	struct pci_driver *pdrv;
	const struct pci_device_id *id;
	device_t parent;
	int error;

	linux_set_current(curthread);

	pdrv = linux_pci_find(dev, &id);
	pdev = device_get_softc(dev);

	parent = device_get_parent(dev);
	if (pdrv->isdrm) {
		/* DRM devices attach below vgapci; borrow its ivars. */
		dinfo = device_get_ivars(parent);
		device_set_ivars(dev, dinfo);
	} else {
		dinfo = device_get_ivars(dev);
	}

	pdev->dev.parent = &linux_root_device;
	pdev->dev.bsddev = dev;
	INIT_LIST_HEAD(&pdev->dev.irqents);
	pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
	pdev->device = dinfo->cfg.device;
	pdev->vendor = dinfo->cfg.vendor;
	pdev->subsystem_vendor = dinfo->cfg.subvendor;
	pdev->subsystem_device = dinfo->cfg.subdevice;
	pdev->class = pci_get_class(dev);
	pdev->revision = pci_get_revid(dev);
	pdev->pdrv = pdrv;
	kobject_init(&pdev->dev.kobj, &linux_dev_ktype);
	kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev));
	kobject_add(&pdev->dev.kobj, &linux_root_device.kobj,
	    kobject_name(&pdev->dev.kobj));
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0);
	if (rle != NULL)
		pdev->dev.irq = rle->start;
	else
		pdev->dev.irq = LINUX_IRQ_INVALID;
	pdev->irq = pdev->dev.irq;
	error = linux_pdev_dma_init(pdev);
	if (error)
		goto out_dma_init;

	pbus = malloc(sizeof(*pbus), M_DEVBUF, M_WAITOK | M_ZERO);
	pbus->self = pdev;
	pbus->number = pci_get_bus(dev);
	pdev->bus = pbus;

	spin_lock(&pci_lock);
	list_add(&pdev->links, &pci_devices);
	spin_unlock(&pci_lock);

	error = pdrv->probe(pdev, id);
	if (error)
		goto out_probe;
	return (0);

out_probe:
	free(pdev->bus, M_DEVBUF);
	/* Only drop the device list linkage if it was added above. */
	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	linux_pdev_dma_uninit(pdev);
out_dma_init:
	put_device(&pdev->dev);
	return (-error);
}

static int
linux_pci_detach(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	pdev->pdrv->remove(pdev);

	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);

	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	device_set_desc(dev, NULL);
	put_device(&pdev->dev);

	return (0);
}

static int
linux_pci_suspend(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pm_message pm = { };
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->suspend != NULL)
		error = -pdev->pdrv->suspend(pdev, pm);
	else if (pmops != NULL && pmops->suspend != NULL) {
		error = -pmops->suspend(&pdev->dev);
		if (error == 0 && pmops->suspend_late != NULL)
			error = -pmops->suspend_late(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_resume(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->resume != NULL)
		error = -pdev->pdrv->resume(pdev);
	else if (pmops != NULL && pmops->resume != NULL) {
		if (pmops->resume_early != NULL)
			error = -pmops->resume_early(&pdev->dev);
		if (error == 0)
			error = -pmops->resume(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_shutdown(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->shutdown != NULL)
		pdev->pdrv->shutdown(pdev);
	return (0);
}
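/*
 * Illustrative only: a LinuxKPI consumer reaches the registration
 * helpers below through the pci_register_driver() wrapper, typically
 * from its module init path, e.g.:
 *
 *	static struct pci_driver foo_driver = {
 *		.name = "foo",
 *		.id_table = foo_ids,
 *		.probe = foo_probe,
 *		.remove = foo_remove,
 *	};
 *
 *	error = pci_register_driver(&foo_driver);
 *
 * The "foo" names are hypothetical.
 */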
static int
_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
{
	int error;

	linux_set_current(curthread);
	spin_lock(&pci_lock);
	list_add(&pdrv->links, &pci_drivers);
	spin_unlock(&pci_lock);
	pdrv->bsddriver.name = pdrv->name;
	pdrv->bsddriver.methods = pci_methods;
	pdrv->bsddriver.size = sizeof(struct pci_dev);

	mtx_lock(&Giant);
	error = devclass_add_driver(dc, &pdrv->bsddriver,
	    BUS_PASS_DEFAULT, &pdrv->bsdclass);
	mtx_unlock(&Giant);
	return (-error);
}

int
linux_pci_register_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_find("pci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = false;
	return (_linux_pci_register_driver(pdrv, dc));
}

int
linux_pci_register_drm_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_create("vgapci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = true;
	pdrv->name = "drmn";
	return (_linux_pci_register_driver(pdrv, dc));
}

void
linux_pci_unregister_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("pci");

	spin_lock(&pci_lock);
	list_del(&pdrv->links);
	spin_unlock(&pci_lock);
	mtx_lock(&Giant);
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	mtx_unlock(&Giant);
}

void
linux_pci_unregister_drm_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("vgapci");

	spin_lock(&pci_lock);
	list_del(&pdrv->links);
	spin_unlock(&pci_lock);
	mtx_lock(&Giant);
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	mtx_unlock(&Giant);
}

CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));

struct linux_dma_obj {
	void		*vaddr;
	uint64_t	dma_addr;
	bus_dmamap_t	dmamap;
};

static uma_zone_t linux_dma_trie_zone;
static uma_zone_t linux_dma_obj_zone;

static void
linux_dma_init(void *arg)
{

	linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
	    pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
	    UMA_ALIGN_PTR, 0);
	linux_dma_obj_zone = uma_zcreate("linux_dma_object",
	    sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
}
SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);

static void
linux_dma_uninit(void *arg)
{

	uma_zdestroy(linux_dma_obj_zone);
	uma_zdestroy(linux_dma_trie_zone);
}
SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);

static void *
linux_dma_trie_alloc(struct pctrie *ptree)
{

	/* Trie nodes are allocated with the DMA private mutex held. */
	return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT));
}

static void
linux_dma_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(linux_dma_trie_zone, node);
}

PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
    linux_dma_trie_free);

void *
linux_dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t flag)
{
	struct linux_dma_priv *priv;
	vm_paddr_t high;
	size_t align;
	void *mem;

	if (dev == NULL || dev->dma_priv == NULL) {
		*dma_handle = 0;
		return (NULL);
	}
	priv = dev->dma_priv;
	if (priv->dma_mask)
		high = priv->dma_mask;
	else if (flag & GFP_DMA32)
		high = BUS_SPACE_MAXADDR_32BIT;
	else
		high = BUS_SPACE_MAXADDR;
	align = PAGE_SIZE << get_order(size);
	mem = (void *)kmem_alloc_contig(size, flag, 0, high, align, 0,
	    VM_MEMATTR_DEFAULT);
	if (mem != NULL) {
		*dma_handle = linux_dma_map_phys(dev, vtophys(mem), size);
		if (*dma_handle == 0) {
			kmem_free((vm_offset_t)mem, size);
			mem = NULL;
		}
	} else {
		*dma_handle = 0;
	}
	return (mem);
}
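/*
 * Illustrative only: the dma_map_single()/dma_unmap_single() wrappers in
 * the LinuxKPI headers are assumed to funnel into linux_dma_map_phys()
 * and linux_dma_unmap() below, roughly as:
 *
 *	dma_addr_t addr;
 *
 *	addr = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, addr))
 *		return (-ENOMEM);
 *	...
 *	dma_unmap_single(&pdev->dev, addr, len, DMA_TO_DEVICE);
 */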
#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	/*
	 * If the resultant mapping will be entirely 1:1 with the
	 * physical address, short-circuit the remainder of the
	 * bus_dma API.  This avoids tracking collisions in the pctrie
	 * with the additional benefit of reducing overhead.
	 */
	if (bus_dma_id_mapped(priv->dmat, phys, len))
		return (phys);

	obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT);
	if (obj == NULL)
		return (0);

	DMA_PRIV_LOCK(priv);
	if (bus_dmamap_create(priv->dmat, 0, &obj->dmamap) != 0) {
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	nseg = -1;
	if (_bus_dmamap_load_phys(priv->dmat, obj->dmamap, phys, len,
	    BUS_DMA_NOWAIT, &seg, &nseg) != 0) {
		bus_dmamap_destroy(priv->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
	if (error != 0) {
		bus_dmamap_unload(priv->dmat, obj->dmamap);
		bus_dmamap_destroy(priv->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}
	DMA_PRIV_UNLOCK(priv);
	return (obj->dma_addr);
}
#else
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{

	return (phys);
}
#endif

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	/*
	 * Identity-mapped addresses are never inserted into the pctrie;
	 * an empty trie therefore means there is nothing to unload.
	 */
	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);
	bus_dmamap_unload(priv->dmat, obj->dmamap);
	bus_dmamap_destroy(priv->dmat, obj->dmamap);
	DMA_PRIV_UNLOCK(priv);

	uma_zfree(linux_dma_obj_zone, obj);
}
#else
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
}
#endif

int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct linux_dma_priv *priv;
	struct scatterlist *sg;
	int i, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	/* create common DMA map in the first S/G entry */
	if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
		DMA_PRIV_UNLOCK(priv);
		return (0);
	}

	/* load all S/G list entries */
	for_each_sg(sgl, sg, nents, i) {
		nseg = -1;
		if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
		    sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
		    &seg, &nseg) != 0) {
			bus_dmamap_unload(priv->dmat, sgl->dma_map);
			bus_dmamap_destroy(priv->dmat, sgl->dma_map);
			DMA_PRIV_UNLOCK(priv);
			return (0);
		}
		KASSERT(nseg == 0,
		    ("More than one segment (nseg=%d)", nseg + 1));

		sg_dma_address(sg) = seg.ds_addr;
	}
	DMA_PRIV_UNLOCK(priv);

	return (nents);
}

void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);
	bus_dmamap_unload(priv->dmat, sgl->dma_map);
	bus_dmamap_destroy(priv->dmat, sgl->dma_map);
	DMA_PRIV_UNLOCK(priv);
}
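/*
 * Note on the S/G helpers above: every entry is loaded into the single
 * bus_dmamap stored in the first scatterlist entry, so the unmap side
 * only unloads and destroys sgl->dma_map once for the whole list.
 */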
struct dma_pool {
	struct device	*pool_device;
	uma_zone_t	pool_zone;
	struct mtx	pool_lock;
	bus_dma_tag_t	pool_dmat;
	size_t		pool_entry_size;
	struct pctrie	pool_ptree;
};

#define	DMA_POOL_LOCK(pool)	mtx_lock(&(pool)->pool_lock)
#define	DMA_POOL_UNLOCK(pool)	mtx_unlock(&(pool)->pool_lock)

/*
 * UMA cache-zone glue: import/release allocate and free the DMA'able
 * backing memory, while the ctor/dtor load and unload the busdma map
 * each time an object moves between the cache and a consumer.
 */
static inline int
dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;
	int error, nseg;
	bus_dma_segment_t seg;

	nseg = -1;
	DMA_POOL_LOCK(pool);
	error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
	    vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
	    &seg, &nseg);
	DMA_POOL_UNLOCK(pool);
	if (error != 0)
		return (error);
	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	return (0);
}

static void
dma_pool_obj_dtor(void *mem, int size, void *arg)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;

	DMA_POOL_LOCK(pool);
	bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
	DMA_POOL_UNLOCK(pool);
}

static int
dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
	struct dma_pool *pool = arg;
	struct linux_dma_obj *obj;
	int error, i;

	for (i = 0; i < count; i++) {
		obj = uma_zalloc(linux_dma_obj_zone, flags);
		if (obj == NULL)
			break;

		error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
		    BUS_DMA_NOWAIT, &obj->dmamap);
		if (error != 0) {
			uma_zfree(linux_dma_obj_zone, obj);
			break;
		}

		store[i] = obj;
	}

	return (i);
}

static void
dma_pool_obj_release(void *arg, void **store, int count)
{
	struct dma_pool *pool = arg;
	struct linux_dma_obj *obj;
	int i;

	for (i = 0; i < count; i++) {
		obj = store[i];
		bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
		uma_zfree(linux_dma_obj_zone, obj);
	}
}

struct dma_pool *
linux_dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
{
	struct linux_dma_priv *priv;
	struct dma_pool *pool;

	priv = dev->dma_priv;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	pool->pool_device = dev;
	pool->pool_entry_size = size;

	if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    align, boundary,		/* alignment, boundary */
	    priv->dma_mask,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &pool->pool_dmat)) {
		kfree(pool);
		return (NULL);
	}

	pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,
	    dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,
	    dma_pool_obj_release, pool, 0);

	mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF);
	pctrie_init(&pool->pool_ptree);

	return (pool);
}

void
linux_dma_pool_destroy(struct dma_pool *pool)
{

	uma_zdestroy(pool->pool_zone);
	bus_dma_tag_destroy(pool->pool_dmat);
	mtx_destroy(&pool->pool_lock);
	kfree(pool);
}
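/*
 * Illustrative only: consumers are assumed to reach the pool allocator
 * below through the LinuxKPI dmapool.h wrappers, e.g.:
 *
 *	pool = dma_pool_create("foo", &pdev->dev, 256, 256, 0);
 *	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &busaddr);
 *	...
 *	dma_pool_free(pool, vaddr, busaddr);
 *	dma_pool_destroy(pool);
 *
 * The "foo" pool name and sizes are hypothetical.
 */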
void *
linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    dma_addr_t *handle)
{
	struct linux_dma_obj *obj;

	obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags);
	if (obj == NULL)
		return (NULL);

	DMA_POOL_LOCK(pool);
	if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
		DMA_POOL_UNLOCK(pool);
		uma_zfree_arg(pool->pool_zone, obj, pool);
		return (NULL);
	}
	DMA_POOL_UNLOCK(pool);

	*handle = obj->dma_addr;
	return (obj->vaddr);
}

void
linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
{
	struct linux_dma_obj *obj;

	DMA_POOL_LOCK(pool);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
	if (obj == NULL) {
		DMA_POOL_UNLOCK(pool);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
	DMA_POOL_UNLOCK(pool);

	uma_zfree_arg(pool->pool_zone, obj, pool);
}