/*-
 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/pctrie.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/stdarg.h>

#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/compat.h>

static device_probe_t linux_pci_probe;
static device_attach_t linux_pci_attach;
static device_detach_t linux_pci_detach;
static device_suspend_t linux_pci_suspend;
static device_resume_t linux_pci_resume;
static device_shutdown_t linux_pci_shutdown;

static device_method_t pci_methods[] = {
	DEVMETHOD(device_probe, linux_pci_probe),
	DEVMETHOD(device_attach, linux_pci_attach),
	DEVMETHOD(device_detach, linux_pci_detach),
	DEVMETHOD(device_suspend, linux_pci_suspend),
	DEVMETHOD(device_resume, linux_pci_resume),
	DEVMETHOD(device_shutdown, linux_pci_shutdown),
	DEVMETHOD_END
};

struct linux_dma_priv {
	uint64_t	dma_mask;
	struct mtx	lock;
	bus_dma_tag_t	dmat;
	struct pctrie	ptree;
};
#define	DMA_PRIV_LOCK(priv)	mtx_lock(&(priv)->lock)
#define	DMA_PRIV_UNLOCK(priv)	mtx_unlock(&(priv)->lock)

static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;
	int error;

	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);
	pdev->dev.dma_priv = priv;

	mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);

	pctrie_init(&priv->ptree);

	/* create a default DMA tag */
	error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
	if (error) {
		mtx_destroy(&priv->lock);
		free(priv, M_DEVBUF);
		pdev->dev.dma_priv = NULL;
	}
	return (error);
}
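/*
 * Tear down the per-device DMA state created above.  The default tag
 * is destroyed here as well: linux_dma_tag_init() stores at most one
 * tag in the priv, replacing any previous one.  Callers are expected
 * to have unmapped all outstanding DMA addresses first, since the
 * pctrie itself is not drained here.
 */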
static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;

	priv = pdev->dev.dma_priv;
	if (priv->dmat)
		bus_dma_tag_destroy(priv->dmat);
	mtx_destroy(&priv->lock);
	free(priv, M_DEVBUF);
	pdev->dev.dma_priv = NULL;
	return (0);
}

int
linux_dma_tag_init(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat) {
		if (priv->dma_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat);
	}

	priv->dma_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat);
	return (-error);
}

static struct pci_driver *
linux_pci_find(device_t dev, const struct pci_device_id **idp)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;
	uint16_t subdevice;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);
	subvendor = pci_get_subvendor(dev);
	subdevice = pci_get_subdevice(dev);

	spin_lock(&pci_lock);
	list_for_each_entry(pdrv, &pci_drivers, links) {
		for (id = pdrv->id_table; id->vendor != 0; id++) {
			if (vendor == id->vendor &&
			    (PCI_ANY_ID == id->device || device == id->device) &&
			    (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) &&
			    (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) {
				*idp = id;
				spin_unlock(&pci_lock);
				return (pdrv);
			}
		}
	}
	spin_unlock(&pci_lock);
	return (NULL);
}

static int
linux_pci_probe(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;

	if ((pdrv = linux_pci_find(dev, &id)) == NULL)
		return (ENXIO);
	if (device_get_driver(dev) != &pdrv->bsddriver)
		return (ENXIO);
	device_set_desc(dev, pdrv->name);
	return (0);
}
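/*
 * The probe path above relies on each registered pci_driver providing
 * an id_table terminated by an all-zero sentinel; linux_pci_find()
 * stops scanning at the first entry with vendor == 0.  A minimal,
 * hypothetical table (field values are illustrative only):
 *
 *	static const struct pci_device_id foo_ids[] = {
 *		{ .vendor = 0x15b3, .device = 0x1011,
 *		  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID },
 *		{ 0 }
 *	};
 */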
static int
linux_pci_attach(device_t dev)
{
	struct resource_list_entry *rle;
	struct pci_bus *pbus;
	struct pci_dev *pdev;
	struct pci_devinfo *dinfo;
	struct pci_driver *pdrv;
	const struct pci_device_id *id;
	device_t parent;
	int error;

	linux_set_current(curthread);

	pdrv = linux_pci_find(dev, &id);
	pdev = device_get_softc(dev);

	parent = device_get_parent(dev);
	if (pdrv->isdrm) {
		dinfo = device_get_ivars(parent);
		device_set_ivars(dev, dinfo);
	} else {
		dinfo = device_get_ivars(dev);
	}

	pdev->dev.parent = &linux_root_device;
	pdev->dev.bsddev = dev;
	INIT_LIST_HEAD(&pdev->dev.irqents);
	pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
	pdev->device = dinfo->cfg.device;
	pdev->vendor = dinfo->cfg.vendor;
	pdev->subsystem_vendor = dinfo->cfg.subvendor;
	pdev->subsystem_device = dinfo->cfg.subdevice;
	pdev->class = pci_get_class(dev);
	pdev->revision = pci_get_revid(dev);
	pdev->pdrv = pdrv;
	kobject_init(&pdev->dev.kobj, &linux_dev_ktype);
	kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev));
	kobject_add(&pdev->dev.kobj, &linux_root_device.kobj,
	    kobject_name(&pdev->dev.kobj));
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0);
	if (rle != NULL)
		pdev->dev.irq = rle->start;
	else
		pdev->dev.irq = LINUX_IRQ_INVALID;
	pdev->irq = pdev->dev.irq;
	error = linux_pdev_dma_init(pdev);
	if (error)
		goto out_dma_init;

	pbus = malloc(sizeof(*pbus), M_DEVBUF, M_WAITOK | M_ZERO);
	pbus->self = pdev;
	pbus->number = pci_get_bus(dev);
	pbus->domain = pci_get_domain(dev);
	pdev->bus = pbus;

	spin_lock(&pci_lock);
	list_add(&pdev->links, &pci_devices);
	spin_unlock(&pci_lock);

	error = pdrv->probe(pdev, id);
	if (error)
		goto out_probe;
	return (0);

out_probe:
	free(pdev->bus, M_DEVBUF);
	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	linux_pdev_dma_uninit(pdev);
out_dma_init:
	put_device(&pdev->dev);
	return (-error);
}

static int
linux_pci_detach(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	pdev->pdrv->remove(pdev);

	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);

	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	device_set_desc(dev, NULL);
	put_device(&pdev->dev);

	return (0);
}

static int
linux_pci_suspend(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pm_message pm = { };
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->suspend != NULL)
		error = -pdev->pdrv->suspend(pdev, pm);
	else if (pmops != NULL && pmops->suspend != NULL) {
		error = -pmops->suspend(&pdev->dev);
		if (error == 0 && pmops->suspend_late != NULL)
			error = -pmops->suspend_late(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_resume(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->resume != NULL)
		error = -pdev->pdrv->resume(pdev);
	else if (pmops != NULL && pmops->resume != NULL) {
		if (pmops->resume_early != NULL)
			error = -pmops->resume_early(&pdev->dev);
		if (error == 0)
			error = -pmops->resume(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_shutdown(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->shutdown != NULL)
		pdev->pdrv->shutdown(pdev);
	return (0);
}
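/*
 * Driver registration: the Linux pci_driver is linked onto the global
 * pci_drivers list and a newbus driver is synthesized around it using
 * the pci_methods table above.  devclass_add_driver() re-probes
 * unattached devices, so by the time registration returns,
 * linux_pci_probe() and linux_pci_attach() may already have run for
 * matching hardware.
 */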
static int
_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
{
	int error;

	linux_set_current(curthread);
	spin_lock(&pci_lock);
	list_add(&pdrv->links, &pci_drivers);
	spin_unlock(&pci_lock);
	pdrv->bsddriver.name = pdrv->name;
	pdrv->bsddriver.methods = pci_methods;
	pdrv->bsddriver.size = sizeof(struct pci_dev);

	mtx_lock(&Giant);
	error = devclass_add_driver(dc, &pdrv->bsddriver,
	    BUS_PASS_DEFAULT, &pdrv->bsdclass);
	mtx_unlock(&Giant);
	return (-error);
}

int
linux_pci_register_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_find("pci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = false;
	return (_linux_pci_register_driver(pdrv, dc));
}

unsigned long
pci_resource_start(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;
	rman_res_t newstart;
	device_t dev;

	if ((rle = linux_pci_get_bar(pdev, bar)) == NULL)
		return (0);
	dev = pci_find_dbsf(pdev->bus->domain, pdev->bus->number,
	    PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));
	MPASS(dev != NULL);
	if (BUS_TRANSLATE_RESOURCE(dev, rle->type, rle->start, &newstart)) {
		device_printf(pdev->dev.bsddev, "translate of %#jx failed\n",
		    (uintmax_t)rle->start);
		return (0);
	}
	return (newstart);
}

unsigned long
pci_resource_len(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;

	if ((rle = linux_pci_get_bar(pdev, bar)) == NULL)
		return (0);
	return (rle->count);
}

int
linux_pci_register_drm_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_create("vgapci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = true;
	pdrv->name = "drmn";
	return (_linux_pci_register_driver(pdrv, dc));
}

void
linux_pci_unregister_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("pci");

	spin_lock(&pci_lock);
	list_del(&pdrv->links);
	spin_unlock(&pci_lock);
	mtx_lock(&Giant);
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	mtx_unlock(&Giant);
}

void
linux_pci_unregister_drm_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("vgapci");

	spin_lock(&pci_lock);
	list_del(&pdrv->links);
	spin_unlock(&pci_lock);
	mtx_lock(&Giant);
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	mtx_unlock(&Giant);
}

CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));

struct linux_dma_obj {
	void		*vaddr;
	uint64_t	dma_addr;
	bus_dmamap_t	dmamap;
};

static uma_zone_t linux_dma_trie_zone;
static uma_zone_t linux_dma_obj_zone;

static void
linux_dma_init(void *arg)
{

	linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
	    pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
	    UMA_ALIGN_PTR, 0);
	linux_dma_obj_zone = uma_zcreate("linux_dma_object",
	    sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
}
SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);

static void
linux_dma_uninit(void *arg)
{

	uma_zdestroy(linux_dma_obj_zone);
	uma_zdestroy(linux_dma_trie_zone);
}
SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);

static void *
linux_dma_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT));
}

static void
linux_dma_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(linux_dma_trie_zone, node);
}

PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
    linux_dma_trie_free);
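/*
 * dma_alloc_coherent() is emulated with a physically contiguous,
 * wired allocation.  The alignment mirrors Linux's page-order
 * behaviour (PAGE_SIZE << get_order(size)) and the upper physical
 * address bound comes from the device's DMA mask, falling back to
 * 32-bit or unrestricted addresses when no mask has been set.
 */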
void *
linux_dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t flag)
{
	struct linux_dma_priv *priv;
	vm_paddr_t high;
	size_t align;
	void *mem;

	if (dev == NULL || dev->dma_priv == NULL) {
		*dma_handle = 0;
		return (NULL);
	}
	priv = dev->dma_priv;
	if (priv->dma_mask)
		high = priv->dma_mask;
	else if (flag & GFP_DMA32)
		high = BUS_SPACE_MAXADDR_32BIT;
	else
		high = BUS_SPACE_MAXADDR;
	align = PAGE_SIZE << get_order(size);
	mem = (void *)kmem_alloc_contig(size, flag, 0, high, align, 0,
	    VM_MEMATTR_DEFAULT);
	if (mem != NULL) {
		*dma_handle = linux_dma_map_phys(dev, vtophys(mem), size);
		if (*dma_handle == 0) {
			kmem_free((vm_offset_t)mem, size);
			mem = NULL;
		}
	} else {
		*dma_handle = 0;
	}
	return (mem);
}

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	/*
	 * If the resultant mapping will be entirely 1:1 with the
	 * physical address, short-circuit the remainder of the
	 * bus_dma API.  This avoids tracking collisions in the pctrie
	 * with the additional benefit of reducing overhead.
	 */
	if (bus_dma_id_mapped(priv->dmat, phys, len))
		return (phys);

	obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT);
	if (obj == NULL) {
		return (0);
	}

	DMA_PRIV_LOCK(priv);
	if (bus_dmamap_create(priv->dmat, 0, &obj->dmamap) != 0) {
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	nseg = -1;
	if (_bus_dmamap_load_phys(priv->dmat, obj->dmamap, phys, len,
	    BUS_DMA_NOWAIT, &seg, &nseg) != 0) {
		bus_dmamap_destroy(priv->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
	if (error != 0) {
		bus_dmamap_unload(priv->dmat, obj->dmamap);
		bus_dmamap_destroy(priv->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}
	DMA_PRIV_UNLOCK(priv);
	return (obj->dma_addr);
}
#else
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	return (phys);
}
#endif

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);
	bus_dmamap_unload(priv->dmat, obj->dmamap);
	bus_dmamap_destroy(priv->dmat, obj->dmamap);
	DMA_PRIV_UNLOCK(priv);

	uma_zfree(linux_dma_obj_zone, obj);
}
#else
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
}
#endif
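/*
 * Scatter/gather mappings use a single bus_dmamap for the whole list,
 * stored in the first scatterlist entry, with each entry loaded as a
 * separate single-segment transfer.  Unlike linux_dma_map_phys(), no
 * pctrie bookkeeping is needed: linux_dma_unmap_sg_attrs() is handed
 * the original scatterlist back and can find the map directly.
 */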
int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct linux_dma_priv *priv;
	struct scatterlist *sg;
	int i, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	/* create common DMA map in the first S/G entry */
	if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
		DMA_PRIV_UNLOCK(priv);
		return (0);
	}

	/* load all S/G list entries */
	for_each_sg(sgl, sg, nents, i) {
		nseg = -1;
		if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
		    sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
		    &seg, &nseg) != 0) {
			bus_dmamap_unload(priv->dmat, sgl->dma_map);
			bus_dmamap_destroy(priv->dmat, sgl->dma_map);
			DMA_PRIV_UNLOCK(priv);
			return (0);
		}
		KASSERT(nseg == 0,
		    ("More than one segment (nseg=%d)", nseg + 1));

		sg_dma_address(sg) = seg.ds_addr;
	}
	DMA_PRIV_UNLOCK(priv);

	return (nents);
}

void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents, enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);
	bus_dmamap_unload(priv->dmat, sgl->dma_map);
	bus_dmamap_destroy(priv->dmat, sgl->dma_map);
	DMA_PRIV_UNLOCK(priv);
}

struct dma_pool {
	struct device	*pool_device;
	uma_zone_t	pool_zone;
	struct mtx	pool_lock;
	bus_dma_tag_t	pool_dmat;
	size_t		pool_entry_size;
	struct pctrie	pool_ptree;
};

#define	DMA_POOL_LOCK(pool)	mtx_lock(&(pool)->pool_lock)
#define	DMA_POOL_UNLOCK(pool)	mtx_unlock(&(pool)->pool_lock)

static inline int
dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;
	int error, nseg;
	bus_dma_segment_t seg;

	nseg = -1;
	DMA_POOL_LOCK(pool);
	error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
	    vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
	    &seg, &nseg);
	DMA_POOL_UNLOCK(pool);
	if (error != 0) {
		return (error);
	}
	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	return (0);
}

static void
dma_pool_obj_dtor(void *mem, int size, void *arg)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;

	DMA_POOL_LOCK(pool);
	bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
	DMA_POOL_UNLOCK(pool);
}

static int
dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
	struct dma_pool *pool = arg;
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, i;

	priv = pool->pool_device->dma_priv;
	for (i = 0; i < count; i++) {
		obj = uma_zalloc(linux_dma_obj_zone, flags);
		if (obj == NULL)
			break;

		error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
		    BUS_DMA_NOWAIT, &obj->dmamap);
		if (error != 0) {
			uma_zfree(linux_dma_obj_zone, obj);
			break;
		}

		store[i] = obj;
	}

	return (i);
}

static void
dma_pool_obj_release(void *arg, void **store, int count)
{
	struct dma_pool *pool = arg;
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int i;

	priv = pool->pool_device->dma_priv;
	for (i = 0; i < count; i++) {
		obj = store[i];
		bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
		uma_zfree(linux_dma_obj_zone, obj);
	}
}
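/*
 * A dma_pool is built on a UMA cache zone: dma_pool_obj_import() and
 * dma_pool_obj_release() allocate and free the backing DMA memory in
 * bulk, while the ctor/dtor pair loads and unloads the busdma map as
 * objects move between the cache and its consumers.  This keeps the
 * expensive bus_dmamem_alloc() calls off the fast path, which is the
 * point of Linux's dma_pool API.
 */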
struct dma_pool *
linux_dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
{
	struct linux_dma_priv *priv;
	struct dma_pool *pool;

	priv = dev->dma_priv;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	pool->pool_device = dev;
	pool->pool_entry_size = size;

	if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    align, boundary,		/* alignment, boundary */
	    priv->dma_mask,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &pool->pool_dmat)) {
		kfree(pool);
		return (NULL);
	}

	pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,
	    dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,
	    dma_pool_obj_release, pool, 0);

	mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF);
	pctrie_init(&pool->pool_ptree);

	return (pool);
}

void
linux_dma_pool_destroy(struct dma_pool *pool)
{

	uma_zdestroy(pool->pool_zone);
	bus_dma_tag_destroy(pool->pool_dmat);
	mtx_destroy(&pool->pool_lock);
	kfree(pool);
}

void *
linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    dma_addr_t *handle)
{
	struct linux_dma_obj *obj;

	obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags);
	if (obj == NULL)
		return (NULL);

	DMA_POOL_LOCK(pool);
	if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
		DMA_POOL_UNLOCK(pool);
		uma_zfree_arg(pool->pool_zone, obj, pool);
		return (NULL);
	}
	DMA_POOL_UNLOCK(pool);

	*handle = obj->dma_addr;
	return (obj->vaddr);
}

void
linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
{
	struct linux_dma_obj *obj;

	DMA_POOL_LOCK(pool);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
	if (obj == NULL) {
		DMA_POOL_UNLOCK(pool);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
	DMA_POOL_UNLOCK(pool);

	uma_zfree_arg(pool->pool_zone, obj, pool);
}
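/*
 * Example pool usage as seen from a Linux driver (names and sizes are
 * hypothetical; the Linux-facing dma_pool_*() calls reach the
 * linux_dma_pool_*() functions above through linuxkpi's dmapool
 * wrappers):
 *
 *	struct dma_pool *pool;
 *	dma_addr_t busaddr;
 *	void *desc;
 *
 *	pool = dma_pool_create("foo", &pdev->dev, 64, 64, 0);
 *	desc = dma_pool_alloc(pool, GFP_KERNEL, &busaddr);
 *	...hand busaddr to the device, access desc from the CPU...
 *	dma_pool_free(pool, desc, busaddr);
 *	dma_pool_destroy(pool);
 */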