/*-
 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 * Copyright (c) 2020-2022 The FreeBSD Foundation
 *
 * Portions of this software were developed by Björn Zeeb
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/pciio.h>
#include <sys/pctrie.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/stdarg.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <dev/pci/pci_iov.h>
#include <dev/backlight/backlight.h>

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/compat.h>

#include <linux/backlight.h>

#include "backlight_if.h"
#include "pcib_if.h"

/* Undef the linux function macro defined in linux/pci.h */
#undef pci_get_class

extern int linuxkpi_debug;

SYSCTL_DECL(_compat_linuxkpi);

static counter_u64_t lkpi_pci_nseg1_fail;
SYSCTL_COUNTER_U64(_compat_linuxkpi, OID_AUTO, lkpi_pci_nseg1_fail, CTLFLAG_RD,
    &lkpi_pci_nseg1_fail, "Count of single-segment busdma mapping failures");

static device_probe_t linux_pci_probe;
static device_attach_t linux_pci_attach;
static device_detach_t linux_pci_detach;
static device_suspend_t linux_pci_suspend;
static device_resume_t linux_pci_resume;
static device_shutdown_t linux_pci_shutdown;
static pci_iov_init_t linux_pci_iov_init;
static pci_iov_uninit_t linux_pci_iov_uninit;
static pci_iov_add_vf_t linux_pci_iov_add_vf;
static int linux_backlight_get_status(device_t dev, struct backlight_props *props);
static int
linux_backlight_update_status(device_t dev, struct backlight_props *props);
static int linux_backlight_get_info(device_t dev, struct backlight_info *info);

static device_method_t pci_methods[] = {
	DEVMETHOD(device_probe, linux_pci_probe),
	DEVMETHOD(device_attach, linux_pci_attach),
	DEVMETHOD(device_detach, linux_pci_detach),
	DEVMETHOD(device_suspend, linux_pci_suspend),
	DEVMETHOD(device_resume, linux_pci_resume),
	DEVMETHOD(device_shutdown, linux_pci_shutdown),
	DEVMETHOD(pci_iov_init, linux_pci_iov_init),
	DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf),

	/* backlight interface */
	DEVMETHOD(backlight_update_status, linux_backlight_update_status),
	DEVMETHOD(backlight_get_status, linux_backlight_get_status),
	DEVMETHOD(backlight_get_info, linux_backlight_get_info),
	DEVMETHOD_END
};

struct linux_dma_priv {
	uint64_t	dma_mask;
	bus_dma_tag_t	dmat;
	uint64_t	dma_coherent_mask;
	bus_dma_tag_t	dmat_coherent;
	struct mtx	lock;
	struct pctrie	ptree;
};
#define	DMA_PRIV_LOCK(priv)	mtx_lock(&(priv)->lock)
#define	DMA_PRIV_UNLOCK(priv)	mtx_unlock(&(priv)->lock)

static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;

	priv = pdev->dev.dma_priv;
	if (priv->dmat)
		bus_dma_tag_destroy(priv->dmat);
	if (priv->dmat_coherent)
		bus_dma_tag_destroy(priv->dmat_coherent);
	mtx_destroy(&priv->lock);
	pdev->dev.dma_priv = NULL;
	free(priv, M_DEVBUF);
	return (0);
}

static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;
	int error;

	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);

	mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);
	pctrie_init(&priv->ptree);

	pdev->dev.dma_priv = priv;

	/* Create default DMA tags. */
	error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
	if (error != 0)
		goto err;
	/* Coherent memory is limited to the lower 32 bits by default in Linux. */
	error = linux_dma_tag_init_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (error != 0)
		goto err;

	return (error);

err:
	linux_pdev_dma_uninit(pdev);
	return (error);
}
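
/*
 * Note (illustrative, not upstream text): drivers do not usually call
 * linux_dma_tag_init*() directly.  The defaults above are replaced when a
 * driver narrows its masks through the Linux API, e.g. in its probe routine:
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48)) != 0)
 *		return (-ENXIO);
 *
 * which recreates the corresponding busdma tags below with the new lowaddr.
 */
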
int
linux_dma_tag_init(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat) {
		if (priv->dma_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat);
	}

	priv->dma_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat);
	return (-error);
}

int
linux_dma_tag_init_coherent(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat_coherent) {
		if (priv->dma_coherent_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat_coherent);
	}

	priv->dma_coherent_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat_coherent);
	return (-error);
}

static struct pci_driver *
linux_pci_find(device_t dev, const struct pci_device_id **idp)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;
	uint16_t subdevice;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);
	subvendor = pci_get_subvendor(dev);
	subdevice = pci_get_subdevice(dev);

	spin_lock(&pci_lock);
	list_for_each_entry(pdrv, &pci_drivers, node) {
		for (id = pdrv->id_table; id->vendor != 0; id++) {
			if (vendor == id->vendor &&
			    (PCI_ANY_ID == id->device || device == id->device) &&
			    (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) &&
			    (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) {
				*idp = id;
				spin_unlock(&pci_lock);
				return (pdrv);
			}
		}
	}
	spin_unlock(&pci_lock);
	return (NULL);
}
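
/*
 * Example (hypothetical table, illustration only): the matcher above relies
 * on the Linux convention that an id_table is terminated by a zeroed
 * sentinel entry, since the inner loop stops at id->vendor == 0:
 *
 *	static const struct pci_device_id mydrv_ids[] = {
 *		{ PCI_VDEVICE(MELLANOX, 0x1013) },
 *		{ 0x8086, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID },
 *		{ 0, }
 *	};
 *
 * A table missing the { 0, } terminator would run the loop off the end of
 * the array.
 */
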
static void
lkpi_pci_dev_release(struct device *dev)
{

	lkpi_devres_release_free_list(dev);
	spin_lock_destroy(&dev->devres_lock);
}

static void
lkpifill_pci_dev(device_t dev, struct pci_dev *pdev)
{

	pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
	pdev->vendor = pci_get_vendor(dev);
	pdev->device = pci_get_device(dev);
	pdev->subsystem_vendor = pci_get_subvendor(dev);
	pdev->subsystem_device = pci_get_subdevice(dev);
	pdev->class = pci_get_class(dev);
	pdev->revision = pci_get_revid(dev);
	pdev->bus = malloc(sizeof(*pdev->bus), M_DEVBUF, M_WAITOK | M_ZERO);
	/*
	 * This should be the upstream bridge; pci_upstream_bridge()
	 * handles that case on demand as otherwise we'll shadow the
	 * entire PCI hierarchy.
	 */
	pdev->bus->self = pdev;
	pdev->bus->number = pci_get_bus(dev);
	pdev->bus->domain = pci_get_domain(dev);
	pdev->dev.bsddev = dev;
	pdev->dev.parent = &linux_root_device;
	pdev->dev.release = lkpi_pci_dev_release;
	INIT_LIST_HEAD(&pdev->dev.irqents);
	kobject_init(&pdev->dev.kobj, &linux_dev_ktype);
	kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev));
	kobject_add(&pdev->dev.kobj, &linux_root_device.kobj,
	    kobject_name(&pdev->dev.kobj));
	spin_lock_init(&pdev->dev.devres_lock);
	INIT_LIST_HEAD(&pdev->dev.devres_head);
}

static void
lkpinew_pci_dev_release(struct device *dev)
{
	struct pci_dev *pdev;

	pdev = to_pci_dev(dev);
	if (pdev->root != NULL)
		pci_dev_put(pdev->root);
	if (pdev->bus->self != pdev)
		pci_dev_put(pdev->bus->self);
	free(pdev->bus, M_DEVBUF);
	free(pdev, M_DEVBUF);
}

struct pci_dev *
lkpinew_pci_dev(device_t dev)
{
	struct pci_dev *pdev;

	pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK|M_ZERO);
	lkpifill_pci_dev(dev, pdev);
	pdev->dev.release = lkpinew_pci_dev_release;

	return (pdev);
}

struct pci_dev *
lkpi_pci_get_class(unsigned int class, struct pci_dev *from)
{
	device_t dev;
	device_t devfrom = NULL;
	struct pci_dev *pdev;

	if (from != NULL)
		devfrom = from->dev.bsddev;

	dev = pci_find_class_from(class >> 16, (class >> 8) & 0xFF, devfrom);
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

struct pci_dev *
lkpi_pci_get_domain_bus_and_slot(int domain, unsigned int bus,
    unsigned int devfn)
{
	device_t dev;
	struct pci_dev *pdev;

	dev = pci_find_dbsf(domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}
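
/*
 * Example (hypothetical, illustration only): the lookups above back the
 * usual Linux iteration idiom; note that every hit allocates a fresh shadow
 * pci_dev whose release function is lkpinew_pci_dev_release():
 *
 *	struct pci_dev *p = NULL;
 *
 *	while ((p = pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, p)) != NULL)
 *		handle(p);
 *
 * Callers keeping a reference must balance it with pci_dev_put().
 */
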
static int
linux_pci_probe(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;

	if ((pdrv = linux_pci_find(dev, &id)) == NULL)
		return (ENXIO);
	if (device_get_driver(dev) != &pdrv->bsddriver)
		return (ENXIO);
	device_set_desc(dev, pdrv->name);

	/* Assume BSS initialized (should never return BUS_PROBE_SPECIFIC). */
	if (pdrv->bsd_probe_return == 0)
		return (BUS_PROBE_DEFAULT);
	else
		return (pdrv->bsd_probe_return);
}

static int
linux_pci_attach(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	struct pci_dev *pdev;

	pdrv = linux_pci_find(dev, &id);
	pdev = device_get_softc(dev);

	MPASS(pdrv != NULL);
	MPASS(pdev != NULL);

	return (linux_pci_attach_device(dev, pdrv, id, pdev));
}

int
linux_pci_attach_device(device_t dev, struct pci_driver *pdrv,
    const struct pci_device_id *id, struct pci_dev *pdev)
{
	struct resource_list_entry *rle;
	device_t parent;
	uintptr_t rid;
	int error;
	bool isdrm;

	linux_set_current(curthread);

	parent = device_get_parent(dev);
	isdrm = pdrv != NULL && pdrv->isdrm;

	if (isdrm) {
		struct pci_devinfo *dinfo;

		dinfo = device_get_ivars(parent);
		device_set_ivars(dev, dinfo);
	}

	lkpifill_pci_dev(dev, pdev);
	if (isdrm)
		PCI_GET_ID(device_get_parent(parent), parent, PCI_ID_RID, &rid);
	else
		PCI_GET_ID(parent, dev, PCI_ID_RID, &rid);
	pdev->devfn = rid;
	pdev->pdrv = pdrv;
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0, false);
	if (rle != NULL)
		pdev->dev.irq = rle->start;
	else
		pdev->dev.irq = LINUX_IRQ_INVALID;
	pdev->irq = pdev->dev.irq;
	error = linux_pdev_dma_init(pdev);
	if (error)
		goto out_dma_init;

	TAILQ_INIT(&pdev->mmio);

	spin_lock(&pci_lock);
	list_add(&pdev->links, &pci_devices);
	spin_unlock(&pci_lock);

	if (pdrv != NULL) {
		error = pdrv->probe(pdev, id);
		if (error)
			goto out_probe;
	}
	return (0);

out_probe:
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);
out_dma_init:
	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);
	return (-error);
}

static int
linux_pci_detach(device_t dev)
{
	struct pci_dev *pdev;

	pdev = device_get_softc(dev);

	MPASS(pdev != NULL);

	device_set_desc(dev, NULL);

	return (linux_pci_detach_device(pdev));
}

int
linux_pci_detach_device(struct pci_dev *pdev)
{

	linux_set_current(curthread);

	if (pdev->pdrv != NULL)
		pdev->pdrv->remove(pdev);

	if (pdev->root != NULL)
		pci_dev_put(pdev->root);
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);

	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);

	return (0);
}

static int
lkpi_pci_disable_dev(struct device *dev)
{

	(void) pci_disable_io(dev->bsddev, SYS_RES_MEMORY);
	(void) pci_disable_io(dev->bsddev, SYS_RES_IOPORT);
	return (0);
}

struct pci_devres *
lkpi_pci_devres_get_alloc(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = lkpi_devres_find(&pdev->dev, lkpi_pci_devres_release, NULL, NULL);
	if (dr == NULL) {
		dr = lkpi_devres_alloc(lkpi_pci_devres_release, sizeof(*dr),
		    GFP_KERNEL | __GFP_ZERO);
		if (dr != NULL)
			lkpi_devres_add(&pdev->dev, dr);
	}

	return (dr);
}

void
lkpi_pci_devres_release(struct device *dev, void *p)
{
	struct pci_devres *dr;
	struct pci_dev *pdev;
	int bar;

	pdev = to_pci_dev(dev);
	dr = p;

	if (pdev->msix_enabled)
		lkpi_pci_disable_msix(pdev);
	if (pdev->msi_enabled)
		lkpi_pci_disable_msi(pdev);

	if (dr->enable_io && lkpi_pci_disable_dev(dev) == 0)
		dr->enable_io = false;

	if (dr->region_mask == 0)
		return;
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {

		if ((dr->region_mask & (1 << bar)) == 0)
			continue;
		pci_release_region(pdev, bar);
	}
}
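
/*
 * Example (hypothetical driver fragment, illustration only): everything
 * recorded in struct pci_devres is undone by the release handler above, so
 * a managed driver needs no explicit cleanup in its error or remove paths:
 *
 *	error = pcim_enable_device(pdev);	sets dr->enable_io
 *	...
 *	error = pci_request_region(pdev, 0, "regs");	sets dr->region_mask
 *
 * The matching pci_disable_io()/pci_release_region() calls then happen
 * automatically when the devres list is torn down at detach.
 */
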
struct pcim_iomap_devres *
lkpi_pcim_iomap_devres_find(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr;

	dr = lkpi_devres_find(&pdev->dev, lkpi_pcim_iomap_table_release,
	    NULL, NULL);
	if (dr == NULL) {
		dr = lkpi_devres_alloc(lkpi_pcim_iomap_table_release,
		    sizeof(*dr), GFP_KERNEL | __GFP_ZERO);
		if (dr != NULL)
			lkpi_devres_add(&pdev->dev, dr);
	}

	if (dr == NULL)
		device_printf(pdev->dev.bsddev, "%s: NULL\n", __func__);

	return (dr);
}

void
lkpi_pcim_iomap_table_release(struct device *dev, void *p)
{
	struct pcim_iomap_devres *dr;
	struct pci_dev *pdev;
	int bar;

	dr = p;
	pdev = to_pci_dev(dev);
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {

		if (dr->mmio_table[bar] == NULL)
			continue;

		pci_iounmap(pdev, dr->mmio_table[bar]);
	}
}

static int
linux_pci_suspend(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pm_message pm = { };
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->suspend != NULL)
		error = -pdev->pdrv->suspend(pdev, pm);
	else if (pmops != NULL && pmops->suspend != NULL) {
		error = -pmops->suspend(&pdev->dev);
		if (error == 0 && pmops->suspend_late != NULL)
			error = -pmops->suspend_late(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_resume(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->resume != NULL)
		error = -pdev->pdrv->resume(pdev);
	else if (pmops != NULL && pmops->resume != NULL) {
		if (pmops->resume_early != NULL)
			error = -pmops->resume_early(&pdev->dev);
		if (error == 0 && pmops->resume != NULL)
			error = -pmops->resume(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_shutdown(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->shutdown != NULL)
		pdev->pdrv->shutdown(pdev);
	return (0);
}

static int
linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_init != NULL)
		error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config);
	else
		error = EINVAL;
	return (error);
}

static void
linux_pci_iov_uninit(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_uninit != NULL)
		pdev->pdrv->bsd_iov_uninit(dev);
}

static int
linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_add_vf != NULL)
		error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config);
	else
		error = EINVAL;
	return (error);
}
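
/*
 * Example (hypothetical, illustration only): a consumer registers the same
 * struct pci_driver it would use on Linux; the helpers below wrap it in a
 * newbus driver backed by pci_methods[]:
 *
 *	static struct pci_driver mydrv = {
 *		.name		= "mydrv",
 *		.id_table	= mydrv_ids,
 *		.probe		= mydrv_probe,
 *		.remove		= mydrv_remove,
 *	};
 *
 *	error = linux_pci_register_driver(&mydrv);	0 or -errno
 */
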
static int
_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
{
	int error;

	linux_set_current(curthread);
	spin_lock(&pci_lock);
	list_add(&pdrv->node, &pci_drivers);
	spin_unlock(&pci_lock);
	if (pdrv->bsddriver.name == NULL)
		pdrv->bsddriver.name = pdrv->name;
	pdrv->bsddriver.methods = pci_methods;
	pdrv->bsddriver.size = sizeof(struct pci_dev);

	bus_topo_lock();
	error = devclass_add_driver(dc, &pdrv->bsddriver,
	    BUS_PASS_DEFAULT, &pdrv->bsdclass);
	bus_topo_unlock();
	return (-error);
}

int
linux_pci_register_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_find("pci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = false;
	return (_linux_pci_register_driver(pdrv, dc));
}

struct resource_list_entry *
linux_pci_reserve_bar(struct pci_dev *pdev, struct resource_list *rl,
    int type, int rid)
{
	device_t dev;
	struct resource *res;

	KASSERT(type == SYS_RES_IOPORT || type == SYS_RES_MEMORY,
	    ("trying to reserve non-BAR type %d", type));

	dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	res = pci_reserve_map(device_get_parent(dev), dev, type, &rid, 0, ~0,
	    1, 1, 0);
	if (res == NULL)
		return (NULL);
	return (resource_list_find(rl, type, rid));
}

unsigned long
pci_resource_start(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;
	rman_res_t newstart;
	device_t dev;
	int error;

	if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL)
		return (0);
	dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	error = bus_translate_resource(dev, rle->type, rle->start, &newstart);
	if (error != 0) {
		device_printf(pdev->dev.bsddev,
		    "translate of %#jx failed: %d\n",
		    (uintmax_t)rle->start, error);
		return (0);
	}
	return (newstart);
}

unsigned long
pci_resource_len(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;

	if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL)
		return (0);
	return (rle->count);
}

int
pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	struct resource *res;
	struct pci_devres *dr;
	struct pci_mmio_region *mmio;
	int rid;
	int type;

	type = pci_resource_type(pdev, bar);
	if (type < 0)
		return (-ENODEV);
	rid = PCIR_BAR(bar);
	res = bus_alloc_resource_any(pdev->dev.bsddev, type, &rid,
	    RF_ACTIVE|RF_SHAREABLE);
	if (res == NULL) {
		device_printf(pdev->dev.bsddev, "%s: failed to alloc "
		    "bar %d type %d rid %d\n",
		    __func__, bar, type, PCIR_BAR(bar));
		return (-ENODEV);
	}

	/*
	 * Linux drivers expect these resources to be freed automatically.
	 * On FreeBSD/LinuxKPI that only happens implicitly via devres if
	 * the device is managed; otherwise the driver must release them
	 * itself.
	 */
	dr = lkpi_pci_devres_find(pdev);
	if (dr != NULL) {
		dr->region_mask |= (1 << bar);
		dr->region_table[bar] = res;
	}

	/* Even if the device is not managed we need to track it for iomap. */
	mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);
	mmio->rid = PCIR_BAR(bar);
	mmio->type = type;
	mmio->res = res;
	TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);

	return (0);
}
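
/*
 * Example (hypothetical, illustration only): both pci_request_region() above
 * and _lkpi_pci_iomap() below record mappings in pdev->mmio, so combining
 * them does not allocate the same BAR twice:
 *
 *	if (pci_request_region(pdev, 0, "regs") != 0)
 *		return (-ENODEV);
 *	regs = pci_iomap(pdev, 0, 0);	reuses the already-active mapping
 */
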
struct resource *
_lkpi_pci_iomap(struct pci_dev *pdev, int bar, int mmio_size __unused)
{
	struct pci_mmio_region *mmio, *p;
	int type;

	type = pci_resource_type(pdev, bar);
	if (type < 0) {
		device_printf(pdev->dev.bsddev, "%s: bar %d type %d\n",
		     __func__, bar, type);
		return (NULL);
	}

	/*
	 * Check for duplicate mappings.
	 * This can happen if a driver calls pci_request_region() first.
	 */
	TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
		if (mmio->type == type && mmio->rid == PCIR_BAR(bar)) {
			return (mmio->res);
		}
	}

	mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);
	mmio->rid = PCIR_BAR(bar);
	mmio->type = type;
	mmio->res = bus_alloc_resource_any(pdev->dev.bsddev, mmio->type,
	    &mmio->rid, RF_ACTIVE|RF_SHAREABLE);
	if (mmio->res == NULL) {
		device_printf(pdev->dev.bsddev, "%s: failed to alloc "
		    "bar %d type %d rid %d\n",
		    __func__, bar, type, PCIR_BAR(bar));
		free(mmio, M_DEVBUF);
		return (NULL);
	}
	TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);

	return (mmio->res);
}

int
linux_pci_register_drm_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_create("vgapci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = true;
	pdrv->name = "drmn";
	return (_linux_pci_register_driver(pdrv, dc));
}

void
linux_pci_unregister_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("pci");

	spin_lock(&pci_lock);
	list_del(&pdrv->node);
	spin_unlock(&pci_lock);
	bus_topo_lock();
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	bus_topo_unlock();
}

void
linux_pci_unregister_drm_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("vgapci");

	spin_lock(&pci_lock);
	list_del(&pdrv->node);
	spin_unlock(&pci_lock);
	bus_topo_lock();
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	bus_topo_unlock();
}

int
pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv,
    unsigned int flags)
{
	int error;

	if (flags & PCI_IRQ_MSIX) {
		struct msix_entry *entries;
		int i;

		entries = kcalloc(maxv, sizeof(*entries), GFP_KERNEL);
		if (entries == NULL) {
			error = -ENOMEM;
			goto out;
		}
		for (i = 0; i < maxv; ++i)
			entries[i].entry = i;
		error = pci_enable_msix(pdev, entries, maxv);
out:
		kfree(entries);
		if (error == 0 && pdev->msix_enabled)
			return (pdev->dev.irq_end - pdev->dev.irq_start);
	}
	if (flags & PCI_IRQ_MSI) {
		error = pci_enable_msi(pdev);
		if (error == 0 && pdev->msi_enabled)
			return (pdev->dev.irq_end - pdev->dev.irq_start);
	}
	if (flags & PCI_IRQ_LEGACY) {
		if (pdev->irq)
			return (1);
	}

	return (-EINVAL);
}
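
/*
 * Example (hypothetical, illustration only): the fall-through ordering above
 * lets a driver request the best available interrupt type in one call and
 * receive the vector count actually granted:
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 1, 8,
 *	    PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
 *	if (nvec < 0)
 *		return (nvec);	-EINVAL if nothing could be enabled
 *
 * Note that minv is currently not enforced; the MSI-X path always asks for
 * maxv vectors.
 */
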
CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));

struct linux_dma_obj {
	void		*vaddr;
	uint64_t	dma_addr;
	bus_dmamap_t	dmamap;
	bus_dma_tag_t	dmat;
};

static uma_zone_t linux_dma_trie_zone;
static uma_zone_t linux_dma_obj_zone;

static void
linux_dma_init(void *arg)
{

	linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
	    pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
	    UMA_ALIGN_PTR, 0);
	linux_dma_obj_zone = uma_zcreate("linux_dma_object",
	    sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	lkpi_pci_nseg1_fail = counter_u64_alloc(M_WAITOK);
}
SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);

static void
linux_dma_uninit(void *arg)
{

	counter_u64_free(lkpi_pci_nseg1_fail);
	uma_zdestroy(linux_dma_obj_zone);
	uma_zdestroy(linux_dma_trie_zone);
}
SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);

static void *
linux_dma_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT));
}

static void
linux_dma_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(linux_dma_trie_zone, node);
}

PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
    linux_dma_trie_free);

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
static dma_addr_t
linux_dma_map_phys_common(struct device *dev, vm_paddr_t phys, size_t len,
    bus_dma_tag_t dmat)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	/*
	 * If the resultant mapping will be entirely 1:1 with the
	 * physical address, short-circuit the remainder of the
	 * bus_dma API.  This avoids tracking collisions in the pctrie
	 * with the additional benefit of reducing overhead.
	 */
	if (bus_dma_id_mapped(dmat, phys, len))
		return (phys);

	obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT);
	if (obj == NULL) {
		return (0);
	}
	obj->dmat = dmat;

	DMA_PRIV_LOCK(priv);
	if (bus_dmamap_create(obj->dmat, 0, &obj->dmamap) != 0) {
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	nseg = -1;
	if (_bus_dmamap_load_phys(obj->dmat, obj->dmamap, phys, len,
	    BUS_DMA_NOWAIT, &seg, &nseg) != 0) {
		bus_dmamap_destroy(obj->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		counter_u64_add(lkpi_pci_nseg1_fail, 1);
		if (linuxkpi_debug)
			dump_stack();
		return (0);
	}

	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
	if (error != 0) {
		bus_dmamap_unload(obj->dmat, obj->dmamap);
		bus_dmamap_destroy(obj->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}
	DMA_PRIV_UNLOCK(priv);
	return (obj->dma_addr);
}
#else
static dma_addr_t
linux_dma_map_phys_common(struct device *dev __unused, vm_paddr_t phys,
    size_t len __unused, bus_dma_tag_t dmat __unused)
{
	return (phys);
}
#endif

dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;
	return (linux_dma_map_phys_common(dev, phys, len, priv->dmat));
}
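
/*
 * Note (assumption, not upstream text): linux_dma_map_phys() is the backend
 * used by the streaming-DMA wrappers in linux/dma-mapping.h, along the lines
 * of:
 *
 *	addr = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *		-> linux_dma_map_phys(dev, vtophys(buf), len)
 *
 * A returned dma_addr_t of 0 indicates failure, which is what
 * dma_mapping_error() tests for.
 */
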
#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);
	bus_dmamap_unload(obj->dmat, obj->dmamap);
	bus_dmamap_destroy(obj->dmat, obj->dmamap);
	DMA_PRIV_UNLOCK(priv);

	uma_zfree(linux_dma_obj_zone, obj);
}
#else
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
}
#endif

void *
linux_dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t flag)
{
	struct linux_dma_priv *priv;
	vm_paddr_t high;
	size_t align;
	void *mem;

	if (dev == NULL || dev->dma_priv == NULL) {
		*dma_handle = 0;
		return (NULL);
	}
	priv = dev->dma_priv;
	if (priv->dma_coherent_mask)
		high = priv->dma_coherent_mask;
	else
		/* Coherent memory is limited to the lower 32 bits by default in Linux. */
		high = BUS_SPACE_MAXADDR_32BIT;
	align = PAGE_SIZE << get_order(size);
	/* Always zero the allocation. */
	flag |= M_ZERO;
	mem = kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high,
	    align, 0, VM_MEMATTR_DEFAULT);
	if (mem != NULL) {
		*dma_handle = linux_dma_map_phys_common(dev, vtophys(mem), size,
		    priv->dmat_coherent);
		if (*dma_handle == 0) {
			kmem_free(mem, size);
			mem = NULL;
		}
	} else {
		*dma_handle = 0;
	}
	return (mem);
}

struct lkpi_devres_dmam_coherent {
	size_t size;
	dma_addr_t *handle;
	void *mem;
};

static void
lkpi_dmam_free_coherent(struct device *dev, void *p)
{
	struct lkpi_devres_dmam_coherent *dr;

	dr = p;
	dma_free_coherent(dev, dr->size, dr->mem, *dr->handle);
}

void *
linuxkpi_dmam_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
    gfp_t flag)
{
	struct lkpi_devres_dmam_coherent *dr;

	dr = lkpi_devres_alloc(lkpi_dmam_free_coherent,
	    sizeof(*dr), GFP_KERNEL | __GFP_ZERO);

	if (dr == NULL)
		return (NULL);

	dr->size = size;
	dr->mem = linux_dma_alloc_coherent(dev, size, dma_handle, flag);
	dr->handle = dma_handle;
	if (dr->mem == NULL) {
		lkpi_devres_free(dr);
		return (NULL);
	}

	lkpi_devres_add(dev, dr);
	return (dr->mem);
}

void
linuxkpi_dma_sync(struct device *dev, dma_addr_t dma_addr, size_t size,
    bus_dmasync_op_t op)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}

	bus_dmamap_sync(obj->dmat, obj->dmamap, op);
	DMA_PRIV_UNLOCK(priv);
}
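
/*
 * Note: linuxkpi_dma_sync() is a no-op for addresses that took the 1:1
 * short-circuit in linux_dma_map_phys_common(), as those were never entered
 * into the pctrie.  A hypothetical caller via the Linux API (assuming the
 * dma-mapping.h wrappers translate the direction into a bus_dmasync_op_t):
 *
 *	dma_sync_single_for_device(&pdev->dev, addr, len, DMA_TO_DEVICE);
 */
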
int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction direction, unsigned long attrs __unused)
{
	struct linux_dma_priv *priv;
	struct scatterlist *sg;
	int i, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	/* create common DMA map in the first S/G entry */
	if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
		DMA_PRIV_UNLOCK(priv);
		return (0);
	}

	/* load all S/G list entries */
	for_each_sg(sgl, sg, nents, i) {
		nseg = -1;
		if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
		    sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
		    &seg, &nseg) != 0) {
			bus_dmamap_unload(priv->dmat, sgl->dma_map);
			bus_dmamap_destroy(priv->dmat, sgl->dma_map);
			DMA_PRIV_UNLOCK(priv);
			return (0);
		}
		KASSERT(nseg == 0,
		    ("More than one segment (nseg=%d)", nseg + 1));

		sg_dma_address(sg) = seg.ds_addr;
	}

	switch (direction) {
	case DMA_BIDIRECTIONAL:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE);
		break;
	case DMA_TO_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD);
		break;
	case DMA_FROM_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE);
		break;
	default:
		break;
	}

	DMA_PRIV_UNLOCK(priv);

	return (nents);
}

void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents __unused, enum dma_data_direction direction,
    unsigned long attrs __unused)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	switch (direction) {
	case DMA_BIDIRECTIONAL:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD);
		break;
	case DMA_TO_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTWRITE);
		break;
	case DMA_FROM_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD);
		break;
	default:
		break;
	}

	bus_dmamap_unload(priv->dmat, sgl->dma_map);
	bus_dmamap_destroy(priv->dmat, sgl->dma_map);
	DMA_PRIV_UNLOCK(priv);
}
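
/*
 * The dma_pool implementation below layers the Linux dma_pool API over a UMA
 * cache zone: dma_pool_obj_import()/dma_pool_obj_release() allocate and free
 * the backing DMA memory in bulk, while the ctor/dtor pair loads and unloads
 * the busdma map each time an object moves between the cache and a caller.
 * The pctrie provides the dma_addr -> object lookup that dma_pool_free()
 * needs.
 */
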
struct dma_pool {
	struct device	*pool_device;
	uma_zone_t	pool_zone;
	struct mtx	pool_lock;
	bus_dma_tag_t	pool_dmat;
	size_t		pool_entry_size;
	struct pctrie	pool_ptree;
};

#define	DMA_POOL_LOCK(pool)	mtx_lock(&(pool)->pool_lock)
#define	DMA_POOL_UNLOCK(pool)	mtx_unlock(&(pool)->pool_lock)

static inline int
dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;
	int error, nseg;
	bus_dma_segment_t seg;

	nseg = -1;
	DMA_POOL_LOCK(pool);
	error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
	    vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
	    &seg, &nseg);
	DMA_POOL_UNLOCK(pool);
	if (error != 0) {
		return (error);
	}
	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	return (0);
}

static void
dma_pool_obj_dtor(void *mem, int size, void *arg)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;

	DMA_POOL_LOCK(pool);
	bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
	DMA_POOL_UNLOCK(pool);
}

static int
dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
	struct dma_pool *pool = arg;
	struct linux_dma_obj *obj;
	int error, i;

	for (i = 0; i < count; i++) {
		obj = uma_zalloc(linux_dma_obj_zone, flags);
		if (obj == NULL)
			break;

		error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
		    BUS_DMA_NOWAIT, &obj->dmamap);
		if (error != 0) {
			uma_zfree(linux_dma_obj_zone, obj);
			break;
		}

		store[i] = obj;
	}

	return (i);
}

static void
dma_pool_obj_release(void *arg, void **store, int count)
{
	struct dma_pool *pool = arg;
	struct linux_dma_obj *obj;
	int i;

	for (i = 0; i < count; i++) {
		obj = store[i];
		bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
		uma_zfree(linux_dma_obj_zone, obj);
	}
}

struct dma_pool *
linux_dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
{
	struct linux_dma_priv *priv;
	struct dma_pool *pool;

	priv = dev->dma_priv;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	pool->pool_device = dev;
	pool->pool_entry_size = size;

	if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    align, boundary,		/* alignment, boundary */
	    priv->dma_mask,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &pool->pool_dmat)) {
		kfree(pool);
		return (NULL);
	}

	pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,
	    dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,
	    dma_pool_obj_release, pool, 0);

	mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF);
	pctrie_init(&pool->pool_ptree);

	return (pool);
}

void
linux_dma_pool_destroy(struct dma_pool *pool)
{

	uma_zdestroy(pool->pool_zone);
	bus_dma_tag_destroy(pool->pool_dmat);
	mtx_destroy(&pool->pool_lock);
	kfree(pool);
}

void
lkpi_dmam_pool_destroy(struct device *dev, void *p)
{
	struct dma_pool *pool;

	pool = *(struct dma_pool **)p;
	LINUX_DMA_PCTRIE_RECLAIM(&pool->pool_ptree);
	linux_dma_pool_destroy(pool);
}

void *
linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    dma_addr_t *handle)
{
	struct linux_dma_obj *obj;

	obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags & GFP_NATIVE_MASK);
	if (obj == NULL)
		return (NULL);

	DMA_POOL_LOCK(pool);
	if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
		DMA_POOL_UNLOCK(pool);
		uma_zfree_arg(pool->pool_zone, obj, pool);
		return (NULL);
	}
	DMA_POOL_UNLOCK(pool);

	*handle = obj->dma_addr;
	return (obj->vaddr);
}

void
linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
{
	struct linux_dma_obj *obj;

	DMA_POOL_LOCK(pool);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
	if (obj == NULL) {
		DMA_POOL_UNLOCK(pool);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
	DMA_POOL_UNLOCK(pool);

	uma_zfree_arg(pool->pool_zone, obj, pool);
}
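
/*
 * Backlight bridging: the FreeBSD backlight(9) interface expresses
 * brightness as a percentage (0..100), while a Linux backlight_device uses
 * absolute steps (0..max_brightness).  The methods below convert between
 * the two representations.
 */
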
static int
linux_backlight_get_status(device_t dev, struct backlight_props *props)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	props->brightness = pdev->dev.bd->props.brightness;
	props->brightness = props->brightness * 100 / pdev->dev.bd->props.max_brightness;
	props->nlevels = 0;

	return (0);
}

static int
linux_backlight_get_info(device_t dev, struct backlight_info *info)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	info->type = BACKLIGHT_TYPE_PANEL;
	strlcpy(info->name, pdev->dev.bd->name, BACKLIGHTMAXNAMELENGTH);
	return (0);
}

static int
linux_backlight_update_status(device_t dev, struct backlight_props *props)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	pdev->dev.bd->props.brightness = pdev->dev.bd->props.max_brightness *
	    props->brightness / 100;
	pdev->dev.bd->props.power = props->brightness == 0 ?
	    4/* FB_BLANK_POWERDOWN */ : 0/* FB_BLANK_UNBLANK */;
	return (pdev->dev.bd->ops->update_status(pdev->dev.bd));
}

struct backlight_device *
linux_backlight_device_register(const char *name, struct device *dev,
    void *data, const struct backlight_ops *ops, struct backlight_properties *props)
{

	dev->bd = malloc(sizeof(*dev->bd), M_DEVBUF, M_WAITOK | M_ZERO);
	dev->bd->ops = ops;
	dev->bd->props.type = props->type;
	dev->bd->props.max_brightness = props->max_brightness;
	dev->bd->props.brightness = props->brightness;
	dev->bd->props.power = props->power;
	dev->bd->data = data;
	dev->bd->dev = dev;
	dev->bd->name = strdup(name, M_DEVBUF);

	dev->backlight_dev = backlight_register(name, dev->bsddev);

	return (dev->bd);
}

void
linux_backlight_device_unregister(struct backlight_device *bd)
{

	backlight_destroy(bd->dev->backlight_dev);
	free(bd->name, M_DEVBUF);
	free(bd, M_DEVBUF);
}