1 /*- 2 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd. 3 * All rights reserved. 4 * Copyright (c) 2020-2022 The FreeBSD Foundation 5 * 6 * Portions of this software were developed by Björn Zeeb 7 * under sponsorship from the FreeBSD Foundation. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice unmodified, this list of conditions, and the following 14 * disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 */ 30 31 #include <sys/param.h> 32 #include <sys/systm.h> 33 #include <sys/bus.h> 34 #include <sys/malloc.h> 35 #include <sys/kernel.h> 36 #include <sys/sysctl.h> 37 #include <sys/lock.h> 38 #include <sys/mutex.h> 39 #include <sys/fcntl.h> 40 #include <sys/file.h> 41 #include <sys/filio.h> 42 #include <sys/pciio.h> 43 #include <sys/pctrie.h> 44 #include <sys/rwlock.h> 45 46 #include <vm/vm.h> 47 #include <vm/pmap.h> 48 49 #include <machine/stdarg.h> 50 51 #include <dev/pci/pcivar.h> 52 #include <dev/pci/pci_private.h> 53 #include <dev/pci/pci_iov.h> 54 #include <dev/backlight/backlight.h> 55 56 #include <linux/kernel.h> 57 #include <linux/kobject.h> 58 #include <linux/device.h> 59 #include <linux/slab.h> 60 #include <linux/module.h> 61 #include <linux/cdev.h> 62 #include <linux/file.h> 63 #include <linux/sysfs.h> 64 #include <linux/mm.h> 65 #include <linux/io.h> 66 #include <linux/vmalloc.h> 67 #include <linux/pci.h> 68 #include <linux/compat.h> 69 70 #include <linux/backlight.h> 71 72 #include "backlight_if.h" 73 #include "pcib_if.h" 74 75 /* Undef the linux function macro defined in linux/pci.h */ 76 #undef pci_get_class 77 78 extern int linuxkpi_debug; 79 80 SYSCTL_DECL(_compat_linuxkpi); 81 82 static counter_u64_t lkpi_pci_nseg1_fail; 83 SYSCTL_COUNTER_U64(_compat_linuxkpi, OID_AUTO, lkpi_pci_nseg1_fail, CTLFLAG_RD, 84 &lkpi_pci_nseg1_fail, "Count of busdma mapping failures of single-segment"); 85 86 static device_probe_t linux_pci_probe; 87 static device_attach_t linux_pci_attach; 88 static device_detach_t linux_pci_detach; 89 static device_suspend_t linux_pci_suspend; 90 static device_resume_t linux_pci_resume; 91 static device_shutdown_t linux_pci_shutdown; 92 static pci_iov_init_t linux_pci_iov_init; 93 static pci_iov_uninit_t linux_pci_iov_uninit; 94 static pci_iov_add_vf_t linux_pci_iov_add_vf; 95 static int linux_backlight_get_status(device_t dev, struct backlight_props *props); 96 static int linux_backlight_update_status(device_t dev, struct backlight_props 
    *props);
static int linux_backlight_get_info(device_t dev, struct backlight_info *info);

static device_method_t pci_methods[] = {
	DEVMETHOD(device_probe, linux_pci_probe),
	DEVMETHOD(device_attach, linux_pci_attach),
	DEVMETHOD(device_detach, linux_pci_detach),
	DEVMETHOD(device_suspend, linux_pci_suspend),
	DEVMETHOD(device_resume, linux_pci_resume),
	DEVMETHOD(device_shutdown, linux_pci_shutdown),
	DEVMETHOD(pci_iov_init, linux_pci_iov_init),
	DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf),

	/* backlight interface */
	DEVMETHOD(backlight_update_status, linux_backlight_update_status),
	DEVMETHOD(backlight_get_status, linux_backlight_get_status),
	DEVMETHOD(backlight_get_info, linux_backlight_get_info),
	DEVMETHOD_END
};

const char *pci_power_names[] = {
	"UNKNOWN", "D0", "D1", "D2", "D3hot", "D3cold"
};

struct linux_dma_priv {
	uint64_t	dma_mask;
	bus_dma_tag_t	dmat;
	uint64_t	dma_coherent_mask;
	bus_dma_tag_t	dmat_coherent;
	struct mtx	lock;
	struct pctrie	ptree;
};
#define	DMA_PRIV_LOCK(priv)	mtx_lock(&(priv)->lock)
#define	DMA_PRIV_UNLOCK(priv)	mtx_unlock(&(priv)->lock)

static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;

	priv = pdev->dev.dma_priv;
	if (priv->dmat)
		bus_dma_tag_destroy(priv->dmat);
	if (priv->dmat_coherent)
		bus_dma_tag_destroy(priv->dmat_coherent);
	mtx_destroy(&priv->lock);
	pdev->dev.dma_priv = NULL;
	free(priv, M_DEVBUF);
	return (0);
}

static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;
	int error;

	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);

	mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);
	pctrie_init(&priv->ptree);

	pdev->dev.dma_priv = priv;

	/* Create default DMA tags. */
	error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
	if (error != 0)
		goto err;
	/* Coherent is lower 32bit only by default in Linux.
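	 * A driver that needs a wider coherent mask can raise it later,
	 * e.g. via dma_set_coherent_mask() or dma_set_mask_and_coherent().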
*/ 166 error = linux_dma_tag_init_coherent(&pdev->dev, DMA_BIT_MASK(32)); 167 if (error != 0) 168 goto err; 169 170 return (error); 171 172 err: 173 linux_pdev_dma_uninit(pdev); 174 return (error); 175 } 176 177 int 178 linux_dma_tag_init(struct device *dev, u64 dma_mask) 179 { 180 struct linux_dma_priv *priv; 181 int error; 182 183 priv = dev->dma_priv; 184 185 if (priv->dmat) { 186 if (priv->dma_mask == dma_mask) 187 return (0); 188 189 bus_dma_tag_destroy(priv->dmat); 190 } 191 192 priv->dma_mask = dma_mask; 193 194 error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev), 195 1, 0, /* alignment, boundary */ 196 dma_mask, /* lowaddr */ 197 BUS_SPACE_MAXADDR, /* highaddr */ 198 NULL, NULL, /* filtfunc, filtfuncarg */ 199 BUS_SPACE_MAXSIZE, /* maxsize */ 200 1, /* nsegments */ 201 BUS_SPACE_MAXSIZE, /* maxsegsz */ 202 0, /* flags */ 203 NULL, NULL, /* lockfunc, lockfuncarg */ 204 &priv->dmat); 205 return (-error); 206 } 207 208 int 209 linux_dma_tag_init_coherent(struct device *dev, u64 dma_mask) 210 { 211 struct linux_dma_priv *priv; 212 int error; 213 214 priv = dev->dma_priv; 215 216 if (priv->dmat_coherent) { 217 if (priv->dma_coherent_mask == dma_mask) 218 return (0); 219 220 bus_dma_tag_destroy(priv->dmat_coherent); 221 } 222 223 priv->dma_coherent_mask = dma_mask; 224 225 error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev), 226 1, 0, /* alignment, boundary */ 227 dma_mask, /* lowaddr */ 228 BUS_SPACE_MAXADDR, /* highaddr */ 229 NULL, NULL, /* filtfunc, filtfuncarg */ 230 BUS_SPACE_MAXSIZE, /* maxsize */ 231 1, /* nsegments */ 232 BUS_SPACE_MAXSIZE, /* maxsegsz */ 233 0, /* flags */ 234 NULL, NULL, /* lockfunc, lockfuncarg */ 235 &priv->dmat_coherent); 236 return (-error); 237 } 238 239 static struct pci_driver * 240 linux_pci_find(device_t dev, const struct pci_device_id **idp) 241 { 242 const struct pci_device_id *id; 243 struct pci_driver *pdrv; 244 uint16_t vendor; 245 uint16_t device; 246 uint16_t subvendor; 247 uint16_t subdevice; 248 249 vendor = pci_get_vendor(dev); 250 device = pci_get_device(dev); 251 subvendor = pci_get_subvendor(dev); 252 subdevice = pci_get_subdevice(dev); 253 254 spin_lock(&pci_lock); 255 list_for_each_entry(pdrv, &pci_drivers, node) { 256 for (id = pdrv->id_table; id->vendor != 0; id++) { 257 if (vendor == id->vendor && 258 (PCI_ANY_ID == id->device || device == id->device) && 259 (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) && 260 (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) { 261 *idp = id; 262 spin_unlock(&pci_lock); 263 return (pdrv); 264 } 265 } 266 } 267 spin_unlock(&pci_lock); 268 return (NULL); 269 } 270 271 struct pci_dev * 272 lkpi_pci_get_device(uint16_t vendor, uint16_t device, struct pci_dev *odev) 273 { 274 struct pci_dev *pdev; 275 276 KASSERT(odev == NULL, ("%s: odev argument not yet supported\n", __func__)); 277 278 spin_lock(&pci_lock); 279 list_for_each_entry(pdev, &pci_devices, links) { 280 if (pdev->vendor == vendor && pdev->device == device) 281 break; 282 } 283 spin_unlock(&pci_lock); 284 285 return (pdev); 286 } 287 288 static void 289 lkpi_pci_dev_release(struct device *dev) 290 { 291 292 lkpi_devres_release_free_list(dev); 293 spin_lock_destroy(&dev->devres_lock); 294 } 295 296 static void 297 lkpifill_pci_dev(device_t dev, struct pci_dev *pdev) 298 { 299 300 pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev)); 301 pdev->vendor = pci_get_vendor(dev); 302 pdev->device = pci_get_device(dev); 303 pdev->subsystem_vendor = pci_get_subvendor(dev); 304 pdev->subsystem_device = 
pci_get_subdevice(dev); 305 pdev->class = pci_get_class(dev); 306 pdev->revision = pci_get_revid(dev); 307 pdev->path_name = kasprintf(GFP_KERNEL, "%04d:%02d:%02d.%d", 308 pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev), 309 pci_get_function(dev)); 310 pdev->bus = malloc(sizeof(*pdev->bus), M_DEVBUF, M_WAITOK | M_ZERO); 311 /* 312 * This should be the upstream bridge; pci_upstream_bridge() 313 * handles that case on demand as otherwise we'll shadow the 314 * entire PCI hierarchy. 315 */ 316 pdev->bus->self = pdev; 317 pdev->bus->number = pci_get_bus(dev); 318 pdev->bus->domain = pci_get_domain(dev); 319 pdev->dev.bsddev = dev; 320 pdev->dev.parent = &linux_root_device; 321 pdev->dev.release = lkpi_pci_dev_release; 322 INIT_LIST_HEAD(&pdev->dev.irqents); 323 324 if (pci_msi_count(dev) > 0) 325 pdev->msi_desc = malloc(pci_msi_count(dev) * 326 sizeof(*pdev->msi_desc), M_DEVBUF, M_WAITOK | M_ZERO); 327 328 kobject_init(&pdev->dev.kobj, &linux_dev_ktype); 329 kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev)); 330 kobject_add(&pdev->dev.kobj, &linux_root_device.kobj, 331 kobject_name(&pdev->dev.kobj)); 332 spin_lock_init(&pdev->dev.devres_lock); 333 INIT_LIST_HEAD(&pdev->dev.devres_head); 334 } 335 336 static void 337 lkpinew_pci_dev_release(struct device *dev) 338 { 339 struct pci_dev *pdev; 340 int i; 341 342 pdev = to_pci_dev(dev); 343 if (pdev->root != NULL) 344 pci_dev_put(pdev->root); 345 if (pdev->bus->self != pdev) 346 pci_dev_put(pdev->bus->self); 347 free(pdev->bus, M_DEVBUF); 348 if (pdev->msi_desc != NULL) { 349 for (i = pci_msi_count(pdev->dev.bsddev) - 1; i >= 0; i--) 350 free(pdev->msi_desc[i], M_DEVBUF); 351 free(pdev->msi_desc, M_DEVBUF); 352 } 353 kfree(pdev->path_name); 354 free(pdev, M_DEVBUF); 355 } 356 357 struct pci_dev * 358 lkpinew_pci_dev(device_t dev) 359 { 360 struct pci_dev *pdev; 361 362 pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK|M_ZERO); 363 lkpifill_pci_dev(dev, pdev); 364 pdev->dev.release = lkpinew_pci_dev_release; 365 366 return (pdev); 367 } 368 369 struct pci_dev * 370 lkpi_pci_get_class(unsigned int class, struct pci_dev *from) 371 { 372 device_t dev; 373 device_t devfrom = NULL; 374 struct pci_dev *pdev; 375 376 if (from != NULL) 377 devfrom = from->dev.bsddev; 378 379 dev = pci_find_class_from(class >> 16, (class >> 8) & 0xFF, devfrom); 380 if (dev == NULL) 381 return (NULL); 382 383 pdev = lkpinew_pci_dev(dev); 384 return (pdev); 385 } 386 387 struct pci_dev * 388 lkpi_pci_get_domain_bus_and_slot(int domain, unsigned int bus, 389 unsigned int devfn) 390 { 391 device_t dev; 392 struct pci_dev *pdev; 393 394 dev = pci_find_dbsf(domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); 395 if (dev == NULL) 396 return (NULL); 397 398 pdev = lkpinew_pci_dev(dev); 399 return (pdev); 400 } 401 402 static int 403 linux_pci_probe(device_t dev) 404 { 405 const struct pci_device_id *id; 406 struct pci_driver *pdrv; 407 408 if ((pdrv = linux_pci_find(dev, &id)) == NULL) 409 return (ENXIO); 410 if (device_get_driver(dev) != &pdrv->bsddriver) 411 return (ENXIO); 412 device_set_desc(dev, pdrv->name); 413 414 /* Assume BSS initialized (should never return BUS_PROBE_SPECIFIC). 
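	 * A driver may override this by setting the LinuxKPI-specific
	 * bsd_probe_return field; zero keeps BUS_PROBE_DEFAULT.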
*/ 415 if (pdrv->bsd_probe_return == 0) 416 return (BUS_PROBE_DEFAULT); 417 else 418 return (pdrv->bsd_probe_return); 419 } 420 421 static int 422 linux_pci_attach(device_t dev) 423 { 424 const struct pci_device_id *id; 425 struct pci_driver *pdrv; 426 struct pci_dev *pdev; 427 428 pdrv = linux_pci_find(dev, &id); 429 pdev = device_get_softc(dev); 430 431 MPASS(pdrv != NULL); 432 MPASS(pdev != NULL); 433 434 return (linux_pci_attach_device(dev, pdrv, id, pdev)); 435 } 436 437 int 438 linux_pci_attach_device(device_t dev, struct pci_driver *pdrv, 439 const struct pci_device_id *id, struct pci_dev *pdev) 440 { 441 struct resource_list_entry *rle; 442 device_t parent; 443 uintptr_t rid; 444 int error; 445 bool isdrm; 446 447 linux_set_current(curthread); 448 449 parent = device_get_parent(dev); 450 isdrm = pdrv != NULL && pdrv->isdrm; 451 452 if (isdrm) { 453 struct pci_devinfo *dinfo; 454 455 dinfo = device_get_ivars(parent); 456 device_set_ivars(dev, dinfo); 457 } 458 459 lkpifill_pci_dev(dev, pdev); 460 if (isdrm) 461 PCI_GET_ID(device_get_parent(parent), parent, PCI_ID_RID, &rid); 462 else 463 PCI_GET_ID(parent, dev, PCI_ID_RID, &rid); 464 pdev->devfn = rid; 465 pdev->pdrv = pdrv; 466 rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0, false); 467 if (rle != NULL) 468 pdev->dev.irq = rle->start; 469 else 470 pdev->dev.irq = LINUX_IRQ_INVALID; 471 pdev->irq = pdev->dev.irq; 472 error = linux_pdev_dma_init(pdev); 473 if (error) 474 goto out_dma_init; 475 476 TAILQ_INIT(&pdev->mmio); 477 478 spin_lock(&pci_lock); 479 list_add(&pdev->links, &pci_devices); 480 spin_unlock(&pci_lock); 481 482 if (pdrv != NULL) { 483 error = pdrv->probe(pdev, id); 484 if (error) 485 goto out_probe; 486 } 487 return (0); 488 489 out_probe: 490 free(pdev->bus, M_DEVBUF); 491 linux_pdev_dma_uninit(pdev); 492 out_dma_init: 493 spin_lock(&pci_lock); 494 list_del(&pdev->links); 495 spin_unlock(&pci_lock); 496 put_device(&pdev->dev); 497 return (-error); 498 } 499 500 static int 501 linux_pci_detach(device_t dev) 502 { 503 struct pci_dev *pdev; 504 505 pdev = device_get_softc(dev); 506 507 MPASS(pdev != NULL); 508 509 device_set_desc(dev, NULL); 510 511 return (linux_pci_detach_device(pdev)); 512 } 513 514 int 515 linux_pci_detach_device(struct pci_dev *pdev) 516 { 517 518 linux_set_current(curthread); 519 520 if (pdev->pdrv != NULL) 521 pdev->pdrv->remove(pdev); 522 523 if (pdev->root != NULL) 524 pci_dev_put(pdev->root); 525 free(pdev->bus, M_DEVBUF); 526 linux_pdev_dma_uninit(pdev); 527 528 spin_lock(&pci_lock); 529 list_del(&pdev->links); 530 spin_unlock(&pci_lock); 531 put_device(&pdev->dev); 532 533 return (0); 534 } 535 536 static int 537 lkpi_pci_disable_dev(struct device *dev) 538 { 539 540 (void) pci_disable_io(dev->bsddev, SYS_RES_MEMORY); 541 (void) pci_disable_io(dev->bsddev, SYS_RES_IOPORT); 542 return (0); 543 } 544 545 struct pci_devres * 546 lkpi_pci_devres_get_alloc(struct pci_dev *pdev) 547 { 548 struct pci_devres *dr; 549 550 dr = lkpi_devres_find(&pdev->dev, lkpi_pci_devres_release, NULL, NULL); 551 if (dr == NULL) { 552 dr = lkpi_devres_alloc(lkpi_pci_devres_release, sizeof(*dr), 553 GFP_KERNEL | __GFP_ZERO); 554 if (dr != NULL) 555 lkpi_devres_add(&pdev->dev, dr); 556 } 557 558 return (dr); 559 } 560 561 void 562 lkpi_pci_devres_release(struct device *dev, void *p) 563 { 564 struct pci_devres *dr; 565 struct pci_dev *pdev; 566 int bar; 567 568 pdev = to_pci_dev(dev); 569 dr = p; 570 571 if (pdev->msix_enabled) 572 lkpi_pci_disable_msix(pdev); 573 if (pdev->msi_enabled) 574 lkpi_pci_disable_msi(pdev); 
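	/*
	 * Undo a managed device enable and release every BAR that was
	 * recorded in region_mask on behalf of the driver.
	 */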
575 576 if (dr->enable_io && lkpi_pci_disable_dev(dev) == 0) 577 dr->enable_io = false; 578 579 if (dr->region_mask == 0) 580 return; 581 for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) { 582 583 if ((dr->region_mask & (1 << bar)) == 0) 584 continue; 585 pci_release_region(pdev, bar); 586 } 587 } 588 589 struct pcim_iomap_devres * 590 lkpi_pcim_iomap_devres_find(struct pci_dev *pdev) 591 { 592 struct pcim_iomap_devres *dr; 593 594 dr = lkpi_devres_find(&pdev->dev, lkpi_pcim_iomap_table_release, 595 NULL, NULL); 596 if (dr == NULL) { 597 dr = lkpi_devres_alloc(lkpi_pcim_iomap_table_release, 598 sizeof(*dr), GFP_KERNEL | __GFP_ZERO); 599 if (dr != NULL) 600 lkpi_devres_add(&pdev->dev, dr); 601 } 602 603 if (dr == NULL) 604 device_printf(pdev->dev.bsddev, "%s: NULL\n", __func__); 605 606 return (dr); 607 } 608 609 void 610 lkpi_pcim_iomap_table_release(struct device *dev, void *p) 611 { 612 struct pcim_iomap_devres *dr; 613 struct pci_dev *pdev; 614 int bar; 615 616 dr = p; 617 pdev = to_pci_dev(dev); 618 for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) { 619 620 if (dr->mmio_table[bar] == NULL) 621 continue; 622 623 pci_iounmap(pdev, dr->mmio_table[bar]); 624 } 625 } 626 627 static int 628 linux_pci_suspend(device_t dev) 629 { 630 const struct dev_pm_ops *pmops; 631 struct pm_message pm = { }; 632 struct pci_dev *pdev; 633 int error; 634 635 error = 0; 636 linux_set_current(curthread); 637 pdev = device_get_softc(dev); 638 pmops = pdev->pdrv->driver.pm; 639 640 if (pdev->pdrv->suspend != NULL) 641 error = -pdev->pdrv->suspend(pdev, pm); 642 else if (pmops != NULL && pmops->suspend != NULL) { 643 error = -pmops->suspend(&pdev->dev); 644 if (error == 0 && pmops->suspend_late != NULL) 645 error = -pmops->suspend_late(&pdev->dev); 646 if (error == 0 && pmops->suspend_noirq != NULL) 647 error = -pmops->suspend_noirq(&pdev->dev); 648 } 649 return (error); 650 } 651 652 static int 653 linux_pci_resume(device_t dev) 654 { 655 const struct dev_pm_ops *pmops; 656 struct pci_dev *pdev; 657 int error; 658 659 error = 0; 660 linux_set_current(curthread); 661 pdev = device_get_softc(dev); 662 pmops = pdev->pdrv->driver.pm; 663 664 if (pdev->pdrv->resume != NULL) 665 error = -pdev->pdrv->resume(pdev); 666 else if (pmops != NULL && pmops->resume != NULL) { 667 if (pmops->resume_early != NULL) 668 error = -pmops->resume_early(&pdev->dev); 669 if (error == 0 && pmops->resume != NULL) 670 error = -pmops->resume(&pdev->dev); 671 } 672 return (error); 673 } 674 675 static int 676 linux_pci_shutdown(device_t dev) 677 { 678 struct pci_dev *pdev; 679 680 linux_set_current(curthread); 681 pdev = device_get_softc(dev); 682 if (pdev->pdrv->shutdown != NULL) 683 pdev->pdrv->shutdown(pdev); 684 return (0); 685 } 686 687 static int 688 linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config) 689 { 690 struct pci_dev *pdev; 691 int error; 692 693 linux_set_current(curthread); 694 pdev = device_get_softc(dev); 695 if (pdev->pdrv->bsd_iov_init != NULL) 696 error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config); 697 else 698 error = EINVAL; 699 return (error); 700 } 701 702 static void 703 linux_pci_iov_uninit(device_t dev) 704 { 705 struct pci_dev *pdev; 706 707 linux_set_current(curthread); 708 pdev = device_get_softc(dev); 709 if (pdev->pdrv->bsd_iov_uninit != NULL) 710 pdev->pdrv->bsd_iov_uninit(dev); 711 } 712 713 static int 714 linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config) 715 { 716 struct pci_dev *pdev; 717 int error; 718 719 linux_set_current(curthread); 720 pdev = 
    device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_add_vf != NULL)
		error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config);
	else
		error = EINVAL;
	return (error);
}

static int
_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
{
	int error;

	linux_set_current(curthread);
	spin_lock(&pci_lock);
	list_add(&pdrv->node, &pci_drivers);
	spin_unlock(&pci_lock);
	if (pdrv->bsddriver.name == NULL)
		pdrv->bsddriver.name = pdrv->name;
	pdrv->bsddriver.methods = pci_methods;
	pdrv->bsddriver.size = sizeof(struct pci_dev);

	bus_topo_lock();
	error = devclass_add_driver(dc, &pdrv->bsddriver,
	    BUS_PASS_DEFAULT, &pdrv->bsdclass);
	bus_topo_unlock();
	return (-error);
}

int
linux_pci_register_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_find("pci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = false;
	return (_linux_pci_register_driver(pdrv, dc));
}

struct resource_list_entry *
linux_pci_reserve_bar(struct pci_dev *pdev, struct resource_list *rl,
    int type, int rid)
{
	device_t dev;
	struct resource *res;

	KASSERT(type == SYS_RES_IOPORT || type == SYS_RES_MEMORY,
	    ("trying to reserve non-BAR type %d", type));

	dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	res = pci_reserve_map(device_get_parent(dev), dev, type, &rid, 0, ~0,
	    1, 1, 0);
	if (res == NULL)
		return (NULL);
	return (resource_list_find(rl, type, rid));
}

unsigned long
pci_resource_start(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;
	rman_res_t newstart;
	device_t dev;
	int error;

	if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL)
		return (0);
	dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	error = bus_translate_resource(dev, rle->type, rle->start, &newstart);
	if (error != 0) {
		device_printf(pdev->dev.bsddev,
		    "translate of %#jx failed: %d\n",
		    (uintmax_t)rle->start, error);
		return (0);
	}
	return (newstart);
}

unsigned long
pci_resource_len(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;

	if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL)
		return (0);
	return (rle->count);
}

int
pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	struct resource *res;
	struct pci_devres *dr;
	struct pci_mmio_region *mmio;
	int rid;
	int type;

	type = pci_resource_type(pdev, bar);
	if (type < 0)
		return (-ENODEV);
	rid = PCIR_BAR(bar);
	res = bus_alloc_resource_any(pdev->dev.bsddev, type, &rid,
	    RF_ACTIVE|RF_SHAREABLE);
	if (res == NULL) {
		device_printf(pdev->dev.bsddev, "%s: failed to alloc "
		    "bar %d type %d rid %d\n",
		    __func__, bar, type, PCIR_BAR(bar));
		return (-ENODEV);
	}

	/*
	 * It seems there is an implicit devres tracking on these if the device
	 * is managed; otherwise the resources are not automatically freed on
	 * FreeBSD/LinuxKPI though they should be/are expected to be by Linux
	 * drivers.
	 */
	dr = lkpi_pci_devres_find(pdev);
	if (dr != NULL) {
		dr->region_mask |= (1 << bar);
		dr->region_table[bar] = res;
	}

	/* Even if the device is not managed we need to track it for iomap. */
*/ 847 mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO); 848 mmio->rid = PCIR_BAR(bar); 849 mmio->type = type; 850 mmio->res = res; 851 TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next); 852 853 return (0); 854 } 855 856 struct resource * 857 _lkpi_pci_iomap(struct pci_dev *pdev, int bar, int mmio_size __unused) 858 { 859 struct pci_mmio_region *mmio, *p; 860 int type; 861 862 type = pci_resource_type(pdev, bar); 863 if (type < 0) { 864 device_printf(pdev->dev.bsddev, "%s: bar %d type %d\n", 865 __func__, bar, type); 866 return (NULL); 867 } 868 869 /* 870 * Check for duplicate mappings. 871 * This can happen if a driver calls pci_request_region() first. 872 */ 873 TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) { 874 if (mmio->type == type && mmio->rid == PCIR_BAR(bar)) { 875 return (mmio->res); 876 } 877 } 878 879 mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO); 880 mmio->rid = PCIR_BAR(bar); 881 mmio->type = type; 882 mmio->res = bus_alloc_resource_any(pdev->dev.bsddev, mmio->type, 883 &mmio->rid, RF_ACTIVE|RF_SHAREABLE); 884 if (mmio->res == NULL) { 885 device_printf(pdev->dev.bsddev, "%s: failed to alloc " 886 "bar %d type %d rid %d\n", 887 __func__, bar, type, PCIR_BAR(bar)); 888 free(mmio, M_DEVBUF); 889 return (NULL); 890 } 891 TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next); 892 893 return (mmio->res); 894 } 895 896 int 897 linux_pci_register_drm_driver(struct pci_driver *pdrv) 898 { 899 devclass_t dc; 900 901 dc = devclass_create("vgapci"); 902 if (dc == NULL) 903 return (-ENXIO); 904 pdrv->isdrm = true; 905 pdrv->name = "drmn"; 906 return (_linux_pci_register_driver(pdrv, dc)); 907 } 908 909 void 910 linux_pci_unregister_driver(struct pci_driver *pdrv) 911 { 912 devclass_t bus; 913 914 bus = devclass_find("pci"); 915 916 spin_lock(&pci_lock); 917 list_del(&pdrv->node); 918 spin_unlock(&pci_lock); 919 bus_topo_lock(); 920 if (bus != NULL) 921 devclass_delete_driver(bus, &pdrv->bsddriver); 922 bus_topo_unlock(); 923 } 924 925 void 926 linux_pci_unregister_drm_driver(struct pci_driver *pdrv) 927 { 928 devclass_t bus; 929 930 bus = devclass_find("vgapci"); 931 932 spin_lock(&pci_lock); 933 list_del(&pdrv->node); 934 spin_unlock(&pci_lock); 935 bus_topo_lock(); 936 if (bus != NULL) 937 devclass_delete_driver(bus, &pdrv->bsddriver); 938 bus_topo_unlock(); 939 } 940 941 int 942 pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv, 943 unsigned int flags) 944 { 945 int error; 946 947 if (flags & PCI_IRQ_MSIX) { 948 struct msix_entry *entries; 949 int i; 950 951 entries = kcalloc(maxv, sizeof(*entries), GFP_KERNEL); 952 if (entries == NULL) { 953 error = -ENOMEM; 954 goto out; 955 } 956 for (i = 0; i < maxv; ++i) 957 entries[i].entry = i; 958 error = pci_enable_msix(pdev, entries, maxv); 959 out: 960 kfree(entries); 961 if (error == 0 && pdev->msix_enabled) 962 return (pdev->dev.irq_end - pdev->dev.irq_start); 963 } 964 if (flags & PCI_IRQ_MSI) { 965 if (pci_msi_count(pdev->dev.bsddev) < minv) 966 return (-ENOSPC); 967 error = _lkpi_pci_enable_msi_range(pdev, minv, maxv); 968 if (error == 0 && pdev->msi_enabled) 969 return (pdev->dev.irq_end - pdev->dev.irq_start); 970 } 971 if (flags & PCI_IRQ_LEGACY) { 972 if (pdev->irq) 973 return (1); 974 } 975 976 return (-EINVAL); 977 } 978 979 struct msi_desc * 980 lkpi_pci_msi_desc_alloc(int irq) 981 { 982 struct device *dev; 983 struct pci_dev *pdev; 984 struct msi_desc *desc; 985 struct pci_devinfo *dinfo; 986 struct pcicfg_msi *msi; 987 int vec; 988 989 dev = linux_pci_find_irq_dev(irq); 990 if (dev == NULL) 991 return (NULL); 
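	/*
	 * The IRQ must fall within the vector range allocated to this device;
	 * translate it into an index into the msi_desc[] array.
	 */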
	pdev = to_pci_dev(dev);

	if (pdev->msi_desc == NULL)
		return (NULL);

	if (irq < pdev->dev.irq_start || irq >= pdev->dev.irq_end)
		return (NULL);

	vec = irq - pdev->dev.irq_start;

	if (pdev->msi_desc[vec] != NULL)
		return (pdev->msi_desc[vec]);

	dinfo = device_get_ivars(dev->bsddev);
	msi = &dinfo->cfg.msi;

	desc = malloc(sizeof(*desc), M_DEVBUF, M_WAITOK | M_ZERO);

	desc->pci.msi_attrib.is_64 =
	    (msi->msi_ctrl & PCIM_MSICTRL_64BIT) ? true : false;
	desc->msg.data = msi->msi_data;

	pdev->msi_desc[vec] = desc;

	return (desc);
}

bool
pci_device_is_present(struct pci_dev *pdev)
{
	device_t dev;

	dev = pdev->dev.bsddev;

	return (bus_child_present(dev));
}

CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));

struct linux_dma_obj {
	void		*vaddr;
	uint64_t	dma_addr;
	bus_dmamap_t	dmamap;
	bus_dma_tag_t	dmat;
};

static uma_zone_t linux_dma_trie_zone;
static uma_zone_t linux_dma_obj_zone;

static void
linux_dma_init(void *arg)
{

	linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
	    pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
	    UMA_ALIGN_PTR, 0);
	linux_dma_obj_zone = uma_zcreate("linux_dma_object",
	    sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	lkpi_pci_nseg1_fail = counter_u64_alloc(M_WAITOK);
}
SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);

static void
linux_dma_uninit(void *arg)
{

	counter_u64_free(lkpi_pci_nseg1_fail);
	uma_zdestroy(linux_dma_obj_zone);
	uma_zdestroy(linux_dma_trie_zone);
}
SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);

static void *
linux_dma_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT));
}

static void
linux_dma_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(linux_dma_trie_zone, node);
}

PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
    linux_dma_trie_free);

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
static dma_addr_t
linux_dma_map_phys_common(struct device *dev, vm_paddr_t phys, size_t len,
    bus_dma_tag_t dmat)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	/*
	 * If the resultant mapping will be entirely 1:1 with the
	 * physical address, short-circuit the remainder of the
	 * bus_dma API. This avoids tracking collisions in the pctrie
	 * with the additional benefit of reducing overhead.
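	 * Such identity mappings are never entered into the pctrie, so
	 * linux_dma_unmap() finds nothing to tear down for them.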
1100 */ 1101 if (bus_dma_id_mapped(dmat, phys, len)) 1102 return (phys); 1103 1104 obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT); 1105 if (obj == NULL) { 1106 return (0); 1107 } 1108 obj->dmat = dmat; 1109 1110 DMA_PRIV_LOCK(priv); 1111 if (bus_dmamap_create(obj->dmat, 0, &obj->dmamap) != 0) { 1112 DMA_PRIV_UNLOCK(priv); 1113 uma_zfree(linux_dma_obj_zone, obj); 1114 return (0); 1115 } 1116 1117 nseg = -1; 1118 if (_bus_dmamap_load_phys(obj->dmat, obj->dmamap, phys, len, 1119 BUS_DMA_NOWAIT, &seg, &nseg) != 0) { 1120 bus_dmamap_destroy(obj->dmat, obj->dmamap); 1121 DMA_PRIV_UNLOCK(priv); 1122 uma_zfree(linux_dma_obj_zone, obj); 1123 counter_u64_add(lkpi_pci_nseg1_fail, 1); 1124 if (linuxkpi_debug) 1125 dump_stack(); 1126 return (0); 1127 } 1128 1129 KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg)); 1130 obj->dma_addr = seg.ds_addr; 1131 1132 error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj); 1133 if (error != 0) { 1134 bus_dmamap_unload(obj->dmat, obj->dmamap); 1135 bus_dmamap_destroy(obj->dmat, obj->dmamap); 1136 DMA_PRIV_UNLOCK(priv); 1137 uma_zfree(linux_dma_obj_zone, obj); 1138 return (0); 1139 } 1140 DMA_PRIV_UNLOCK(priv); 1141 return (obj->dma_addr); 1142 } 1143 #else 1144 static dma_addr_t 1145 linux_dma_map_phys_common(struct device *dev __unused, vm_paddr_t phys, 1146 size_t len __unused, bus_dma_tag_t dmat __unused) 1147 { 1148 return (phys); 1149 } 1150 #endif 1151 1152 dma_addr_t 1153 linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len) 1154 { 1155 struct linux_dma_priv *priv; 1156 1157 priv = dev->dma_priv; 1158 return (linux_dma_map_phys_common(dev, phys, len, priv->dmat)); 1159 } 1160 1161 #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) 1162 void 1163 linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len) 1164 { 1165 struct linux_dma_priv *priv; 1166 struct linux_dma_obj *obj; 1167 1168 priv = dev->dma_priv; 1169 1170 if (pctrie_is_empty(&priv->ptree)) 1171 return; 1172 1173 DMA_PRIV_LOCK(priv); 1174 obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr); 1175 if (obj == NULL) { 1176 DMA_PRIV_UNLOCK(priv); 1177 return; 1178 } 1179 LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr); 1180 bus_dmamap_unload(obj->dmat, obj->dmamap); 1181 bus_dmamap_destroy(obj->dmat, obj->dmamap); 1182 DMA_PRIV_UNLOCK(priv); 1183 1184 uma_zfree(linux_dma_obj_zone, obj); 1185 } 1186 #else 1187 void 1188 linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len) 1189 { 1190 } 1191 #endif 1192 1193 void * 1194 linux_dma_alloc_coherent(struct device *dev, size_t size, 1195 dma_addr_t *dma_handle, gfp_t flag) 1196 { 1197 struct linux_dma_priv *priv; 1198 vm_paddr_t high; 1199 size_t align; 1200 void *mem; 1201 1202 if (dev == NULL || dev->dma_priv == NULL) { 1203 *dma_handle = 0; 1204 return (NULL); 1205 } 1206 priv = dev->dma_priv; 1207 if (priv->dma_coherent_mask) 1208 high = priv->dma_coherent_mask; 1209 else 1210 /* Coherent is lower 32bit only by default in Linux. */ 1211 high = BUS_SPACE_MAXADDR_32BIT; 1212 align = PAGE_SIZE << get_order(size); 1213 /* Always zero the allocation. 
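	 * Force M_ZERO regardless of the caller's gfp flags to match
	 * dma_alloc_coherent()'s zeroing behaviour on Linux.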
*/ 1214 flag |= M_ZERO; 1215 mem = kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high, 1216 align, 0, VM_MEMATTR_DEFAULT); 1217 if (mem != NULL) { 1218 *dma_handle = linux_dma_map_phys_common(dev, vtophys(mem), size, 1219 priv->dmat_coherent); 1220 if (*dma_handle == 0) { 1221 kmem_free(mem, size); 1222 mem = NULL; 1223 } 1224 } else { 1225 *dma_handle = 0; 1226 } 1227 return (mem); 1228 } 1229 1230 struct lkpi_devres_dmam_coherent { 1231 size_t size; 1232 dma_addr_t *handle; 1233 void *mem; 1234 }; 1235 1236 static void 1237 lkpi_dmam_free_coherent(struct device *dev, void *p) 1238 { 1239 struct lkpi_devres_dmam_coherent *dr; 1240 1241 dr = p; 1242 dma_free_coherent(dev, dr->size, dr->mem, *dr->handle); 1243 } 1244 1245 void * 1246 linuxkpi_dmam_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, 1247 gfp_t flag) 1248 { 1249 struct lkpi_devres_dmam_coherent *dr; 1250 1251 dr = lkpi_devres_alloc(lkpi_dmam_free_coherent, 1252 sizeof(*dr), GFP_KERNEL | __GFP_ZERO); 1253 1254 if (dr == NULL) 1255 return (NULL); 1256 1257 dr->size = size; 1258 dr->mem = linux_dma_alloc_coherent(dev, size, dma_handle, flag); 1259 dr->handle = dma_handle; 1260 if (dr->mem == NULL) { 1261 lkpi_devres_free(dr); 1262 return (NULL); 1263 } 1264 1265 lkpi_devres_add(dev, dr); 1266 return (dr->mem); 1267 } 1268 1269 void 1270 linuxkpi_dma_sync(struct device *dev, dma_addr_t dma_addr, size_t size, 1271 bus_dmasync_op_t op) 1272 { 1273 struct linux_dma_priv *priv; 1274 struct linux_dma_obj *obj; 1275 1276 priv = dev->dma_priv; 1277 1278 if (pctrie_is_empty(&priv->ptree)) 1279 return; 1280 1281 DMA_PRIV_LOCK(priv); 1282 obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr); 1283 if (obj == NULL) { 1284 DMA_PRIV_UNLOCK(priv); 1285 return; 1286 } 1287 1288 bus_dmamap_sync(obj->dmat, obj->dmamap, op); 1289 DMA_PRIV_UNLOCK(priv); 1290 } 1291 1292 int 1293 linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents, 1294 enum dma_data_direction direction, unsigned long attrs __unused) 1295 { 1296 struct linux_dma_priv *priv; 1297 struct scatterlist *sg; 1298 int i, nseg; 1299 bus_dma_segment_t seg; 1300 1301 priv = dev->dma_priv; 1302 1303 DMA_PRIV_LOCK(priv); 1304 1305 /* create common DMA map in the first S/G entry */ 1306 if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) { 1307 DMA_PRIV_UNLOCK(priv); 1308 return (0); 1309 } 1310 1311 /* load all S/G list entries */ 1312 for_each_sg(sgl, sg, nents, i) { 1313 nseg = -1; 1314 if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map, 1315 sg_phys(sg), sg->length, BUS_DMA_NOWAIT, 1316 &seg, &nseg) != 0) { 1317 bus_dmamap_unload(priv->dmat, sgl->dma_map); 1318 bus_dmamap_destroy(priv->dmat, sgl->dma_map); 1319 DMA_PRIV_UNLOCK(priv); 1320 return (0); 1321 } 1322 KASSERT(nseg == 0, 1323 ("More than one segment (nseg=%d)", nseg + 1)); 1324 1325 sg_dma_address(sg) = seg.ds_addr; 1326 } 1327 1328 switch (direction) { 1329 case DMA_BIDIRECTIONAL: 1330 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE); 1331 break; 1332 case DMA_TO_DEVICE: 1333 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD); 1334 break; 1335 case DMA_FROM_DEVICE: 1336 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE); 1337 break; 1338 default: 1339 break; 1340 } 1341 1342 DMA_PRIV_UNLOCK(priv); 1343 1344 return (nents); 1345 } 1346 1347 void 1348 linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl, 1349 int nents __unused, enum dma_data_direction direction, 1350 unsigned long attrs __unused) 1351 { 1352 struct 
linux_dma_priv *priv; 1353 1354 priv = dev->dma_priv; 1355 1356 DMA_PRIV_LOCK(priv); 1357 1358 switch (direction) { 1359 case DMA_BIDIRECTIONAL: 1360 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD); 1361 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD); 1362 break; 1363 case DMA_TO_DEVICE: 1364 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTWRITE); 1365 break; 1366 case DMA_FROM_DEVICE: 1367 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD); 1368 break; 1369 default: 1370 break; 1371 } 1372 1373 bus_dmamap_unload(priv->dmat, sgl->dma_map); 1374 bus_dmamap_destroy(priv->dmat, sgl->dma_map); 1375 DMA_PRIV_UNLOCK(priv); 1376 } 1377 1378 struct dma_pool { 1379 struct device *pool_device; 1380 uma_zone_t pool_zone; 1381 struct mtx pool_lock; 1382 bus_dma_tag_t pool_dmat; 1383 size_t pool_entry_size; 1384 struct pctrie pool_ptree; 1385 }; 1386 1387 #define DMA_POOL_LOCK(pool) mtx_lock(&(pool)->pool_lock) 1388 #define DMA_POOL_UNLOCK(pool) mtx_unlock(&(pool)->pool_lock) 1389 1390 static inline int 1391 dma_pool_obj_ctor(void *mem, int size, void *arg, int flags) 1392 { 1393 struct linux_dma_obj *obj = mem; 1394 struct dma_pool *pool = arg; 1395 int error, nseg; 1396 bus_dma_segment_t seg; 1397 1398 nseg = -1; 1399 DMA_POOL_LOCK(pool); 1400 error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap, 1401 vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT, 1402 &seg, &nseg); 1403 DMA_POOL_UNLOCK(pool); 1404 if (error != 0) { 1405 return (error); 1406 } 1407 KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg)); 1408 obj->dma_addr = seg.ds_addr; 1409 1410 return (0); 1411 } 1412 1413 static void 1414 dma_pool_obj_dtor(void *mem, int size, void *arg) 1415 { 1416 struct linux_dma_obj *obj = mem; 1417 struct dma_pool *pool = arg; 1418 1419 DMA_POOL_LOCK(pool); 1420 bus_dmamap_unload(pool->pool_dmat, obj->dmamap); 1421 DMA_POOL_UNLOCK(pool); 1422 } 1423 1424 static int 1425 dma_pool_obj_import(void *arg, void **store, int count, int domain __unused, 1426 int flags) 1427 { 1428 struct dma_pool *pool = arg; 1429 struct linux_dma_obj *obj; 1430 int error, i; 1431 1432 for (i = 0; i < count; i++) { 1433 obj = uma_zalloc(linux_dma_obj_zone, flags); 1434 if (obj == NULL) 1435 break; 1436 1437 error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr, 1438 BUS_DMA_NOWAIT, &obj->dmamap); 1439 if (error!= 0) { 1440 uma_zfree(linux_dma_obj_zone, obj); 1441 break; 1442 } 1443 1444 store[i] = obj; 1445 } 1446 1447 return (i); 1448 } 1449 1450 static void 1451 dma_pool_obj_release(void *arg, void **store, int count) 1452 { 1453 struct dma_pool *pool = arg; 1454 struct linux_dma_obj *obj; 1455 int i; 1456 1457 for (i = 0; i < count; i++) { 1458 obj = store[i]; 1459 bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap); 1460 uma_zfree(linux_dma_obj_zone, obj); 1461 } 1462 } 1463 1464 struct dma_pool * 1465 linux_dma_pool_create(char *name, struct device *dev, size_t size, 1466 size_t align, size_t boundary) 1467 { 1468 struct linux_dma_priv *priv; 1469 struct dma_pool *pool; 1470 1471 priv = dev->dma_priv; 1472 1473 pool = kzalloc(sizeof(*pool), GFP_KERNEL); 1474 pool->pool_device = dev; 1475 pool->pool_entry_size = size; 1476 1477 if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev), 1478 align, boundary, /* alignment, boundary */ 1479 priv->dma_mask, /* lowaddr */ 1480 BUS_SPACE_MAXADDR, /* highaddr */ 1481 NULL, NULL, /* filtfunc, filtfuncarg */ 1482 size, /* maxsize */ 1483 1, /* nsegments */ 1484 size, /* maxsegsz */ 1485 0, /* flags */ 1486 
NULL, NULL, /* lockfunc, lockfuncarg */ 1487 &pool->pool_dmat)) { 1488 kfree(pool); 1489 return (NULL); 1490 } 1491 1492 pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor, 1493 dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import, 1494 dma_pool_obj_release, pool, 0); 1495 1496 mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF); 1497 pctrie_init(&pool->pool_ptree); 1498 1499 return (pool); 1500 } 1501 1502 void 1503 linux_dma_pool_destroy(struct dma_pool *pool) 1504 { 1505 1506 uma_zdestroy(pool->pool_zone); 1507 bus_dma_tag_destroy(pool->pool_dmat); 1508 mtx_destroy(&pool->pool_lock); 1509 kfree(pool); 1510 } 1511 1512 void 1513 lkpi_dmam_pool_destroy(struct device *dev, void *p) 1514 { 1515 struct dma_pool *pool; 1516 1517 pool = *(struct dma_pool **)p; 1518 LINUX_DMA_PCTRIE_RECLAIM(&pool->pool_ptree); 1519 linux_dma_pool_destroy(pool); 1520 } 1521 1522 void * 1523 linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, 1524 dma_addr_t *handle) 1525 { 1526 struct linux_dma_obj *obj; 1527 1528 obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags & GFP_NATIVE_MASK); 1529 if (obj == NULL) 1530 return (NULL); 1531 1532 DMA_POOL_LOCK(pool); 1533 if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) { 1534 DMA_POOL_UNLOCK(pool); 1535 uma_zfree_arg(pool->pool_zone, obj, pool); 1536 return (NULL); 1537 } 1538 DMA_POOL_UNLOCK(pool); 1539 1540 *handle = obj->dma_addr; 1541 return (obj->vaddr); 1542 } 1543 1544 void 1545 linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr) 1546 { 1547 struct linux_dma_obj *obj; 1548 1549 DMA_POOL_LOCK(pool); 1550 obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr); 1551 if (obj == NULL) { 1552 DMA_POOL_UNLOCK(pool); 1553 return; 1554 } 1555 LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr); 1556 DMA_POOL_UNLOCK(pool); 1557 1558 uma_zfree_arg(pool->pool_zone, obj, pool); 1559 } 1560 1561 static int 1562 linux_backlight_get_status(device_t dev, struct backlight_props *props) 1563 { 1564 struct pci_dev *pdev; 1565 1566 linux_set_current(curthread); 1567 pdev = device_get_softc(dev); 1568 1569 props->brightness = pdev->dev.bd->props.brightness; 1570 props->brightness = props->brightness * 100 / pdev->dev.bd->props.max_brightness; 1571 props->nlevels = 0; 1572 1573 return (0); 1574 } 1575 1576 static int 1577 linux_backlight_get_info(device_t dev, struct backlight_info *info) 1578 { 1579 struct pci_dev *pdev; 1580 1581 linux_set_current(curthread); 1582 pdev = device_get_softc(dev); 1583 1584 info->type = BACKLIGHT_TYPE_PANEL; 1585 strlcpy(info->name, pdev->dev.bd->name, BACKLIGHTMAXNAMELENGTH); 1586 return (0); 1587 } 1588 1589 static int 1590 linux_backlight_update_status(device_t dev, struct backlight_props *props) 1591 { 1592 struct pci_dev *pdev; 1593 1594 linux_set_current(curthread); 1595 pdev = device_get_softc(dev); 1596 1597 pdev->dev.bd->props.brightness = pdev->dev.bd->props.max_brightness * 1598 props->brightness / 100; 1599 pdev->dev.bd->props.power = props->brightness == 0 ? 
1600 4/* FB_BLANK_POWERDOWN */ : 0/* FB_BLANK_UNBLANK */; 1601 return (pdev->dev.bd->ops->update_status(pdev->dev.bd)); 1602 } 1603 1604 struct backlight_device * 1605 linux_backlight_device_register(const char *name, struct device *dev, 1606 void *data, const struct backlight_ops *ops, struct backlight_properties *props) 1607 { 1608 1609 dev->bd = malloc(sizeof(*dev->bd), M_DEVBUF, M_WAITOK | M_ZERO); 1610 dev->bd->ops = ops; 1611 dev->bd->props.type = props->type; 1612 dev->bd->props.max_brightness = props->max_brightness; 1613 dev->bd->props.brightness = props->brightness; 1614 dev->bd->props.power = props->power; 1615 dev->bd->data = data; 1616 dev->bd->dev = dev; 1617 dev->bd->name = strdup(name, M_DEVBUF); 1618 1619 dev->backlight_dev = backlight_register(name, dev->bsddev); 1620 1621 return (dev->bd); 1622 } 1623 1624 void 1625 linux_backlight_device_unregister(struct backlight_device *bd) 1626 { 1627 1628 backlight_destroy(bd->dev->backlight_dev); 1629 free(bd->name, M_DEVBUF); 1630 free(bd, M_DEVBUF); 1631 } 1632
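
/*
 * Illustrative sketch only: a minimal LinuxKPI PCI driver showing how the
 * registration, DMA-mask and interrupt-vector paths implemented above are
 * expected to be used by a consumer.  All identifiers (example_*), the
 * vendor/device IDs and the vector counts are hypothetical, and the block
 * is kept under "#if 0" so it is never compiled.
 */
#if 0
static const struct pci_device_id example_id_table[] = {
	{ PCI_DEVICE(0x1234, 0x5678) },		/* hypothetical IDs */
	{ 0, },
};

static int
example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int nvec;

	/*
	 * Widen the streaming DMA mask; the coherent mask keeps its 32-bit
	 * default unless raised separately.
	 */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0)
		return (-ENXIO);

	/* Prefer MSI-X, fall back to MSI, finally to the legacy INTx line. */
	nvec = pci_alloc_irq_vectors(pdev, 1, 4,
	    PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (nvec < 0)
		return (nvec);

	return (0);
}

static void
example_remove(struct pci_dev *pdev)
{

	pci_free_irq_vectors(pdev);
}

static struct pci_driver example_pci_driver = {
	.name = "lkpi_example",
	.id_table = example_id_table,
	.probe = example_probe,
	.remove = example_remove,
};
/* Registered from module init via pci_register_driver(&example_pci_driver). */
#endif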