/*-
 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 * Copyright (c) 2020-2022 The FreeBSD Foundation
 *
 * Portions of this software were developed by Björn Zeeb
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/pciio.h>
#include <sys/pctrie.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/stdarg.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <dev/pci/pci_iov.h>
#include <dev/backlight/backlight.h>

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/compat.h>

#include <linux/backlight.h>

#include "backlight_if.h"
#include "pcib_if.h"

/* Undef the linux function macro defined in linux/pci.h */
#undef pci_get_class

extern int linuxkpi_debug;

SYSCTL_DECL(_compat_linuxkpi);

static counter_u64_t lkpi_pci_nseg1_fail;
SYSCTL_COUNTER_U64(_compat_linuxkpi, OID_AUTO, lkpi_pci_nseg1_fail, CTLFLAG_RD,
    &lkpi_pci_nseg1_fail, "Count of busdma mapping failures of single-segment");

static device_probe_t linux_pci_probe;
static device_attach_t linux_pci_attach;
static device_detach_t linux_pci_detach;
static device_suspend_t linux_pci_suspend;
static device_resume_t linux_pci_resume;
static device_shutdown_t linux_pci_shutdown;
static pci_iov_init_t linux_pci_iov_init;
static pci_iov_uninit_t linux_pci_iov_uninit;
static pci_iov_add_vf_t linux_pci_iov_add_vf;
static int linux_backlight_get_status(device_t dev, struct backlight_props *props);
static int linux_backlight_update_status(device_t dev, struct backlight_props *props);
static int linux_backlight_get_info(device_t dev, struct backlight_info *info);
static device_method_t pci_methods[] = {
	DEVMETHOD(device_probe, linux_pci_probe),
	DEVMETHOD(device_attach, linux_pci_attach),
	DEVMETHOD(device_detach, linux_pci_detach),
	DEVMETHOD(device_suspend, linux_pci_suspend),
	DEVMETHOD(device_resume, linux_pci_resume),
	DEVMETHOD(device_shutdown, linux_pci_shutdown),
	DEVMETHOD(pci_iov_init, linux_pci_iov_init),
	DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf),

	/* backlight interface */
	DEVMETHOD(backlight_update_status, linux_backlight_update_status),
	DEVMETHOD(backlight_get_status, linux_backlight_get_status),
	DEVMETHOD(backlight_get_info, linux_backlight_get_info),
	DEVMETHOD_END
};

const char *pci_power_names[] = {
	"UNKNOWN", "D0", "D1", "D2", "D3hot", "D3cold"
};

struct linux_dma_priv {
	uint64_t	dma_mask;
	bus_dma_tag_t	dmat;
	uint64_t	dma_coherent_mask;
	bus_dma_tag_t	dmat_coherent;
	struct mtx	lock;
	struct pctrie	ptree;
};
#define	DMA_PRIV_LOCK(priv) mtx_lock(&(priv)->lock)
#define	DMA_PRIV_UNLOCK(priv) mtx_unlock(&(priv)->lock)

static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;

	priv = pdev->dev.dma_priv;
	if (priv->dmat)
		bus_dma_tag_destroy(priv->dmat);
	if (priv->dmat_coherent)
		bus_dma_tag_destroy(priv->dmat_coherent);
	mtx_destroy(&priv->lock);
	pdev->dev.dma_priv = NULL;
	free(priv, M_DEVBUF);
	return (0);
}

static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;
	int error;

	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);

	mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);
	pctrie_init(&priv->ptree);

	pdev->dev.dma_priv = priv;

	/* Create the default DMA tags. */
	error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
	if (error != 0)
		goto err;
	/* Coherent is lower 32bit only by default in Linux. */
	error = linux_dma_tag_init_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (error != 0)
		goto err;

	return (error);

err:
	linux_pdev_dma_uninit(pdev);
	return (error);
}
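/*
 * Illustrative example (not part of the original file; the mask width is
 * hypothetical): a Linux driver normally reprograms these tags through the
 * generic DMA mask API, e.g.
 *
 *	error = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(48));
 *	if (error != 0)
 *		return (error);
 *
 * which the linux/dma-mapping.h wrappers are expected to route into
 * linux_dma_tag_init() and linux_dma_tag_init_coherent() below.
 */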
int
linux_dma_tag_init(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat) {
		if (priv->dma_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat);
	}

	priv->dma_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat);
	return (-error);
}

int
linux_dma_tag_init_coherent(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat_coherent) {
		if (priv->dma_coherent_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat_coherent);
	}

	priv->dma_coherent_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat_coherent);
	return (-error);
}

static struct pci_driver *
linux_pci_find(device_t dev, const struct pci_device_id **idp)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;
	uint16_t subdevice;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);
	subvendor = pci_get_subvendor(dev);
	subdevice = pci_get_subdevice(dev);

	spin_lock(&pci_lock);
	list_for_each_entry(pdrv, &pci_drivers, node) {
		for (id = pdrv->id_table; id->vendor != 0; id++) {
			if (vendor == id->vendor &&
			    (PCI_ANY_ID == id->device || device == id->device) &&
			    (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) &&
			    (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) {
				*idp = id;
				spin_unlock(&pci_lock);
				return (pdrv);
			}
		}
	}
	spin_unlock(&pci_lock);
	return (NULL);
}

struct pci_dev *
lkpi_pci_get_device(uint16_t vendor, uint16_t device, struct pci_dev *odev)
{
	struct pci_dev *pdev;

	KASSERT(odev == NULL, ("%s: odev argument not yet supported\n", __func__));

	spin_lock(&pci_lock);
	list_for_each_entry(pdev, &pci_devices, links) {
		if (pdev->vendor == vendor && pdev->device == device)
			break;
	}
	spin_unlock(&pci_lock);

	return (pdev);
}

static void
lkpi_pci_dev_release(struct device *dev)
{

	lkpi_devres_release_free_list(dev);
	spin_lock_destroy(&dev->devres_lock);
}

static void
lkpifill_pci_dev(device_t dev, struct pci_dev *pdev)
{

	pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
	pdev->vendor = pci_get_vendor(dev);
	pdev->device = pci_get_device(dev);
	pdev->subsystem_vendor = pci_get_subvendor(dev);
	pdev->subsystem_device = pci_get_subdevice(dev);
	pdev->class = pci_get_class(dev);
	pdev->revision = pci_get_revid(dev);
	pdev->path_name = kasprintf(M_WAITOK, "%04d:%02d:%02d.%d",
	    pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
	    pci_get_function(dev));
	pdev->bus = malloc(sizeof(*pdev->bus), M_DEVBUF, M_WAITOK | M_ZERO);
	/*
	 * This should be the upstream bridge; pci_upstream_bridge()
	 * handles that case on demand as otherwise we'll shadow the
	 * entire PCI hierarchy.
	 */
	pdev->bus->self = pdev;
	pdev->bus->number = pci_get_bus(dev);
	pdev->bus->domain = pci_get_domain(dev);
	pdev->dev.bsddev = dev;
	pdev->dev.parent = &linux_root_device;
	pdev->dev.release = lkpi_pci_dev_release;
	INIT_LIST_HEAD(&pdev->dev.irqents);

	if (pci_msi_count(dev) > 0)
		pdev->msi_desc = malloc(pci_msi_count(dev) *
		    sizeof(*pdev->msi_desc), M_DEVBUF, M_WAITOK | M_ZERO);

	kobject_init(&pdev->dev.kobj, &linux_dev_ktype);
	kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev));
	kobject_add(&pdev->dev.kobj, &linux_root_device.kobj,
	    kobject_name(&pdev->dev.kobj));
	spin_lock_init(&pdev->dev.devres_lock);
	INIT_LIST_HEAD(&pdev->dev.devres_head);
}

static void
lkpinew_pci_dev_release(struct device *dev)
{
	struct pci_dev *pdev;
	int i;

	pdev = to_pci_dev(dev);
	if (pdev->root != NULL)
		pci_dev_put(pdev->root);
	if (pdev->bus->self != pdev)
		pci_dev_put(pdev->bus->self);
	free(pdev->bus, M_DEVBUF);
	if (pdev->msi_desc != NULL) {
		for (i = pci_msi_count(pdev->dev.bsddev) - 1; i >= 0; i--)
			free(pdev->msi_desc[i], M_DEVBUF);
		free(pdev->msi_desc, M_DEVBUF);
	}
	kfree(pdev->path_name);
	free(pdev, M_DEVBUF);
}

struct pci_dev *
lkpinew_pci_dev(device_t dev)
{
	struct pci_dev *pdev;

	pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK|M_ZERO);
	lkpifill_pci_dev(dev, pdev);
	pdev->dev.release = lkpinew_pci_dev_release;

	return (pdev);
}

struct pci_dev *
lkpi_pci_get_class(unsigned int class, struct pci_dev *from)
{
	device_t dev;
	device_t devfrom = NULL;
	struct pci_dev *pdev;

	if (from != NULL)
		devfrom = from->dev.bsddev;

	dev = pci_find_class_from(class >> 16, (class >> 8) & 0xFF, devfrom);
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

struct pci_dev *
lkpi_pci_get_domain_bus_and_slot(int domain, unsigned int bus,
    unsigned int devfn)
{
	device_t dev;
	struct pci_dev *pdev;

	dev = pci_find_dbsf(domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}
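/*
 * Illustrative example (driver names are hypothetical): linux_pci_probe()
 * below matches a device against the id_table that a Linux driver
 * registers, e.g.
 *
 *	static const struct pci_device_id foo_ids[] = {
 *		{ PCI_DEVICE(0x8086, 0x1234) },
 *		{ 0, }
 *	};
 *
 *	static struct pci_driver foo_driver = {
 *		.name		= "foo",
 *		.id_table	= foo_ids,
 *		.probe		= foo_probe,
 *		.remove		= foo_remove,
 *	};
 *
 * linux_pci_find() walks the registered pci_drivers list and returns the
 * first driver whose table entry matches the vendor/device/subsystem IDs.
 */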
static int
linux_pci_probe(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;

	if ((pdrv = linux_pci_find(dev, &id)) == NULL)
		return (ENXIO);
	if (device_get_driver(dev) != &pdrv->bsddriver)
		return (ENXIO);
	device_set_desc(dev, pdrv->name);

	/* Assume BSS initialized (should never return BUS_PROBE_SPECIFIC). */
	if (pdrv->bsd_probe_return == 0)
		return (BUS_PROBE_DEFAULT);
	else
		return (pdrv->bsd_probe_return);
}

static int
linux_pci_attach(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	struct pci_dev *pdev;

	pdrv = linux_pci_find(dev, &id);
	pdev = device_get_softc(dev);

	MPASS(pdrv != NULL);
	MPASS(pdev != NULL);

	return (linux_pci_attach_device(dev, pdrv, id, pdev));
}

int
linux_pci_attach_device(device_t dev, struct pci_driver *pdrv,
    const struct pci_device_id *id, struct pci_dev *pdev)
{
	struct resource_list_entry *rle;
	device_t parent;
	uintptr_t rid;
	int error;
	bool isdrm;

	linux_set_current(curthread);

	parent = device_get_parent(dev);
	isdrm = pdrv != NULL && pdrv->isdrm;

	if (isdrm) {
		struct pci_devinfo *dinfo;

		dinfo = device_get_ivars(parent);
		device_set_ivars(dev, dinfo);
	}

	lkpifill_pci_dev(dev, pdev);
	if (isdrm)
		PCI_GET_ID(device_get_parent(parent), parent, PCI_ID_RID, &rid);
	else
		PCI_GET_ID(parent, dev, PCI_ID_RID, &rid);
	pdev->devfn = rid;
	pdev->pdrv = pdrv;
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0, false);
	if (rle != NULL)
		pdev->dev.irq = rle->start;
	else
		pdev->dev.irq = LINUX_IRQ_INVALID;
	pdev->irq = pdev->dev.irq;
	error = linux_pdev_dma_init(pdev);
	if (error)
		goto out_dma_init;

	TAILQ_INIT(&pdev->mmio);

	spin_lock(&pci_lock);
	list_add(&pdev->links, &pci_devices);
	spin_unlock(&pci_lock);

	if (pdrv != NULL) {
		error = pdrv->probe(pdev, id);
		if (error)
			goto out_probe;
	}
	return (0);

out_probe:
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);
out_dma_init:
	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);
	return (-error);
}

static int
linux_pci_detach(device_t dev)
{
	struct pci_dev *pdev;

	pdev = device_get_softc(dev);

	MPASS(pdev != NULL);

	device_set_desc(dev, NULL);

	return (linux_pci_detach_device(pdev));
}

int
linux_pci_detach_device(struct pci_dev *pdev)
{

	linux_set_current(curthread);

	if (pdev->pdrv != NULL)
		pdev->pdrv->remove(pdev);

	if (pdev->root != NULL)
		pci_dev_put(pdev->root);
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);

	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);

	return (0);
}

static int
lkpi_pci_disable_dev(struct device *dev)
{

	(void) pci_disable_io(dev->bsddev, SYS_RES_MEMORY);
	(void) pci_disable_io(dev->bsddev, SYS_RES_IOPORT);
	return (0);
}

struct pci_devres *
lkpi_pci_devres_get_alloc(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = lkpi_devres_find(&pdev->dev, lkpi_pci_devres_release, NULL, NULL);
	if (dr == NULL) {
		dr = lkpi_devres_alloc(lkpi_pci_devres_release, sizeof(*dr),
		    GFP_KERNEL | __GFP_ZERO);
		if (dr != NULL)
			lkpi_devres_add(&pdev->dev, dr);
	}

	return (dr);
}

void
lkpi_pci_devres_release(struct device *dev, void *p)
{
	struct pci_devres *dr;
	struct pci_dev *pdev;
	int bar;

	pdev = to_pci_dev(dev);
	dr = p;

	if (pdev->msix_enabled)
		lkpi_pci_disable_msix(pdev);
	if (pdev->msi_enabled)
		lkpi_pci_disable_msi(pdev);
	if (dr->enable_io && lkpi_pci_disable_dev(dev) == 0)
		dr->enable_io = false;

	if (dr->region_mask == 0)
		return;
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {

		if ((dr->region_mask & (1 << bar)) == 0)
			continue;
		pci_release_region(pdev, bar);
	}
}

struct pcim_iomap_devres *
lkpi_pcim_iomap_devres_find(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr;

	dr = lkpi_devres_find(&pdev->dev, lkpi_pcim_iomap_table_release,
	    NULL, NULL);
	if (dr == NULL) {
		dr = lkpi_devres_alloc(lkpi_pcim_iomap_table_release,
		    sizeof(*dr), GFP_KERNEL | __GFP_ZERO);
		if (dr != NULL)
			lkpi_devres_add(&pdev->dev, dr);
	}

	if (dr == NULL)
		device_printf(pdev->dev.bsddev, "%s: NULL\n", __func__);

	return (dr);
}

void
lkpi_pcim_iomap_table_release(struct device *dev, void *p)
{
	struct pcim_iomap_devres *dr;
	struct pci_dev *pdev;
	int bar;

	dr = p;
	pdev = to_pci_dev(dev);
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {

		if (dr->mmio_table[bar] == NULL)
			continue;

		pci_iounmap(pdev, dr->mmio_table[bar]);
	}
}

static int
linux_pci_suspend(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pm_message pm = { };
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->suspend != NULL)
		error = -pdev->pdrv->suspend(pdev, pm);
	else if (pmops != NULL && pmops->suspend != NULL) {
		error = -pmops->suspend(&pdev->dev);
		if (error == 0 && pmops->suspend_late != NULL)
			error = -pmops->suspend_late(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_resume(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->resume != NULL)
		error = -pdev->pdrv->resume(pdev);
	else if (pmops != NULL && pmops->resume != NULL) {
		if (pmops->resume_early != NULL)
			error = -pmops->resume_early(&pdev->dev);
		if (error == 0 && pmops->resume != NULL)
			error = -pmops->resume(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_shutdown(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->shutdown != NULL)
		pdev->pdrv->shutdown(pdev);
	return (0);
}

static int
linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_init != NULL)
		error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config);
	else
		error = EINVAL;
	return (error);
}

static void
linux_pci_iov_uninit(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_uninit != NULL)
		pdev->pdrv->bsd_iov_uninit(dev);
}

static int
linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_add_vf != NULL)
		error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config);
	else
		error = EINVAL;
	return (error);
}

static int
_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
{
	int error;

	linux_set_current(curthread);
	spin_lock(&pci_lock);
	list_add(&pdrv->node, &pci_drivers);
	spin_unlock(&pci_lock);
	if (pdrv->bsddriver.name == NULL)
		pdrv->bsddriver.name = pdrv->name;
	pdrv->bsddriver.methods = pci_methods;
	pdrv->bsddriver.size = sizeof(struct pci_dev);

	bus_topo_lock();
	error = devclass_add_driver(dc, &pdrv->bsddriver,
	    BUS_PASS_DEFAULT, &pdrv->bsdclass);
	bus_topo_unlock();
	return (-error);
}

int
linux_pci_register_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_find("pci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = false;
	return (_linux_pci_register_driver(pdrv, dc));
}

struct resource_list_entry *
linux_pci_reserve_bar(struct pci_dev *pdev, struct resource_list *rl,
    int type, int rid)
{
	device_t dev;
	struct resource *res;

	KASSERT(type == SYS_RES_IOPORT || type == SYS_RES_MEMORY,
	    ("trying to reserve non-BAR type %d", type));

	dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	res = pci_reserve_map(device_get_parent(dev), dev, type, &rid, 0, ~0,
	    1, 1, 0);
	if (res == NULL)
		return (NULL);
	return (resource_list_find(rl, type, rid));
}

unsigned long
pci_resource_start(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;
	rman_res_t newstart;
	device_t dev;
	int error;

	if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL)
		return (0);
	dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	error = bus_translate_resource(dev, rle->type, rle->start, &newstart);
	if (error != 0) {
		device_printf(pdev->dev.bsddev,
		    "translate of %#jx failed: %d\n",
		    (uintmax_t)rle->start, error);
		return (0);
	}
	return (newstart);
}

unsigned long
pci_resource_len(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;

	if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL)
		return (0);
	return (rle->count);
}

int
pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	struct resource *res;
	struct pci_devres *dr;
	struct pci_mmio_region *mmio;
	int rid;
	int type;

	type = pci_resource_type(pdev, bar);
	if (type < 0)
		return (-ENODEV);
	rid = PCIR_BAR(bar);
	res = bus_alloc_resource_any(pdev->dev.bsddev, type, &rid,
	    RF_ACTIVE|RF_SHAREABLE);
	if (res == NULL) {
		device_printf(pdev->dev.bsddev, "%s: failed to alloc "
		    "bar %d type %d rid %d\n",
		    __func__, bar, type, PCIR_BAR(bar));
		return (-ENODEV);
	}

	/*
	 * It seems there is an implicit devres tracking on these if the device
	 * is managed; otherwise the resources are not automatically freed on
	 * FreeBSD/LinuxKPI though they should be/are expected to be by Linux
	 * drivers.
	 */
	dr = lkpi_pci_devres_find(pdev);
	if (dr != NULL) {
		dr->region_mask |= (1 << bar);
		dr->region_table[bar] = res;
	}

	/* Even if the device is not managed we need to track it for iomap. */
	mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);
	mmio->rid = PCIR_BAR(bar);
	mmio->type = type;
	mmio->res = res;
	TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);

	return (0);
}
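/*
 * Illustrative example (BAR index and name are hypothetical): a Linux
 * driver typically pairs the region request with an iomap, e.g.
 *
 *	error = pci_request_region(pdev, 0, "foo-bar0");
 *	if (error != 0)
 *		return (error);
 *	regs = pci_iomap(pdev, 0, 0);
 *	if (regs == NULL)
 *		return (-ENOMEM);
 *
 * On LinuxKPI the iomap ends up in _lkpi_pci_iomap() below, which reuses
 * the resource already allocated by pci_request_region() when present.
 */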
struct resource *
_lkpi_pci_iomap(struct pci_dev *pdev, int bar, int mmio_size __unused)
{
	struct pci_mmio_region *mmio, *p;
	int type;

	type = pci_resource_type(pdev, bar);
	if (type < 0) {
		device_printf(pdev->dev.bsddev, "%s: bar %d type %d\n",
		     __func__, bar, type);
		return (NULL);
	}

	/*
	 * Check for duplicate mappings.
	 * This can happen if a driver calls pci_request_region() first.
	 */
	TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
		if (mmio->type == type && mmio->rid == PCIR_BAR(bar)) {
			return (mmio->res);
		}
	}

	mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);
	mmio->rid = PCIR_BAR(bar);
	mmio->type = type;
	mmio->res = bus_alloc_resource_any(pdev->dev.bsddev, mmio->type,
	    &mmio->rid, RF_ACTIVE|RF_SHAREABLE);
	if (mmio->res == NULL) {
		device_printf(pdev->dev.bsddev, "%s: failed to alloc "
		    "bar %d type %d rid %d\n",
		    __func__, bar, type, PCIR_BAR(bar));
		free(mmio, M_DEVBUF);
		return (NULL);
	}
	TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);

	return (mmio->res);
}

int
linux_pci_register_drm_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_create("vgapci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = true;
	pdrv->name = "drmn";
	return (_linux_pci_register_driver(pdrv, dc));
}

void
linux_pci_unregister_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("pci");

	spin_lock(&pci_lock);
	list_del(&pdrv->node);
	spin_unlock(&pci_lock);
	bus_topo_lock();
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	bus_topo_unlock();
}

void
linux_pci_unregister_drm_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("vgapci");

	spin_lock(&pci_lock);
	list_del(&pdrv->node);
	spin_unlock(&pci_lock);
	bus_topo_lock();
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	bus_topo_unlock();
}

int
pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv,
    unsigned int flags)
{
	int error;

	if (flags & PCI_IRQ_MSIX) {
		struct msix_entry *entries;
		int i;

		entries = kcalloc(maxv, sizeof(*entries), GFP_KERNEL);
		if (entries == NULL) {
			error = -ENOMEM;
			goto out;
		}
		for (i = 0; i < maxv; ++i)
			entries[i].entry = i;
		error = pci_enable_msix(pdev, entries, maxv);
out:
		kfree(entries);
		if (error == 0 && pdev->msix_enabled)
			return (pdev->dev.irq_end - pdev->dev.irq_start);
	}
	if (flags & PCI_IRQ_MSI) {
		if (pci_msi_count(pdev->dev.bsddev) < minv)
			return (-ENOSPC);
		error = _lkpi_pci_enable_msi_range(pdev, minv, maxv);
		if (error == 0 && pdev->msi_enabled)
			return (pdev->dev.irq_end - pdev->dev.irq_start);
	}
	if (flags & PCI_IRQ_LEGACY) {
		if (pdev->irq)
			return (1);
	}

	return (-EINVAL);
}
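/*
 * Illustrative example (the vector counts are hypothetical): a Linux driver
 * asks for the best available interrupt type and then resolves vectors, e.g.
 *
 *	nvec = pci_alloc_irq_vectors(pdev, 1, 8,
 *	    PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
 *	if (nvec < 0)
 *		return (nvec);
 *	irq = pci_irq_vector(pdev, 0);
 *
 * The flags are honoured in the order MSI-X, MSI, then legacy INTx,
 * matching the fall-through logic of pci_alloc_irq_vectors() above.
 */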
struct msi_desc *
lkpi_pci_msi_desc_alloc(int irq)
{
	struct device *dev;
	struct pci_dev *pdev;
	struct msi_desc *desc;
	struct pci_devinfo *dinfo;
	struct pcicfg_msi *msi;
	int vec;

	dev = linux_pci_find_irq_dev(irq);
	if (dev == NULL)
		return (NULL);

	pdev = to_pci_dev(dev);

	if (pdev->msi_desc == NULL)
		return (NULL);

	if (irq < pdev->dev.irq_start || irq >= pdev->dev.irq_end)
		return (NULL);

	vec = pdev->dev.irq_start - irq;

	if (pdev->msi_desc[vec] != NULL)
		return (pdev->msi_desc[vec]);

	dinfo = device_get_ivars(dev->bsddev);
	msi = &dinfo->cfg.msi;

	desc = malloc(sizeof(*desc), M_DEVBUF, M_WAITOK | M_ZERO);

	desc->pci.msi_attrib.is_64 =
	    (msi->msi_ctrl & PCIM_MSICTRL_64BIT) ? true : false;
	desc->msg.data = msi->msi_data;

	pdev->msi_desc[vec] = desc;

	return (desc);
}

bool
pci_device_is_present(struct pci_dev *pdev)
{
	device_t dev;

	dev = pdev->dev.bsddev;

	return (bus_child_present(dev));
}

CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));

struct linux_dma_obj {
	void		*vaddr;
	uint64_t	dma_addr;
	bus_dmamap_t	dmamap;
	bus_dma_tag_t	dmat;
};

static uma_zone_t linux_dma_trie_zone;
static uma_zone_t linux_dma_obj_zone;

static void
linux_dma_init(void *arg)
{

	linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
	    pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
	    UMA_ALIGN_PTR, 0);
	linux_dma_obj_zone = uma_zcreate("linux_dma_object",
	    sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	lkpi_pci_nseg1_fail = counter_u64_alloc(M_WAITOK);
}
SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);

static void
linux_dma_uninit(void *arg)
{

	counter_u64_free(lkpi_pci_nseg1_fail);
	uma_zdestroy(linux_dma_obj_zone);
	uma_zdestroy(linux_dma_trie_zone);
}
SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);

static void *
linux_dma_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT));
}

static void
linux_dma_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(linux_dma_trie_zone, node);
}

PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
    linux_dma_trie_free);

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
static dma_addr_t
linux_dma_map_phys_common(struct device *dev, vm_paddr_t phys, size_t len,
    bus_dma_tag_t dmat)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	/*
	 * If the resultant mapping will be entirely 1:1 with the
	 * physical address, short-circuit the remainder of the
	 * bus_dma API.  This avoids tracking collisions in the pctrie
	 * with the additional benefit of reducing overhead.
	 */
	if (bus_dma_id_mapped(dmat, phys, len))
		return (phys);

	obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT);
	if (obj == NULL) {
		return (0);
	}
	obj->dmat = dmat;

	DMA_PRIV_LOCK(priv);
	if (bus_dmamap_create(obj->dmat, 0, &obj->dmamap) != 0) {
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	nseg = -1;
	if (_bus_dmamap_load_phys(obj->dmat, obj->dmamap, phys, len,
	    BUS_DMA_NOWAIT, &seg, &nseg) != 0) {
		bus_dmamap_destroy(obj->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		counter_u64_add(lkpi_pci_nseg1_fail, 1);
		if (linuxkpi_debug)
			dump_stack();
		return (0);
	}

	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
	if (error != 0) {
		bus_dmamap_unload(obj->dmat, obj->dmamap);
		bus_dmamap_destroy(obj->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}
	DMA_PRIV_UNLOCK(priv);
	return (obj->dma_addr);
}
#else
static dma_addr_t
linux_dma_map_phys_common(struct device *dev __unused, vm_paddr_t phys,
    size_t len __unused, bus_dma_tag_t dmat __unused)
{
	return (phys);
}
#endif

dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;
	return (linux_dma_map_phys_common(dev, phys, len, priv->dmat));
}

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);
	bus_dmamap_unload(obj->dmat, obj->dmamap);
	bus_dmamap_destroy(obj->dmat, obj->dmamap);
	DMA_PRIV_UNLOCK(priv);

	uma_zfree(linux_dma_obj_zone, obj);
}
#else
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
}
#endif

void *
linux_dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t flag)
{
	struct linux_dma_priv *priv;
	vm_paddr_t high;
	size_t align;
	void *mem;

	if (dev == NULL || dev->dma_priv == NULL) {
		*dma_handle = 0;
		return (NULL);
	}
	priv = dev->dma_priv;
	if (priv->dma_coherent_mask)
		high = priv->dma_coherent_mask;
	else
		/* Coherent is lower 32bit only by default in Linux. */
		high = BUS_SPACE_MAXADDR_32BIT;
	align = PAGE_SIZE << get_order(size);
	/* Always zero the allocation. */
	flag |= M_ZERO;
	mem = kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high,
	    align, 0, VM_MEMATTR_DEFAULT);
	if (mem != NULL) {
		*dma_handle = linux_dma_map_phys_common(dev, vtophys(mem), size,
		    priv->dmat_coherent);
		if (*dma_handle == 0) {
			kmem_free(mem, size);
			mem = NULL;
		}
	} else {
		*dma_handle = 0;
	}
	return (mem);
}

struct lkpi_devres_dmam_coherent {
	size_t		size;
	dma_addr_t	*handle;
	void		*mem;
};

static void
lkpi_dmam_free_coherent(struct device *dev, void *p)
{
	struct lkpi_devres_dmam_coherent *dr;

	dr = p;
	dma_free_coherent(dev, dr->size, dr->mem, *dr->handle);
}

void *
linuxkpi_dmam_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
    gfp_t flag)
{
	struct lkpi_devres_dmam_coherent *dr;

	dr = lkpi_devres_alloc(lkpi_dmam_free_coherent,
	    sizeof(*dr), GFP_KERNEL | __GFP_ZERO);

	if (dr == NULL)
		return (NULL);

	dr->size = size;
	dr->mem = linux_dma_alloc_coherent(dev, size, dma_handle, flag);
	dr->handle = dma_handle;
	if (dr->mem == NULL) {
		lkpi_devres_free(dr);
		return (NULL);
	}

	lkpi_devres_add(dev, dr);
	return (dr->mem);
}

void
linuxkpi_dma_sync(struct device *dev, dma_addr_t dma_addr, size_t size,
    bus_dmasync_op_t op)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}

	bus_dmamap_sync(obj->dmat, obj->dmamap, op);
	DMA_PRIV_UNLOCK(priv);
}
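/*
 * Illustrative example (names are hypothetical): the scatter/gather helpers
 * below back the usual Linux streaming DMA calls, e.g.
 *
 *	nents = dma_map_sg(&pdev->dev, sgl, count, DMA_TO_DEVICE);
 *	if (nents == 0)
 *		return (-ENOMEM);
 *	...
 *	dma_unmap_sg(&pdev->dev, sgl, count, DMA_TO_DEVICE);
 *
 * The whole list is loaded through a single busdma map created on the
 * first scatterlist entry, so it must also be unmapped as a whole.
 */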
int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction direction, unsigned long attrs __unused)
{
	struct linux_dma_priv *priv;
	struct scatterlist *sg;
	int i, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	/* create common DMA map in the first S/G entry */
	if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
		DMA_PRIV_UNLOCK(priv);
		return (0);
	}

	/* load all S/G list entries */
	for_each_sg(sgl, sg, nents, i) {
		nseg = -1;
		if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
		    sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
		    &seg, &nseg) != 0) {
			bus_dmamap_unload(priv->dmat, sgl->dma_map);
			bus_dmamap_destroy(priv->dmat, sgl->dma_map);
			DMA_PRIV_UNLOCK(priv);
			return (0);
		}
		KASSERT(nseg == 0,
		    ("More than one segment (nseg=%d)", nseg + 1));

		sg_dma_address(sg) = seg.ds_addr;
	}

	switch (direction) {
	case DMA_BIDIRECTIONAL:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE);
		break;
	case DMA_TO_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD);
		break;
	case DMA_FROM_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE);
		break;
	default:
		break;
	}

	DMA_PRIV_UNLOCK(priv);

	return (nents);
}

void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents __unused, enum dma_data_direction direction,
    unsigned long attrs __unused)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	switch (direction) {
	case DMA_BIDIRECTIONAL:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD);
		break;
	case DMA_TO_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTWRITE);
		break;
	case DMA_FROM_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD);
		break;
	default:
		break;
	}

	bus_dmamap_unload(priv->dmat, sgl->dma_map);
	bus_dmamap_destroy(priv->dmat, sgl->dma_map);
	DMA_PRIV_UNLOCK(priv);
}

struct dma_pool {
	struct device	*pool_device;
	uma_zone_t	pool_zone;
	struct mtx	pool_lock;
	bus_dma_tag_t	pool_dmat;
	size_t		pool_entry_size;
	struct pctrie	pool_ptree;
};

#define	DMA_POOL_LOCK(pool) mtx_lock(&(pool)->pool_lock)
#define	DMA_POOL_UNLOCK(pool) mtx_unlock(&(pool)->pool_lock)

static inline int
dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;
	int error, nseg;
	bus_dma_segment_t seg;

	nseg = -1;
	DMA_POOL_LOCK(pool);
	error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
	    vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
	    &seg, &nseg);
	DMA_POOL_UNLOCK(pool);
	if (error != 0) {
		return (error);
	}
	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	return (0);
}

static void
dma_pool_obj_dtor(void *mem, int size, void *arg)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;

	DMA_POOL_LOCK(pool);
	bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
	DMA_POOL_UNLOCK(pool);
}

static int
dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
	struct dma_pool *pool = arg;
	struct linux_dma_obj *obj;
	int error, i;

	for (i = 0; i < count; i++) {
		obj = uma_zalloc(linux_dma_obj_zone, flags);
		if (obj == NULL)
			break;

		error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
		    BUS_DMA_NOWAIT, &obj->dmamap);
		if (error != 0) {
			uma_zfree(linux_dma_obj_zone, obj);
			break;
		}

		store[i] = obj;
	}

	return (i);
}

static void
dma_pool_obj_release(void *arg, void **store, int count)
{
	struct dma_pool *pool = arg;
	struct linux_dma_obj *obj;
	int i;

	for (i = 0; i < count; i++) {
		obj = store[i];
		bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
		uma_zfree(linux_dma_obj_zone, obj);
	}
}

struct dma_pool *
linux_dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
{
	struct linux_dma_priv *priv;
	struct dma_pool *pool;

	priv = dev->dma_priv;

	pool = kzalloc(sizeof(*pool), M_WAITOK);
	pool->pool_device = dev;
	pool->pool_entry_size = size;

	if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    align, boundary,		/* alignment, boundary */
	    priv->dma_mask,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &pool->pool_dmat)) {
		kfree(pool);
		return (NULL);
	}

	pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,
	    dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,
	    dma_pool_obj_release, pool, 0);

	mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF);
	pctrie_init(&pool->pool_ptree);

	return (pool);
}

void
linux_dma_pool_destroy(struct dma_pool *pool)
{

	uma_zdestroy(pool->pool_zone);
	bus_dma_tag_destroy(pool->pool_dmat);
	mtx_destroy(&pool->pool_lock);
	kfree(pool);
}

void
lkpi_dmam_pool_destroy(struct device *dev, void *p)
{
	struct dma_pool *pool;

	pool = *(struct dma_pool **)p;
	LINUX_DMA_PCTRIE_RECLAIM(&pool->pool_ptree);
	linux_dma_pool_destroy(pool);
}

void *
linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    dma_addr_t *handle)
{
	struct linux_dma_obj *obj;

	obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags & GFP_NATIVE_MASK);
	if (obj == NULL)
		return (NULL);

	DMA_POOL_LOCK(pool);
	if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
		DMA_POOL_UNLOCK(pool);
		uma_zfree_arg(pool->pool_zone, obj, pool);
		return (NULL);
	}
	DMA_POOL_UNLOCK(pool);

	*handle = obj->dma_addr;
	return (obj->vaddr);
}

void
linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
{
	struct linux_dma_obj *obj;

	DMA_POOL_LOCK(pool);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
	if (obj == NULL) {
		DMA_POOL_UNLOCK(pool);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
	DMA_POOL_UNLOCK(pool);

	uma_zfree_arg(pool->pool_zone, obj, pool);
}
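/*
 * Illustrative example (sizes and names are hypothetical): a Linux driver
 * consumes the pool through the standard dma_pool API, e.g.
 *
 *	pool = dma_pool_create("foo-desc", &pdev->dev, 64, 8, 0);
 *	desc = dma_pool_alloc(pool, GFP_KERNEL, &desc_dma);
 *	...
 *	dma_pool_free(pool, desc, desc_dma);
 *	dma_pool_destroy(pool);
 *
 * Each allocation is backed by a UMA cache object whose busdma map was
 * loaded in dma_pool_obj_ctor() above, so the returned DMA address stays
 * valid until the object is freed back to the pool.
 */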
static int
linux_backlight_get_status(device_t dev, struct backlight_props *props)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	props->brightness = pdev->dev.bd->props.brightness;
	props->brightness = props->brightness * 100 / pdev->dev.bd->props.max_brightness;
	props->nlevels = 0;

	return (0);
}

static int
linux_backlight_get_info(device_t dev, struct backlight_info *info)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	info->type = BACKLIGHT_TYPE_PANEL;
	strlcpy(info->name, pdev->dev.bd->name, BACKLIGHTMAXNAMELENGTH);
	return (0);
}

static int
linux_backlight_update_status(device_t dev, struct backlight_props *props)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	pdev->dev.bd->props.brightness = pdev->dev.bd->props.max_brightness *
	    props->brightness / 100;
	pdev->dev.bd->props.power = props->brightness == 0 ?
	    4/* FB_BLANK_POWERDOWN */ : 0/* FB_BLANK_UNBLANK */;
	return (pdev->dev.bd->ops->update_status(pdev->dev.bd));
}

struct backlight_device *
linux_backlight_device_register(const char *name, struct device *dev,
    void *data, const struct backlight_ops *ops, struct backlight_properties *props)
{

	dev->bd = malloc(sizeof(*dev->bd), M_DEVBUF, M_WAITOK | M_ZERO);
	dev->bd->ops = ops;
	dev->bd->props.type = props->type;
	dev->bd->props.max_brightness = props->max_brightness;
	dev->bd->props.brightness = props->brightness;
	dev->bd->props.power = props->power;
	dev->bd->data = data;
	dev->bd->dev = dev;
	dev->bd->name = strdup(name, M_DEVBUF);

	dev->backlight_dev = backlight_register(name, dev->bsddev);

	return (dev->bd);
}

void
linux_backlight_device_unregister(struct backlight_device *bd)
{

	backlight_destroy(bd->dev->backlight_dev);
	free(bd->name, M_DEVBUF);
	free(bd, M_DEVBUF);
}