/*-
 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 * Copyright (c) 2020-2022 The FreeBSD Foundation
 *
 * Portions of this software were developed by Björn Zeeb
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/pciio.h>
#include <sys/pctrie.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/stdarg.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <dev/pci/pci_iov.h>
#include <dev/backlight/backlight.h>

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/compat.h>

#include <linux/backlight.h>

#include "backlight_if.h"
#include "pcib_if.h"

/* Undef the linux function macro defined in linux/pci.h */
#undef pci_get_class

extern int linuxkpi_debug;

SYSCTL_DECL(_compat_linuxkpi);

static counter_u64_t lkpi_pci_nseg1_fail;
SYSCTL_COUNTER_U64(_compat_linuxkpi, OID_AUTO, lkpi_pci_nseg1_fail, CTLFLAG_RD,
    &lkpi_pci_nseg1_fail, "Count of single-segment busdma mapping failures");

static device_probe_t linux_pci_probe;
static device_attach_t linux_pci_attach;
static device_detach_t linux_pci_detach;
static device_suspend_t linux_pci_suspend;
static device_resume_t linux_pci_resume;
static device_shutdown_t linux_pci_shutdown;
static pci_iov_init_t linux_pci_iov_init;
static pci_iov_uninit_t linux_pci_iov_uninit;
static pci_iov_add_vf_t linux_pci_iov_add_vf;
static int linux_backlight_get_status(device_t dev, struct backlight_props *props);
static int
linux_backlight_update_status(device_t dev, struct backlight_props *props);
static int linux_backlight_get_info(device_t dev, struct backlight_info *info);
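/*
 * Newbus method table shared by every LinuxKPI PCI driver.  Each
 * registered struct pci_driver wraps a bsddriver that points at these
 * methods, so probe/attach/detach, power management, SR-IOV, and the
 * backlight interface all funnel through the translation layer below.
 */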
static device_method_t pci_methods[] = {
	DEVMETHOD(device_probe, linux_pci_probe),
	DEVMETHOD(device_attach, linux_pci_attach),
	DEVMETHOD(device_detach, linux_pci_detach),
	DEVMETHOD(device_suspend, linux_pci_suspend),
	DEVMETHOD(device_resume, linux_pci_resume),
	DEVMETHOD(device_shutdown, linux_pci_shutdown),
	DEVMETHOD(pci_iov_init, linux_pci_iov_init),
	DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf),

	/* backlight interface */
	DEVMETHOD(backlight_update_status, linux_backlight_update_status),
	DEVMETHOD(backlight_get_status, linux_backlight_get_status),
	DEVMETHOD(backlight_get_info, linux_backlight_get_info),
	DEVMETHOD_END
};

const char *pci_power_names[] = {
	"UNKNOWN", "D0", "D1", "D2", "D3hot", "D3cold"
};

struct linux_dma_priv {
	uint64_t	dma_mask;
	bus_dma_tag_t	dmat;
	uint64_t	dma_coherent_mask;
	bus_dma_tag_t	dmat_coherent;
	struct mtx	lock;
	struct pctrie	ptree;
};
#define	DMA_PRIV_LOCK(priv)	mtx_lock(&(priv)->lock)
#define	DMA_PRIV_UNLOCK(priv)	mtx_unlock(&(priv)->lock)

static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;

	priv = pdev->dev.dma_priv;
	if (priv->dmat)
		bus_dma_tag_destroy(priv->dmat);
	if (priv->dmat_coherent)
		bus_dma_tag_destroy(priv->dmat_coherent);
	mtx_destroy(&priv->lock);
	pdev->dev.dma_priv = NULL;
	free(priv, M_DEVBUF);
	return (0);
}

static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;
	int error;

	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);

	mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);
	pctrie_init(&priv->ptree);

	pdev->dev.dma_priv = priv;

	/* Create default DMA tags. */
	error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
	if (error != 0)
		goto err;
	/* Coherent is lower 32bit only by default in Linux. */
	error = linux_dma_tag_init_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (error != 0)
		goto err;

	return (error);

err:
	linux_pdev_dma_uninit(pdev);
	return (error);
}
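/*
 * (Re-)create the busdma tag used for streaming mappings on this
 * device.  An existing tag is destroyed and rebuilt only when the
 * requested DMA mask actually changes; the negated return value
 * mirrors the Linux -errno convention.
 */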
int
linux_dma_tag_init(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat) {
		if (priv->dma_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat);
	}

	priv->dma_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat);
	return (-error);
}

int
linux_dma_tag_init_coherent(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat_coherent) {
		if (priv->dma_coherent_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat_coherent);
	}

	priv->dma_coherent_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat_coherent);
	return (-error);
}

static struct pci_driver *
linux_pci_find(device_t dev, const struct pci_device_id **idp)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;
	uint16_t subdevice;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);
	subvendor = pci_get_subvendor(dev);
	subdevice = pci_get_subdevice(dev);

	spin_lock(&pci_lock);
	list_for_each_entry(pdrv, &pci_drivers, node) {
		for (id = pdrv->id_table; id->vendor != 0; id++) {
			if (vendor == id->vendor &&
			    (PCI_ANY_ID == id->device || device == id->device) &&
			    (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) &&
			    (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) {
				*idp = id;
				spin_unlock(&pci_lock);
				return (pdrv);
			}
		}
	}
	spin_unlock(&pci_lock);
	return (NULL);
}

struct pci_dev *
lkpi_pci_get_device(uint16_t vendor, uint16_t device, struct pci_dev *odev)
{
	struct pci_dev *pdev;

	KASSERT(odev == NULL, ("%s: odev argument not yet supported\n", __func__));

	spin_lock(&pci_lock);
	list_for_each_entry(pdev, &pci_devices, links) {
		if (pdev->vendor == vendor && pdev->device == device)
			break;
	}
	spin_unlock(&pci_lock);

	return (pdev);
}

static void
lkpi_pci_dev_release(struct device *dev)
{

	lkpi_devres_release_free_list(dev);
	spin_lock_destroy(&dev->devres_lock);
}
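/*
 * Populate a struct pci_dev from an existing newbus device_t.  Note
 * that pdev->bus is only a minimal shadow of the real PCI hierarchy:
 * just the device's own bus number and domain are recorded, and
 * bus->self intentionally points back at the device itself (see the
 * comment in the function body).
 */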
static void
lkpifill_pci_dev(device_t dev, struct pci_dev *pdev)
{

	pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
	pdev->vendor = pci_get_vendor(dev);
	pdev->device = pci_get_device(dev);
	pdev->subsystem_vendor = pci_get_subvendor(dev);
	pdev->subsystem_device = pci_get_subdevice(dev);
	pdev->class = pci_get_class(dev);
	pdev->revision = pci_get_revid(dev);
	pdev->bus = malloc(sizeof(*pdev->bus), M_DEVBUF, M_WAITOK | M_ZERO);
	/*
	 * This should be the upstream bridge; pci_upstream_bridge()
	 * handles that case on demand as otherwise we'll shadow the
	 * entire PCI hierarchy.
	 */
	pdev->bus->self = pdev;
	pdev->bus->number = pci_get_bus(dev);
	pdev->bus->domain = pci_get_domain(dev);
	pdev->dev.bsddev = dev;
	pdev->dev.parent = &linux_root_device;
	pdev->dev.release = lkpi_pci_dev_release;
	INIT_LIST_HEAD(&pdev->dev.irqents);

	if (pci_msi_count(dev) > 0)
		pdev->msi_desc = malloc(pci_msi_count(dev) *
		    sizeof(*pdev->msi_desc), M_DEVBUF, M_WAITOK | M_ZERO);

	kobject_init(&pdev->dev.kobj, &linux_dev_ktype);
	kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev));
	kobject_add(&pdev->dev.kobj, &linux_root_device.kobj,
	    kobject_name(&pdev->dev.kobj));
	spin_lock_init(&pdev->dev.devres_lock);
	INIT_LIST_HEAD(&pdev->dev.devres_head);
}

static void
lkpinew_pci_dev_release(struct device *dev)
{
	struct pci_dev *pdev;
	int i;

	pdev = to_pci_dev(dev);
	if (pdev->root != NULL)
		pci_dev_put(pdev->root);
	if (pdev->bus->self != pdev)
		pci_dev_put(pdev->bus->self);
	free(pdev->bus, M_DEVBUF);
	if (pdev->msi_desc != NULL) {
		for (i = pci_msi_count(pdev->dev.bsddev) - 1; i >= 0; i--)
			free(pdev->msi_desc[i], M_DEVBUF);
		free(pdev->msi_desc, M_DEVBUF);
	}
	free(pdev, M_DEVBUF);
}

struct pci_dev *
lkpinew_pci_dev(device_t dev)
{
	struct pci_dev *pdev;

	pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK|M_ZERO);
	lkpifill_pci_dev(dev, pdev);
	pdev->dev.release = lkpinew_pci_dev_release;

	return (pdev);
}

struct pci_dev *
lkpi_pci_get_class(unsigned int class, struct pci_dev *from)
{
	device_t dev;
	device_t devfrom = NULL;
	struct pci_dev *pdev;

	if (from != NULL)
		devfrom = from->dev.bsddev;

	dev = pci_find_class_from(class >> 16, (class >> 8) & 0xFF, devfrom);
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

struct pci_dev *
lkpi_pci_get_domain_bus_and_slot(int domain, unsigned int bus,
    unsigned int devfn)
{
	device_t dev;
	struct pci_dev *pdev;

	dev = pci_find_dbsf(domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}
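/*
 * Newbus glue.  Probe succeeds only when this device matches an entry
 * in a registered Linux driver's id_table and the driver doing the
 * probing is the one that registered that table.
 */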
static int
linux_pci_probe(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;

	if ((pdrv = linux_pci_find(dev, &id)) == NULL)
		return (ENXIO);
	if (device_get_driver(dev) != &pdrv->bsddriver)
		return (ENXIO);
	device_set_desc(dev, pdrv->name);

	/* Assume BSS initialized (should never return BUS_PROBE_SPECIFIC). */
	if (pdrv->bsd_probe_return == 0)
		return (BUS_PROBE_DEFAULT);
	else
		return (pdrv->bsd_probe_return);
}

static int
linux_pci_attach(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	struct pci_dev *pdev;

	pdrv = linux_pci_find(dev, &id);
	pdev = device_get_softc(dev);

	MPASS(pdrv != NULL);
	MPASS(pdev != NULL);

	return (linux_pci_attach_device(dev, pdrv, id, pdev));
}

int
linux_pci_attach_device(device_t dev, struct pci_driver *pdrv,
    const struct pci_device_id *id, struct pci_dev *pdev)
{
	struct resource_list_entry *rle;
	device_t parent;
	uintptr_t rid;
	int error;
	bool isdrm;

	linux_set_current(curthread);

	parent = device_get_parent(dev);
	isdrm = pdrv != NULL && pdrv->isdrm;

	if (isdrm) {
		struct pci_devinfo *dinfo;

		dinfo = device_get_ivars(parent);
		device_set_ivars(dev, dinfo);
	}

	lkpifill_pci_dev(dev, pdev);
	if (isdrm)
		PCI_GET_ID(device_get_parent(parent), parent, PCI_ID_RID, &rid);
	else
		PCI_GET_ID(parent, dev, PCI_ID_RID, &rid);
	pdev->devfn = rid;
	pdev->pdrv = pdrv;
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0, false);
	if (rle != NULL)
		pdev->dev.irq = rle->start;
	else
		pdev->dev.irq = LINUX_IRQ_INVALID;
	pdev->irq = pdev->dev.irq;
	error = linux_pdev_dma_init(pdev);
	if (error)
		goto out_dma_init;

	TAILQ_INIT(&pdev->mmio);

	spin_lock(&pci_lock);
	list_add(&pdev->links, &pci_devices);
	spin_unlock(&pci_lock);

	if (pdrv != NULL) {
		error = pdrv->probe(pdev, id);
		if (error)
			goto out_probe;
	}
	return (0);

out_probe:
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);
out_dma_init:
	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);
	return (-error);
}

static int
linux_pci_detach(device_t dev)
{
	struct pci_dev *pdev;

	pdev = device_get_softc(dev);

	MPASS(pdev != NULL);

	device_set_desc(dev, NULL);

	return (linux_pci_detach_device(pdev));
}

int
linux_pci_detach_device(struct pci_dev *pdev)
{

	linux_set_current(curthread);

	if (pdev->pdrv != NULL)
		pdev->pdrv->remove(pdev);

	if (pdev->root != NULL)
		pci_dev_put(pdev->root);
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);

	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);

	return (0);
}

static int
lkpi_pci_disable_dev(struct device *dev)
{

	(void) pci_disable_io(dev->bsddev, SYS_RES_MEMORY);
	(void) pci_disable_io(dev->bsddev, SYS_RES_IOPORT);
	return (0);
}

struct pci_devres *
lkpi_pci_devres_get_alloc(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = lkpi_devres_find(&pdev->dev, lkpi_pci_devres_release, NULL, NULL);
	if (dr == NULL) {
		dr = lkpi_devres_alloc(lkpi_pci_devres_release, sizeof(*dr),
		    GFP_KERNEL | __GFP_ZERO);
		if (dr != NULL)
			lkpi_devres_add(&pdev->dev, dr);
	}

	return (dr);
}
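/*
 * devres release handler for managed PCI devices: tears down MSI/MSI-X,
 * disables I/O decoding if we enabled it, and releases any BARs
 * recorded in region_mask.
 */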
void
lkpi_pci_devres_release(struct device *dev, void *p)
{
	struct pci_devres *dr;
	struct pci_dev *pdev;
	int bar;

	pdev = to_pci_dev(dev);
	dr = p;

	if (pdev->msix_enabled)
		lkpi_pci_disable_msix(pdev);
	if (pdev->msi_enabled)
		lkpi_pci_disable_msi(pdev);

	if (dr->enable_io && lkpi_pci_disable_dev(dev) == 0)
		dr->enable_io = false;

	if (dr->region_mask == 0)
		return;
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {

		if ((dr->region_mask & (1 << bar)) == 0)
			continue;
		pci_release_region(pdev, bar);
	}
}

struct pcim_iomap_devres *
lkpi_pcim_iomap_devres_find(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr;

	dr = lkpi_devres_find(&pdev->dev, lkpi_pcim_iomap_table_release,
	    NULL, NULL);
	if (dr == NULL) {
		dr = lkpi_devres_alloc(lkpi_pcim_iomap_table_release,
		    sizeof(*dr), GFP_KERNEL | __GFP_ZERO);
		if (dr != NULL)
			lkpi_devres_add(&pdev->dev, dr);
	}

	if (dr == NULL)
		device_printf(pdev->dev.bsddev, "%s: NULL\n", __func__);

	return (dr);
}

void
lkpi_pcim_iomap_table_release(struct device *dev, void *p)
{
	struct pcim_iomap_devres *dr;
	struct pci_dev *pdev;
	int bar;

	dr = p;
	pdev = to_pci_dev(dev);
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {

		if (dr->mmio_table[bar] == NULL)
			continue;

		pci_iounmap(pdev, dr->mmio_table[bar]);
	}
}

static int
linux_pci_suspend(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pm_message pm = { };
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->suspend != NULL)
		error = -pdev->pdrv->suspend(pdev, pm);
	else if (pmops != NULL && pmops->suspend != NULL) {
		error = -pmops->suspend(&pdev->dev);
		if (error == 0 && pmops->suspend_late != NULL)
			error = -pmops->suspend_late(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_resume(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->resume != NULL)
		error = -pdev->pdrv->resume(pdev);
	else if (pmops != NULL && pmops->resume != NULL) {
		if (pmops->resume_early != NULL)
			error = -pmops->resume_early(&pdev->dev);
		if (error == 0 && pmops->resume != NULL)
			error = -pmops->resume(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_shutdown(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->shutdown != NULL)
		pdev->pdrv->shutdown(pdev);
	return (0);
}

static int
linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_init != NULL)
		error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config);
	else
		error = EINVAL;
	return (error);
}

static void
linux_pci_iov_uninit(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_uninit != NULL)
		pdev->pdrv->bsd_iov_uninit(dev);
}
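/*
 * SR-IOV VF addition is likewise delegated to the driver's
 * bsd_iov_add_vf callback when one is provided.
 */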
static int
linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_add_vf != NULL)
		error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config);
	else
		error = EINVAL;
	return (error);
}

static int
_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
{
	int error;

	linux_set_current(curthread);
	spin_lock(&pci_lock);
	list_add(&pdrv->node, &pci_drivers);
	spin_unlock(&pci_lock);
	if (pdrv->bsddriver.name == NULL)
		pdrv->bsddriver.name = pdrv->name;
	pdrv->bsddriver.methods = pci_methods;
	pdrv->bsddriver.size = sizeof(struct pci_dev);

	bus_topo_lock();
	error = devclass_add_driver(dc, &pdrv->bsddriver,
	    BUS_PASS_DEFAULT, &pdrv->bsdclass);
	bus_topo_unlock();
	return (-error);
}

int
linux_pci_register_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_find("pci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = false;
	return (_linux_pci_register_driver(pdrv, dc));
}

struct resource_list_entry *
linux_pci_reserve_bar(struct pci_dev *pdev, struct resource_list *rl,
    int type, int rid)
{
	device_t dev;
	struct resource *res;

	KASSERT(type == SYS_RES_IOPORT || type == SYS_RES_MEMORY,
	    ("trying to reserve non-BAR type %d", type));

	dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	res = pci_reserve_map(device_get_parent(dev), dev, type, &rid, 0, ~0,
	    1, 1, 0);
	if (res == NULL)
		return (NULL);
	return (resource_list_find(rl, type, rid));
}

unsigned long
pci_resource_start(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;
	rman_res_t newstart;
	device_t dev;
	int error;

	if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL)
		return (0);
	dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	error = bus_translate_resource(dev, rle->type, rle->start, &newstart);
	if (error != 0) {
		device_printf(pdev->dev.bsddev,
		    "translate of %#jx failed: %d\n",
		    (uintmax_t)rle->start, error);
		return (0);
	}
	return (newstart);
}

unsigned long
pci_resource_len(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;

	if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL)
		return (0);
	return (rle->count);
}
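/*
 * Reserve and activate a single BAR on behalf of a Linux driver.  The
 * resource is remembered both in the devres tracking (for managed
 * devices) and on the pdev's mmio list so a later pci_iomap() can
 * find it.
 */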
int
pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	struct resource *res;
	struct pci_devres *dr;
	struct pci_mmio_region *mmio;
	int rid;
	int type;

	type = pci_resource_type(pdev, bar);
	if (type < 0)
		return (-ENODEV);
	rid = PCIR_BAR(bar);
	res = bus_alloc_resource_any(pdev->dev.bsddev, type, &rid,
	    RF_ACTIVE|RF_SHAREABLE);
	if (res == NULL) {
		device_printf(pdev->dev.bsddev, "%s: failed to alloc "
		    "bar %d type %d rid %d\n",
		    __func__, bar, type, PCIR_BAR(bar));
		return (-ENODEV);
	}

	/*
	 * There is implicit devres tracking on these if the device is
	 * managed; otherwise the resources are not automatically freed on
	 * FreeBSD/LinuxKPI, though they should be/are expected to be by
	 * Linux drivers.
	 */
	dr = lkpi_pci_devres_find(pdev);
	if (dr != NULL) {
		dr->region_mask |= (1 << bar);
		dr->region_table[bar] = res;
	}

	/* Even if the device is not managed we need to track it for iomap. */
	mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);
	mmio->rid = PCIR_BAR(bar);
	mmio->type = type;
	mmio->res = res;
	TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);

	return (0);
}

struct resource *
_lkpi_pci_iomap(struct pci_dev *pdev, int bar, int mmio_size __unused)
{
	struct pci_mmio_region *mmio, *p;
	int type;

	type = pci_resource_type(pdev, bar);
	if (type < 0) {
		device_printf(pdev->dev.bsddev, "%s: bar %d type %d\n",
		     __func__, bar, type);
		return (NULL);
	}

	/*
	 * Check for duplicate mappings.
	 * This can happen if a driver calls pci_request_region() first.
	 */
	TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
		if (mmio->type == type && mmio->rid == PCIR_BAR(bar)) {
			return (mmio->res);
		}
	}

	mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);
	mmio->rid = PCIR_BAR(bar);
	mmio->type = type;
	mmio->res = bus_alloc_resource_any(pdev->dev.bsddev, mmio->type,
	    &mmio->rid, RF_ACTIVE|RF_SHAREABLE);
	if (mmio->res == NULL) {
		device_printf(pdev->dev.bsddev, "%s: failed to alloc "
		    "bar %d type %d rid %d\n",
		    __func__, bar, type, PCIR_BAR(bar));
		free(mmio, M_DEVBUF);
		return (NULL);
	}
	TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);

	return (mmio->res);
}

int
linux_pci_register_drm_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_create("vgapci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = true;
	pdrv->name = "drmn";
	return (_linux_pci_register_driver(pdrv, dc));
}

void
linux_pci_unregister_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("pci");

	spin_lock(&pci_lock);
	list_del(&pdrv->node);
	spin_unlock(&pci_lock);
	bus_topo_lock();
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	bus_topo_unlock();
}

void
linux_pci_unregister_drm_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("vgapci");

	spin_lock(&pci_lock);
	list_del(&pdrv->node);
	spin_unlock(&pci_lock);
	bus_topo_lock();
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	bus_topo_unlock();
}

int
pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv,
    unsigned int flags)
{
	int error;

	if (flags & PCI_IRQ_MSIX) {
		struct msix_entry *entries;
		int i;

		entries = kcalloc(maxv, sizeof(*entries), GFP_KERNEL);
		if (entries == NULL) {
			error = -ENOMEM;
			goto out;
		}
		for (i = 0; i < maxv; ++i)
			entries[i].entry = i;
		error = pci_enable_msix(pdev, entries, maxv);
out:
		kfree(entries);
		if (error == 0 && pdev->msix_enabled)
			return (pdev->dev.irq_end - pdev->dev.irq_start);
	}
	if (flags & PCI_IRQ_MSI) {
		if (pci_msi_count(pdev->dev.bsddev) < minv)
			return (-ENOSPC);
		error = _lkpi_pci_enable_msi_range(pdev, minv, maxv);
		if (error == 0 && pdev->msi_enabled)
			return (pdev->dev.irq_end - pdev->dev.irq_start);
	}
	if (flags & PCI_IRQ_LEGACY) {
		if (pdev->irq)
			return (1);
	}

	return (-EINVAL);
}
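/*
 * Lazily allocate the struct msi_desc for an allocated MSI/MSI-X
 * vector.  The descriptor array was sized in lkpifill_pci_dev(); the
 * vector index is the irq's offset into the device's
 * [irq_start, irq_end) range.
 */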
struct msi_desc *
lkpi_pci_msi_desc_alloc(int irq)
{
	struct device *dev;
	struct pci_dev *pdev;
	struct msi_desc *desc;
	struct pci_devinfo *dinfo;
	struct pcicfg_msi *msi;
	int vec;

	dev = linux_pci_find_irq_dev(irq);
	if (dev == NULL)
		return (NULL);

	pdev = to_pci_dev(dev);

	if (pdev->msi_desc == NULL)
		return (NULL);

	if (irq < pdev->dev.irq_start || irq >= pdev->dev.irq_end)
		return (NULL);

	/* The guard above keeps this index non-negative and in bounds. */
	vec = irq - pdev->dev.irq_start;

	if (pdev->msi_desc[vec] != NULL)
		return (pdev->msi_desc[vec]);

	dinfo = device_get_ivars(dev->bsddev);
	msi = &dinfo->cfg.msi;

	desc = malloc(sizeof(*desc), M_DEVBUF, M_WAITOK | M_ZERO);

	desc->msi_attrib.is_64 =
	    (msi->msi_ctrl & PCIM_MSICTRL_64BIT) ? true : false;
	desc->msg.data = msi->msi_data;

	pdev->msi_desc[vec] = desc;

	return (desc);
}

bool
pci_device_is_present(struct pci_dev *pdev)
{
	device_t dev;

	dev = pdev->dev.bsddev;

	return (bus_child_present(dev));
}

CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));

struct linux_dma_obj {
	void		*vaddr;
	uint64_t	dma_addr;
	bus_dmamap_t	dmamap;
	bus_dma_tag_t	dmat;
};

static uma_zone_t linux_dma_trie_zone;
static uma_zone_t linux_dma_obj_zone;

static void
linux_dma_init(void *arg)
{

	linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
	    pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
	    UMA_ALIGN_PTR, 0);
	linux_dma_obj_zone = uma_zcreate("linux_dma_object",
	    sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	lkpi_pci_nseg1_fail = counter_u64_alloc(M_WAITOK);
}
SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);

static void
linux_dma_uninit(void *arg)
{

	counter_u64_free(lkpi_pci_nseg1_fail);
	uma_zdestroy(linux_dma_obj_zone);
	uma_zdestroy(linux_dma_trie_zone);
}
SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);

static void *
linux_dma_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT));
}

static void
linux_dma_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(linux_dma_trie_zone, node);
}

PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
    linux_dma_trie_free);
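/*
 * On architectures where busdma can report a 1:1 identity mapping we
 * keep a pctrie keyed by the mapped bus address, so that unmap and
 * sync can recover the busdma map and tag for a given dma_addr_t.
 * Other architectures simply treat the physical address as the bus
 * address.
 */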
#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
static dma_addr_t
linux_dma_map_phys_common(struct device *dev, vm_paddr_t phys, size_t len,
    bus_dma_tag_t dmat)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	/*
	 * If the resultant mapping will be entirely 1:1 with the
	 * physical address, short-circuit the remainder of the
	 * bus_dma API. This avoids tracking collisions in the pctrie
	 * with the additional benefit of reducing overhead.
	 */
	if (bus_dma_id_mapped(dmat, phys, len))
		return (phys);

	obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT);
	if (obj == NULL) {
		return (0);
	}
	obj->dmat = dmat;

	DMA_PRIV_LOCK(priv);
	if (bus_dmamap_create(obj->dmat, 0, &obj->dmamap) != 0) {
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	nseg = -1;
	if (_bus_dmamap_load_phys(obj->dmat, obj->dmamap, phys, len,
	    BUS_DMA_NOWAIT, &seg, &nseg) != 0) {
		bus_dmamap_destroy(obj->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		counter_u64_add(lkpi_pci_nseg1_fail, 1);
		if (linuxkpi_debug)
			dump_stack();
		return (0);
	}

	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
	if (error != 0) {
		bus_dmamap_unload(obj->dmat, obj->dmamap);
		bus_dmamap_destroy(obj->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}
	DMA_PRIV_UNLOCK(priv);
	return (obj->dma_addr);
}
#else
static dma_addr_t
linux_dma_map_phys_common(struct device *dev __unused, vm_paddr_t phys,
    size_t len __unused, bus_dma_tag_t dmat __unused)
{
	return (phys);
}
#endif

dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;
	return (linux_dma_map_phys_common(dev, phys, len, priv->dmat));
}

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);
	bus_dmamap_unload(obj->dmat, obj->dmamap);
	bus_dmamap_destroy(obj->dmat, obj->dmamap);
	DMA_PRIV_UNLOCK(priv);

	uma_zfree(linux_dma_obj_zone, obj);
}
#else
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
}
#endif
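/*
 * dma_alloc_coherent() emulation: contiguous, suitably aligned wired
 * memory is allocated below the coherent mask (32-bit by default, as
 * in Linux) and then run through the common physical-mapping path
 * using the coherent tag.
 */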
void *
linux_dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t flag)
{
	struct linux_dma_priv *priv;
	vm_paddr_t high;
	size_t align;
	void *mem;

	if (dev == NULL || dev->dma_priv == NULL) {
		*dma_handle = 0;
		return (NULL);
	}
	priv = dev->dma_priv;
	if (priv->dma_coherent_mask)
		high = priv->dma_coherent_mask;
	else
		/* Coherent is lower 32bit only by default in Linux. */
		high = BUS_SPACE_MAXADDR_32BIT;
	align = PAGE_SIZE << get_order(size);
	/* Always zero the allocation. */
	flag |= M_ZERO;
	mem = kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high,
	    align, 0, VM_MEMATTR_DEFAULT);
	if (mem != NULL) {
		*dma_handle = linux_dma_map_phys_common(dev, vtophys(mem), size,
		    priv->dmat_coherent);
		if (*dma_handle == 0) {
			kmem_free(mem, size);
			mem = NULL;
		}
	} else {
		*dma_handle = 0;
	}
	return (mem);
}

struct lkpi_devres_dmam_coherent {
	size_t size;
	dma_addr_t *handle;
	void *mem;
};

static void
lkpi_dmam_free_coherent(struct device *dev, void *p)
{
	struct lkpi_devres_dmam_coherent *dr;

	dr = p;
	dma_free_coherent(dev, dr->size, dr->mem, *dr->handle);
}

void *
linuxkpi_dmam_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
    gfp_t flag)
{
	struct lkpi_devres_dmam_coherent *dr;

	dr = lkpi_devres_alloc(lkpi_dmam_free_coherent,
	    sizeof(*dr), GFP_KERNEL | __GFP_ZERO);

	if (dr == NULL)
		return (NULL);

	dr->size = size;
	dr->mem = linux_dma_alloc_coherent(dev, size, dma_handle, flag);
	dr->handle = dma_handle;
	if (dr->mem == NULL) {
		lkpi_devres_free(dr);
		return (NULL);
	}

	lkpi_devres_add(dev, dr);
	return (dr->mem);
}

void
linuxkpi_dma_sync(struct device *dev, dma_addr_t dma_addr, size_t size,
    bus_dmasync_op_t op)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}

	bus_dmamap_sync(obj->dmat, obj->dmamap, op);
	DMA_PRIV_UNLOCK(priv);
}

int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction direction, unsigned long attrs __unused)
{
	struct linux_dma_priv *priv;
	struct scatterlist *sg;
	int i, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	/* create common DMA map in the first S/G entry */
	if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
		DMA_PRIV_UNLOCK(priv);
		return (0);
	}

	/* load all S/G list entries */
	for_each_sg(sgl, sg, nents, i) {
		nseg = -1;
		if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
		    sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
		    &seg, &nseg) != 0) {
			bus_dmamap_unload(priv->dmat, sgl->dma_map);
			bus_dmamap_destroy(priv->dmat, sgl->dma_map);
			DMA_PRIV_UNLOCK(priv);
			return (0);
		}
		KASSERT(nseg == 0,
		    ("More than one segment (nseg=%d)", nseg + 1));

		sg_dma_address(sg) = seg.ds_addr;
	}

	switch (direction) {
	case DMA_BIDIRECTIONAL:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE);
		break;
	case DMA_TO_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD);
		break;
	case DMA_FROM_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE);
		break;
	default:
		break;
	}

	DMA_PRIV_UNLOCK(priv);

	return (nents);
}
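/*
 * Tear down a scatter/gather mapping created above: perform the
 * direction-appropriate post-DMA sync, then unload and destroy the
 * shared map stored in the first S/G entry.
 */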
void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents __unused, enum dma_data_direction direction,
    unsigned long attrs __unused)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	switch (direction) {
	case DMA_BIDIRECTIONAL:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD);
		break;
	case DMA_TO_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTWRITE);
		break;
	case DMA_FROM_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD);
		break;
	default:
		break;
	}

	bus_dmamap_unload(priv->dmat, sgl->dma_map);
	bus_dmamap_destroy(priv->dmat, sgl->dma_map);
	DMA_PRIV_UNLOCK(priv);
}

struct dma_pool {
	struct device	*pool_device;
	uma_zone_t	pool_zone;
	struct mtx	pool_lock;
	bus_dma_tag_t	pool_dmat;
	size_t		pool_entry_size;
	struct pctrie	pool_ptree;
};

#define	DMA_POOL_LOCK(pool)	mtx_lock(&(pool)->pool_lock)
#define	DMA_POOL_UNLOCK(pool)	mtx_unlock(&(pool)->pool_lock)

static inline int
dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;
	int error, nseg;
	bus_dma_segment_t seg;

	nseg = -1;
	DMA_POOL_LOCK(pool);
	error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
	    vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
	    &seg, &nseg);
	DMA_POOL_UNLOCK(pool);
	if (error != 0) {
		return (error);
	}
	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	return (0);
}

static void
dma_pool_obj_dtor(void *mem, int size, void *arg)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;

	DMA_POOL_LOCK(pool);
	bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
	DMA_POOL_UNLOCK(pool);
}

static int
dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
	struct dma_pool *pool = arg;
	struct linux_dma_obj *obj;
	int error, i;

	for (i = 0; i < count; i++) {
		obj = uma_zalloc(linux_dma_obj_zone, flags);
		if (obj == NULL)
			break;

		error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
		    BUS_DMA_NOWAIT, &obj->dmamap);
		if (error != 0) {
			uma_zfree(linux_dma_obj_zone, obj);
			break;
		}

		store[i] = obj;
	}

	return (i);
}

static void
dma_pool_obj_release(void *arg, void **store, int count)
{
	struct dma_pool *pool = arg;
	struct linux_dma_obj *obj;
	int i;

	for (i = 0; i < count; i++) {
		obj = store[i];
		bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
		uma_zfree(linux_dma_obj_zone, obj);
	}
}
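/*
 * dma_pool emulation built on a UMA cache zone: the import/release
 * callbacks allocate and free busdma memory in batches, while the
 * ctor/dtor load and unload the per-object map so cached objects keep
 * their bus address between alloc/free cycles.
 */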
struct dma_pool *
linux_dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
{
	struct linux_dma_priv *priv;
	struct dma_pool *pool;

	priv = dev->dma_priv;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	pool->pool_device = dev;
	pool->pool_entry_size = size;

	if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    align, boundary,		/* alignment, boundary */
	    priv->dma_mask,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &pool->pool_dmat)) {
		kfree(pool);
		return (NULL);
	}

	pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,
	    dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,
	    dma_pool_obj_release, pool, 0);

	mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF);
	pctrie_init(&pool->pool_ptree);

	return (pool);
}

void
linux_dma_pool_destroy(struct dma_pool *pool)
{

	uma_zdestroy(pool->pool_zone);
	bus_dma_tag_destroy(pool->pool_dmat);
	mtx_destroy(&pool->pool_lock);
	kfree(pool);
}

void
lkpi_dmam_pool_destroy(struct device *dev, void *p)
{
	struct dma_pool *pool;

	pool = *(struct dma_pool **)p;
	LINUX_DMA_PCTRIE_RECLAIM(&pool->pool_ptree);
	linux_dma_pool_destroy(pool);
}

void *
linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    dma_addr_t *handle)
{
	struct linux_dma_obj *obj;

	obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags & GFP_NATIVE_MASK);
	if (obj == NULL)
		return (NULL);

	DMA_POOL_LOCK(pool);
	if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
		DMA_POOL_UNLOCK(pool);
		uma_zfree_arg(pool->pool_zone, obj, pool);
		return (NULL);
	}
	DMA_POOL_UNLOCK(pool);

	*handle = obj->dma_addr;
	return (obj->vaddr);
}

void
linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
{
	struct linux_dma_obj *obj;

	DMA_POOL_LOCK(pool);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
	if (obj == NULL) {
		DMA_POOL_UNLOCK(pool);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
	DMA_POOL_UNLOCK(pool);

	uma_zfree_arg(pool->pool_zone, obj, pool);
}

static int
linux_backlight_get_status(device_t dev, struct backlight_props *props)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	props->brightness = pdev->dev.bd->props.brightness;
	props->brightness = props->brightness * 100 / pdev->dev.bd->props.max_brightness;
	props->nlevels = 0;

	return (0);
}

static int
linux_backlight_get_info(device_t dev, struct backlight_info *info)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	info->type = BACKLIGHT_TYPE_PANEL;
	strlcpy(info->name, pdev->dev.bd->name, BACKLIGHTMAXNAMELENGTH);
	return (0);
}
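/*
 * Update the Linux-side brightness and power state from a backlight(9)
 * request; brightness is scaled from the interface's 0..100 range back
 * to the driver's 0..max_brightness range.
 */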
static int
linux_backlight_update_status(device_t dev, struct backlight_props *props)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	pdev->dev.bd->props.brightness = pdev->dev.bd->props.max_brightness *
	    props->brightness / 100;
	pdev->dev.bd->props.power = props->brightness == 0 ?
	    4/* FB_BLANK_POWERDOWN */ : 0/* FB_BLANK_UNBLANK */;
	return (pdev->dev.bd->ops->update_status(pdev->dev.bd));
}

struct backlight_device *
linux_backlight_device_register(const char *name, struct device *dev,
    void *data, const struct backlight_ops *ops, struct backlight_properties *props)
{

	dev->bd = malloc(sizeof(*dev->bd), M_DEVBUF, M_WAITOK | M_ZERO);
	dev->bd->ops = ops;
	dev->bd->props.type = props->type;
	dev->bd->props.max_brightness = props->max_brightness;
	dev->bd->props.brightness = props->brightness;
	dev->bd->props.power = props->power;
	dev->bd->data = data;
	dev->bd->dev = dev;
	dev->bd->name = strdup(name, M_DEVBUF);

	dev->backlight_dev = backlight_register(name, dev->bsddev);

	return (dev->bd);
}

void
linux_backlight_device_unregister(struct backlight_device *bd)
{

	backlight_destroy(bd->dev->backlight_dev);
	free(bd->name, M_DEVBUF);
	free(bd, M_DEVBUF);
}