/*-
 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 * Copyright (c) 2020-2022 The FreeBSD Foundation
 *
 * Portions of this software were developed by Björn Zeeb
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/pciio.h>
#include <sys/pctrie.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/stdarg.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <dev/pci/pci_iov.h>
#include <dev/backlight/backlight.h>

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/compat.h>

#include <linux/backlight.h>

#include "backlight_if.h"
#include "pcib_if.h"

/* Undef the linux function macro defined in linux/pci.h */
#undef pci_get_class

extern int linuxkpi_debug;

SYSCTL_DECL(_compat_linuxkpi);

static counter_u64_t lkpi_pci_nseg1_fail;
SYSCTL_COUNTER_U64(_compat_linuxkpi, OID_AUTO, lkpi_pci_nseg1_fail, CTLFLAG_RD,
    &lkpi_pci_nseg1_fail, "Count of single-segment busdma mapping failures");

static device_probe_t linux_pci_probe;
static device_attach_t linux_pci_attach;
static device_detach_t linux_pci_detach;
static device_suspend_t linux_pci_suspend;
static device_resume_t linux_pci_resume;
static device_shutdown_t linux_pci_shutdown;
static pci_iov_init_t linux_pci_iov_init;
static pci_iov_uninit_t linux_pci_iov_uninit;
static pci_iov_add_vf_t linux_pci_iov_add_vf;
static int linux_backlight_get_status(device_t dev, struct backlight_props *props);
static int
linux_backlight_update_status(device_t dev, struct backlight_props *props);
static int linux_backlight_get_info(device_t dev, struct backlight_info *info);

static device_method_t pci_methods[] = {
	DEVMETHOD(device_probe, linux_pci_probe),
	DEVMETHOD(device_attach, linux_pci_attach),
	DEVMETHOD(device_detach, linux_pci_detach),
	DEVMETHOD(device_suspend, linux_pci_suspend),
	DEVMETHOD(device_resume, linux_pci_resume),
	DEVMETHOD(device_shutdown, linux_pci_shutdown),
	DEVMETHOD(pci_iov_init, linux_pci_iov_init),
	DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf),

	/* backlight interface */
	DEVMETHOD(backlight_update_status, linux_backlight_update_status),
	DEVMETHOD(backlight_get_status, linux_backlight_get_status),
	DEVMETHOD(backlight_get_info, linux_backlight_get_info),
	DEVMETHOD_END
};

const char *pci_power_names[] = {
	"UNKNOWN", "D0", "D1", "D2", "D3hot", "D3cold"
};

struct linux_dma_priv {
	uint64_t	dma_mask;
	bus_dma_tag_t	dmat;
	uint64_t	dma_coherent_mask;
	bus_dma_tag_t	dmat_coherent;
	struct mtx	lock;
	struct pctrie	ptree;
};
#define	DMA_PRIV_LOCK(priv) mtx_lock(&(priv)->lock)
#define	DMA_PRIV_UNLOCK(priv) mtx_unlock(&(priv)->lock)

static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;

	priv = pdev->dev.dma_priv;
	if (priv->dmat)
		bus_dma_tag_destroy(priv->dmat);
	if (priv->dmat_coherent)
		bus_dma_tag_destroy(priv->dmat_coherent);
	mtx_destroy(&priv->lock);
	pdev->dev.dma_priv = NULL;
	free(priv, M_DEVBUF);
	return (0);
}

static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;
	int error;

	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);

	mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);
	pctrie_init(&priv->ptree);

	pdev->dev.dma_priv = priv;

	/* Create the default DMA tags. */
	error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
	if (error != 0)
		goto err;
	/* Coherent is lower 32bit only by default in Linux. */
	error = linux_dma_tag_init_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (error != 0)
		goto err;

	return (error);

err:
	linux_pdev_dma_uninit(pdev);
	return (error);
}

int
linux_dma_tag_init(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat) {
		if (priv->dma_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat);
	}

	priv->dma_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat);
	return (-error);
}

int
linux_dma_tag_init_coherent(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat_coherent) {
		if (priv->dma_coherent_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat_coherent);
	}

	priv->dma_coherent_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat_coherent);
	return (-error);
}

static struct pci_driver *
linux_pci_find(device_t dev, const struct pci_device_id **idp)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;
	uint16_t subdevice;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);
	subvendor = pci_get_subvendor(dev);
	subdevice = pci_get_subdevice(dev);

	spin_lock(&pci_lock);
	list_for_each_entry(pdrv, &pci_drivers, node) {
		for (id = pdrv->id_table; id->vendor != 0; id++) {
			if (vendor == id->vendor &&
			    (PCI_ANY_ID == id->device || device == id->device) &&
			    (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) &&
			    (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) {
				*idp = id;
				spin_unlock(&pci_lock);
				return (pdrv);
			}
		}
	}
	spin_unlock(&pci_lock);
	return (NULL);
}

struct pci_dev *
lkpi_pci_get_device(uint16_t vendor, uint16_t device, struct pci_dev *odev)
{
	struct pci_dev *pdev;

	KASSERT(odev == NULL, ("%s: odev argument not yet supported\n", __func__));

	spin_lock(&pci_lock);
	list_for_each_entry(pdev, &pci_devices, links) {
		if (pdev->vendor == vendor && pdev->device == device)
			break;
	}
	spin_unlock(&pci_lock);

	return (pdev);
}

static void
lkpi_pci_dev_release(struct device *dev)
{

	lkpi_devres_release_free_list(dev);
	spin_lock_destroy(&dev->devres_lock);
}

static void
lkpifill_pci_dev(device_t dev, struct pci_dev *pdev)
{

	pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
	pdev->vendor = pci_get_vendor(dev);
	pdev->device = pci_get_device(dev);
	pdev->subsystem_vendor = pci_get_subvendor(dev);
	pdev->subsystem_device = pci_get_subdevice(dev);
	pdev->class = pci_get_class(dev);
	pdev->revision = pci_get_revid(dev);
	pdev->path_name = kasprintf(GFP_KERNEL, "%04d:%02d:%02d.%d",
	    pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
	    pci_get_function(dev));
	pdev->bus = malloc(sizeof(*pdev->bus), M_DEVBUF, M_WAITOK | M_ZERO);
	/*
	 * This should be the upstream bridge; pci_upstream_bridge()
	 * handles that case on demand as otherwise we'll shadow the
	 * entire PCI hierarchy.
	 */
	pdev->bus->self = pdev;
	pdev->bus->number = pci_get_bus(dev);
	pdev->bus->domain = pci_get_domain(dev);
	pdev->dev.bsddev = dev;
	pdev->dev.parent = &linux_root_device;
	pdev->dev.release = lkpi_pci_dev_release;
	INIT_LIST_HEAD(&pdev->dev.irqents);

	if (pci_msi_count(dev) > 0)
		pdev->msi_desc = malloc(pci_msi_count(dev) *
		    sizeof(*pdev->msi_desc), M_DEVBUF, M_WAITOK | M_ZERO);

	kobject_init(&pdev->dev.kobj, &linux_dev_ktype);
	kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev));
	kobject_add(&pdev->dev.kobj, &linux_root_device.kobj,
	    kobject_name(&pdev->dev.kobj));
	spin_lock_init(&pdev->dev.devres_lock);
	INIT_LIST_HEAD(&pdev->dev.devres_head);
}

static void
lkpinew_pci_dev_release(struct device *dev)
{
	struct pci_dev *pdev;
	int i;

	pdev = to_pci_dev(dev);
	if (pdev->root != NULL)
		pci_dev_put(pdev->root);
	if (pdev->bus->self != pdev)
		pci_dev_put(pdev->bus->self);
	free(pdev->bus, M_DEVBUF);
	if (pdev->msi_desc != NULL) {
		for (i = pci_msi_count(pdev->dev.bsddev) - 1; i >= 0; i--)
			free(pdev->msi_desc[i], M_DEVBUF);
		free(pdev->msi_desc, M_DEVBUF);
	}
	kfree(pdev->path_name);
	free(pdev, M_DEVBUF);
}

struct pci_dev *
lkpinew_pci_dev(device_t dev)
{
	struct pci_dev *pdev;

	pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK|M_ZERO);
	lkpifill_pci_dev(dev, pdev);
	pdev->dev.release = lkpinew_pci_dev_release;

	return (pdev);
}

struct pci_dev *
lkpi_pci_get_class(unsigned int class, struct pci_dev *from)
{
	device_t dev;
	device_t devfrom = NULL;
	struct pci_dev *pdev;

	if (from != NULL)
		devfrom = from->dev.bsddev;

	dev = pci_find_class_from(class >> 16, (class >> 8) & 0xFF, devfrom);
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

struct pci_dev *
lkpi_pci_get_domain_bus_and_slot(int domain, unsigned int bus,
    unsigned int devfn)
{
	device_t dev;
	struct pci_dev *pdev;

	dev = pci_find_dbsf(domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

static int
linux_pci_probe(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;

	if ((pdrv = linux_pci_find(dev, &id)) == NULL)
		return (ENXIO);
	if (device_get_driver(dev) != &pdrv->bsddriver)
		return (ENXIO);
	device_set_desc(dev, pdrv->name);

	/* Assume BSS initialized (should never return BUS_PROBE_SPECIFIC). */
	if (pdrv->bsd_probe_return == 0)
		return (BUS_PROBE_DEFAULT);
	else
		return (pdrv->bsd_probe_return);
}

static int
linux_pci_attach(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	struct pci_dev *pdev;

	pdrv = linux_pci_find(dev, &id);
	pdev = device_get_softc(dev);

	MPASS(pdrv != NULL);
	MPASS(pdev != NULL);

	return (linux_pci_attach_device(dev, pdrv, id, pdev));
}

int
linux_pci_attach_device(device_t dev, struct pci_driver *pdrv,
    const struct pci_device_id *id, struct pci_dev *pdev)
{
	struct resource_list_entry *rle;
	device_t parent;
	uintptr_t rid;
	int error;
	bool isdrm;

	linux_set_current(curthread);

	parent = device_get_parent(dev);
	isdrm = pdrv != NULL && pdrv->isdrm;

	if (isdrm) {
		struct pci_devinfo *dinfo;

		dinfo = device_get_ivars(parent);
		device_set_ivars(dev, dinfo);
	}

	lkpifill_pci_dev(dev, pdev);
	if (isdrm)
		PCI_GET_ID(device_get_parent(parent), parent, PCI_ID_RID, &rid);
	else
		PCI_GET_ID(parent, dev, PCI_ID_RID, &rid);
	pdev->devfn = rid;
	pdev->pdrv = pdrv;
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0, false);
	if (rle != NULL)
		pdev->dev.irq = rle->start;
	else
		pdev->dev.irq = LINUX_IRQ_INVALID;
	pdev->irq = pdev->dev.irq;
	error = linux_pdev_dma_init(pdev);
	if (error)
		goto out_dma_init;

	TAILQ_INIT(&pdev->mmio);

	spin_lock(&pci_lock);
	list_add(&pdev->links, &pci_devices);
	spin_unlock(&pci_lock);

	if (pdrv != NULL) {
		error = pdrv->probe(pdev, id);
		if (error)
			goto out_probe;
	}
	return (0);

out_probe:
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);
out_dma_init:
	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);
	return (-error);
}

static int
linux_pci_detach(device_t dev)
{
	struct pci_dev *pdev;

	pdev = device_get_softc(dev);

	MPASS(pdev != NULL);

	device_set_desc(dev, NULL);

	return (linux_pci_detach_device(pdev));
}

int
linux_pci_detach_device(struct pci_dev *pdev)
{

	linux_set_current(curthread);

	if (pdev->pdrv != NULL)
		pdev->pdrv->remove(pdev);

	if (pdev->root != NULL)
		pci_dev_put(pdev->root);
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);

	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);

	return (0);
}

static int
lkpi_pci_disable_dev(struct device *dev)
{

	(void) pci_disable_io(dev->bsddev, SYS_RES_MEMORY);
	(void) pci_disable_io(dev->bsddev, SYS_RES_IOPORT);
	return (0);
}

struct pci_devres *
lkpi_pci_devres_get_alloc(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = lkpi_devres_find(&pdev->dev, lkpi_pci_devres_release, NULL, NULL);
	if (dr == NULL) {
		dr = lkpi_devres_alloc(lkpi_pci_devres_release, sizeof(*dr),
		    GFP_KERNEL | __GFP_ZERO);
		if (dr != NULL)
			lkpi_devres_add(&pdev->dev, dr);
	}

	return (dr);
}

void
lkpi_pci_devres_release(struct device *dev, void *p)
{
	struct pci_devres *dr;
	struct pci_dev *pdev;
	int bar;

	pdev = to_pci_dev(dev);
	dr = p;

	if (pdev->msix_enabled)
		lkpi_pci_disable_msix(pdev);
	if (pdev->msi_enabled)
		lkpi_pci_disable_msi(pdev);

	if (dr->enable_io && lkpi_pci_disable_dev(dev) == 0)
		dr->enable_io = false;

	if (dr->region_mask == 0)
		return;
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {

		if ((dr->region_mask & (1 << bar)) == 0)
			continue;
		pci_release_region(pdev, bar);
	}
}

struct pcim_iomap_devres *
lkpi_pcim_iomap_devres_find(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr;

	dr = lkpi_devres_find(&pdev->dev, lkpi_pcim_iomap_table_release,
	    NULL, NULL);
	if (dr == NULL) {
		dr = lkpi_devres_alloc(lkpi_pcim_iomap_table_release,
		    sizeof(*dr), GFP_KERNEL | __GFP_ZERO);
		if (dr != NULL)
			lkpi_devres_add(&pdev->dev, dr);
	}

	if (dr == NULL)
		device_printf(pdev->dev.bsddev, "%s: NULL\n", __func__);

	return (dr);
}

void
lkpi_pcim_iomap_table_release(struct device *dev, void *p)
{
	struct pcim_iomap_devres *dr;
	struct pci_dev *pdev;
	int bar;

	dr = p;
	pdev = to_pci_dev(dev);
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {

		if (dr->mmio_table[bar] == NULL)
			continue;

		pci_iounmap(pdev, dr->mmio_table[bar]);
	}
}

static int
linux_pci_suspend(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pm_message pm = { };
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->suspend != NULL)
		error = -pdev->pdrv->suspend(pdev, pm);
	else if (pmops != NULL && pmops->suspend != NULL) {
		error = -pmops->suspend(&pdev->dev);
		if (error == 0 && pmops->suspend_late != NULL)
			error = -pmops->suspend_late(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_resume(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->resume != NULL)
		error = -pdev->pdrv->resume(pdev);
	else if (pmops != NULL && pmops->resume != NULL) {
		if (pmops->resume_early != NULL)
			error = -pmops->resume_early(&pdev->dev);
		if (error == 0 && pmops->resume != NULL)
			error = -pmops->resume(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_shutdown(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->shutdown != NULL)
		pdev->pdrv->shutdown(pdev);
	return (0);
}

static int
linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_init != NULL)
		error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config);
	else
		error = EINVAL;
	return (error);
}

static void
linux_pci_iov_uninit(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_uninit != NULL)
		pdev->pdrv->bsd_iov_uninit(dev);
}

static int
linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_add_vf != NULL)
		error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config);
	else
		error = EINVAL;
	return (error);
}

static int
_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
{
	int error;

	linux_set_current(curthread);
	spin_lock(&pci_lock);
	list_add(&pdrv->node, &pci_drivers);
	spin_unlock(&pci_lock);
	if (pdrv->bsddriver.name == NULL)
		pdrv->bsddriver.name = pdrv->name;
	pdrv->bsddriver.methods = pci_methods;
	pdrv->bsddriver.size = sizeof(struct pci_dev);

	bus_topo_lock();
	error = devclass_add_driver(dc, &pdrv->bsddriver,
	    BUS_PASS_DEFAULT, &pdrv->bsdclass);
	bus_topo_unlock();
	return (-error);
}

int
linux_pci_register_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_find("pci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = false;
	return (_linux_pci_register_driver(pdrv, dc));
}

struct resource_list_entry *
linux_pci_reserve_bar(struct pci_dev *pdev, struct resource_list *rl,
    int type, int rid)
{
	device_t dev;
	struct resource *res;

	KASSERT(type == SYS_RES_IOPORT || type == SYS_RES_MEMORY,
	    ("trying to reserve non-BAR type %d", type));

	dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	res = pci_reserve_map(device_get_parent(dev), dev, type, &rid, 0, ~0,
	    1, 1, 0);
	if (res == NULL)
		return (NULL);
	return (resource_list_find(rl, type, rid));
}

unsigned long
pci_resource_start(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;
	rman_res_t newstart;
	device_t dev;
	int error;

	if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL)
		return (0);
	dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	error = bus_translate_resource(dev, rle->type, rle->start, &newstart);
	if (error != 0) {
		device_printf(pdev->dev.bsddev,
		    "translate of %#jx failed: %d\n",
		    (uintmax_t)rle->start, error);
		return (0);
	}
	return (newstart);
}

unsigned long
pci_resource_len(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;

	if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL)
		return (0);
	return (rle->count);
}

int
pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	struct resource *res;
	struct pci_devres *dr;
	struct pci_mmio_region *mmio;
	int rid;
	int type;

	type = pci_resource_type(pdev, bar);
	if (type < 0)
		return (-ENODEV);
	rid = PCIR_BAR(bar);
	res = bus_alloc_resource_any(pdev->dev.bsddev, type, &rid,
	    RF_ACTIVE|RF_SHAREABLE);
	if (res == NULL) {
		device_printf(pdev->dev.bsddev, "%s: failed to alloc "
		    "bar %d type %d rid %d\n",
		    __func__, bar, type, PCIR_BAR(bar));
		return (-ENODEV);
	}

	/*
	 * It seems there is an implicit devres tracking on these if the device
	 * is managed; otherwise the resources are not automatically freed on
	 * FreeBSD/LinuxKPI though they are expected to be by Linux drivers.
	 */
	dr = lkpi_pci_devres_find(pdev);
	if (dr != NULL) {
		dr->region_mask |= (1 << bar);
		dr->region_table[bar] = res;
	}

	/* Even if the device is not managed we need to track it for iomap. */
	mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);
	mmio->rid = PCIR_BAR(bar);
	mmio->type = type;
	mmio->res = res;
	TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);

	return (0);
}

struct resource *
_lkpi_pci_iomap(struct pci_dev *pdev, int bar, int mmio_size __unused)
{
	struct pci_mmio_region *mmio, *p;
	int type;

	type = pci_resource_type(pdev, bar);
	if (type < 0) {
		device_printf(pdev->dev.bsddev, "%s: bar %d type %d\n",
		    __func__, bar, type);
		return (NULL);
	}

	/*
	 * Check for duplicate mappings.
	 * This can happen if a driver calls pci_request_region() first.
	 */
	TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
		if (mmio->type == type && mmio->rid == PCIR_BAR(bar)) {
			return (mmio->res);
		}
	}

	mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);
	mmio->rid = PCIR_BAR(bar);
	mmio->type = type;
	mmio->res = bus_alloc_resource_any(pdev->dev.bsddev, mmio->type,
	    &mmio->rid, RF_ACTIVE|RF_SHAREABLE);
	if (mmio->res == NULL) {
		device_printf(pdev->dev.bsddev, "%s: failed to alloc "
		    "bar %d type %d rid %d\n",
		    __func__, bar, type, PCIR_BAR(bar));
		free(mmio, M_DEVBUF);
		return (NULL);
	}
	TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);

	return (mmio->res);
}

int
linux_pci_register_drm_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_create("vgapci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = true;
	pdrv->name = "drmn";
	return (_linux_pci_register_driver(pdrv, dc));
}

void
linux_pci_unregister_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("pci");

	spin_lock(&pci_lock);
	list_del(&pdrv->node);
	spin_unlock(&pci_lock);
	bus_topo_lock();
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	bus_topo_unlock();
}

void
linux_pci_unregister_drm_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("vgapci");

	spin_lock(&pci_lock);
	list_del(&pdrv->node);
	spin_unlock(&pci_lock);
	bus_topo_lock();
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	bus_topo_unlock();
}

int
pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv,
    unsigned int flags)
{
	int error;

	if (flags & PCI_IRQ_MSIX) {
		struct msix_entry *entries;
		int i;

		entries = kcalloc(maxv, sizeof(*entries), GFP_KERNEL);
		if (entries == NULL) {
			error = -ENOMEM;
			goto out;
		}
		for (i = 0; i < maxv; ++i)
			entries[i].entry = i;
		error = pci_enable_msix(pdev, entries, maxv);
out:
		kfree(entries);
		if (error == 0 && pdev->msix_enabled)
			return (pdev->dev.irq_end - pdev->dev.irq_start);
	}
	if (flags & PCI_IRQ_MSI) {
		if (pci_msi_count(pdev->dev.bsddev) < minv)
			return (-ENOSPC);
		error = _lkpi_pci_enable_msi_range(pdev, minv, maxv);
		if (error == 0 && pdev->msi_enabled)
			return (pdev->dev.irq_end - pdev->dev.irq_start);
	}
	if (flags & PCI_IRQ_LEGACY) {
		if (pdev->irq)
			return (1);
	}

	return (-EINVAL);
}

struct msi_desc *
lkpi_pci_msi_desc_alloc(int irq)
{
	struct device *dev;
	struct pci_dev *pdev;
	struct msi_desc *desc;
	struct pci_devinfo *dinfo;
	struct pcicfg_msi *msi;
	int vec;

	dev = linux_pci_find_irq_dev(irq);
	if (dev == NULL)
		return (NULL);

	pdev = to_pci_dev(dev);

	if (pdev->msi_desc == NULL)
		return (NULL);

	if (irq < pdev->dev.irq_start || irq >= pdev->dev.irq_end)
		return (NULL);

	vec = irq - pdev->dev.irq_start;

	if (pdev->msi_desc[vec] != NULL)
		return (pdev->msi_desc[vec]);

	dinfo = device_get_ivars(dev->bsddev);
	msi = &dinfo->cfg.msi;

	desc = malloc(sizeof(*desc), M_DEVBUF, M_WAITOK | M_ZERO);

	desc->pci.msi_attrib.is_64 =
	    (msi->msi_ctrl & PCIM_MSICTRL_64BIT) ? true : false;
	desc->msg.data = msi->msi_data;

	pdev->msi_desc[vec] = desc;

	return (desc);
}

bool
pci_device_is_present(struct pci_dev *pdev)
{
	device_t dev;

	dev = pdev->dev.bsddev;

	return (bus_child_present(dev));
}

CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));

struct linux_dma_obj {
	void		*vaddr;
	uint64_t	dma_addr;
	bus_dmamap_t	dmamap;
	bus_dma_tag_t	dmat;
};

static uma_zone_t linux_dma_trie_zone;
static uma_zone_t linux_dma_obj_zone;

static void
linux_dma_init(void *arg)
{

	linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
	    pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
	    UMA_ALIGN_PTR, 0);
	linux_dma_obj_zone = uma_zcreate("linux_dma_object",
	    sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	lkpi_pci_nseg1_fail = counter_u64_alloc(M_WAITOK);
}
SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);

static void
linux_dma_uninit(void *arg)
{

	counter_u64_free(lkpi_pci_nseg1_fail);
	uma_zdestroy(linux_dma_obj_zone);
	uma_zdestroy(linux_dma_trie_zone);
}
SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);

static void *
linux_dma_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT));
}

static void
linux_dma_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(linux_dma_trie_zone, node);
}

PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
    linux_dma_trie_free);

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
static dma_addr_t
linux_dma_map_phys_common(struct device *dev, vm_paddr_t phys, size_t len,
    bus_dma_tag_t dmat)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	/*
	 * If the resultant mapping will be entirely 1:1 with the
	 * physical address, short-circuit the remainder of the
	 * bus_dma API.  This avoids tracking collisions in the pctrie
	 * with the additional benefit of reducing overhead.
	 */
	if (bus_dma_id_mapped(dmat, phys, len))
		return (phys);

	obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT);
	if (obj == NULL) {
		return (0);
	}
	obj->dmat = dmat;

	DMA_PRIV_LOCK(priv);
	if (bus_dmamap_create(obj->dmat, 0, &obj->dmamap) != 0) {
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	nseg = -1;
	if (_bus_dmamap_load_phys(obj->dmat, obj->dmamap, phys, len,
	    BUS_DMA_NOWAIT, &seg, &nseg) != 0) {
		bus_dmamap_destroy(obj->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		counter_u64_add(lkpi_pci_nseg1_fail, 1);
		if (linuxkpi_debug)
			dump_stack();
		return (0);
	}

	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
	if (error != 0) {
		bus_dmamap_unload(obj->dmat, obj->dmamap);
		bus_dmamap_destroy(obj->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}
	DMA_PRIV_UNLOCK(priv);
	return (obj->dma_addr);
}
#else
static dma_addr_t
linux_dma_map_phys_common(struct device *dev __unused, vm_paddr_t phys,
    size_t len __unused, bus_dma_tag_t dmat __unused)
{
	return (phys);
}
#endif

dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;
	return (linux_dma_map_phys_common(dev, phys, len, priv->dmat));
}

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);
	bus_dmamap_unload(obj->dmat, obj->dmamap);
	bus_dmamap_destroy(obj->dmat, obj->dmamap);
	DMA_PRIV_UNLOCK(priv);

	uma_zfree(linux_dma_obj_zone, obj);
}
#else
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
}
#endif

void *
linux_dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t flag)
{
	struct linux_dma_priv *priv;
	vm_paddr_t high;
	size_t align;
	void *mem;

	if (dev == NULL || dev->dma_priv == NULL) {
		*dma_handle = 0;
		return (NULL);
	}
	priv = dev->dma_priv;
	if (priv->dma_coherent_mask)
		high = priv->dma_coherent_mask;
	else
		/* Coherent is lower 32bit only by default in Linux. */
		high = BUS_SPACE_MAXADDR_32BIT;
	align = PAGE_SIZE << get_order(size);
	/* Always zero the allocation. */
	flag |= M_ZERO;
	mem = kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high,
	    align, 0, VM_MEMATTR_DEFAULT);
	if (mem != NULL) {
		*dma_handle = linux_dma_map_phys_common(dev, vtophys(mem), size,
		    priv->dmat_coherent);
		if (*dma_handle == 0) {
			kmem_free(mem, size);
			mem = NULL;
		}
	} else {
		*dma_handle = 0;
	}
	return (mem);
}

struct lkpi_devres_dmam_coherent {
	size_t size;
	dma_addr_t *handle;
	void *mem;
};

static void
lkpi_dmam_free_coherent(struct device *dev, void *p)
{
	struct lkpi_devres_dmam_coherent *dr;

	dr = p;
	dma_free_coherent(dev, dr->size, dr->mem, *dr->handle);
}

void *
linuxkpi_dmam_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
    gfp_t flag)
{
	struct lkpi_devres_dmam_coherent *dr;

	dr = lkpi_devres_alloc(lkpi_dmam_free_coherent,
	    sizeof(*dr), GFP_KERNEL | __GFP_ZERO);

	if (dr == NULL)
		return (NULL);

	dr->size = size;
	dr->mem = linux_dma_alloc_coherent(dev, size, dma_handle, flag);
	dr->handle = dma_handle;
	if (dr->mem == NULL) {
		lkpi_devres_free(dr);
		return (NULL);
	}

	lkpi_devres_add(dev, dr);
	return (dr->mem);
}

void
linuxkpi_dma_sync(struct device *dev, dma_addr_t dma_addr, size_t size,
    bus_dmasync_op_t op)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}

	bus_dmamap_sync(obj->dmat, obj->dmamap, op);
	DMA_PRIV_UNLOCK(priv);
}

int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction direction, unsigned long attrs __unused)
{
	struct linux_dma_priv *priv;
	struct scatterlist *sg;
	int i, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	/* create common DMA map in the first S/G entry */
	if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
		DMA_PRIV_UNLOCK(priv);
		return (0);
	}

	/* load all S/G list entries */
	for_each_sg(sgl, sg, nents, i) {
		nseg = -1;
		if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
		    sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
		    &seg, &nseg) != 0) {
			bus_dmamap_unload(priv->dmat, sgl->dma_map);
			bus_dmamap_destroy(priv->dmat, sgl->dma_map);
			DMA_PRIV_UNLOCK(priv);
			return (0);
		}
		KASSERT(nseg == 0,
		    ("More than one segment (nseg=%d)", nseg + 1));

		sg_dma_address(sg) = seg.ds_addr;
	}

	switch (direction) {
	case DMA_BIDIRECTIONAL:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE);
		break;
	case DMA_TO_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD);
		break;
	case DMA_FROM_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE);
		break;
	default:
		break;
	}

	DMA_PRIV_UNLOCK(priv);

	return (nents);
}

void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents __unused, enum dma_data_direction direction,
    unsigned long attrs __unused)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	switch (direction) {
	case DMA_BIDIRECTIONAL:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD);
		break;
	case DMA_TO_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTWRITE);
		break;
	case DMA_FROM_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD);
		break;
	default:
		break;
	}

	bus_dmamap_unload(priv->dmat, sgl->dma_map);
	bus_dmamap_destroy(priv->dmat, sgl->dma_map);
	DMA_PRIV_UNLOCK(priv);
}

struct dma_pool {
	struct device	*pool_device;
	uma_zone_t	pool_zone;
	struct mtx	pool_lock;
	bus_dma_tag_t	pool_dmat;
	size_t		pool_entry_size;
	struct pctrie	pool_ptree;
};

#define	DMA_POOL_LOCK(pool) mtx_lock(&(pool)->pool_lock)
#define	DMA_POOL_UNLOCK(pool) mtx_unlock(&(pool)->pool_lock)

static inline int
dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;
	int error, nseg;
	bus_dma_segment_t seg;

	nseg = -1;
	DMA_POOL_LOCK(pool);
	error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
	    vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
	    &seg, &nseg);
	DMA_POOL_UNLOCK(pool);
	if (error != 0) {
		return (error);
	}
	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	return (0);
}

static void
dma_pool_obj_dtor(void *mem, int size, void *arg)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;

	DMA_POOL_LOCK(pool);
	bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
	DMA_POOL_UNLOCK(pool);
}

static int
dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
	struct dma_pool *pool = arg;
	struct linux_dma_obj *obj;
	int error, i;

	for (i = 0; i < count; i++) {
		obj = uma_zalloc(linux_dma_obj_zone, flags);
		if (obj == NULL)
			break;

		error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
		    BUS_DMA_NOWAIT, &obj->dmamap);
		if (error != 0) {
			uma_zfree(linux_dma_obj_zone, obj);
			break;
		}

		store[i] = obj;
	}

	return (i);
}

static void
dma_pool_obj_release(void *arg, void **store, int count)
{
	struct dma_pool *pool = arg;
	struct linux_dma_obj *obj;
	int i;

	for (i = 0; i < count; i++) {
		obj = store[i];
		bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
		uma_zfree(linux_dma_obj_zone, obj);
	}
}

struct dma_pool *
linux_dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
{
	struct linux_dma_priv *priv;
	struct dma_pool *pool;

	priv = dev->dma_priv;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	pool->pool_device = dev;
	pool->pool_entry_size = size;

	if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    align, boundary,		/* alignment, boundary */
	    priv->dma_mask,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &pool->pool_dmat)) {
		kfree(pool);
		return (NULL);
	}

	pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,
	    dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,
	    dma_pool_obj_release, pool, 0);

	mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF);
	pctrie_init(&pool->pool_ptree);

	return (pool);
}

void
linux_dma_pool_destroy(struct dma_pool *pool)
{

	uma_zdestroy(pool->pool_zone);
	bus_dma_tag_destroy(pool->pool_dmat);
	mtx_destroy(&pool->pool_lock);
	kfree(pool);
}

void
lkpi_dmam_pool_destroy(struct device *dev, void *p)
{
	struct dma_pool *pool;

	pool = *(struct dma_pool **)p;
	LINUX_DMA_PCTRIE_RECLAIM(&pool->pool_ptree);
	linux_dma_pool_destroy(pool);
}

void *
linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    dma_addr_t *handle)
{
	struct linux_dma_obj *obj;

	obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags & GFP_NATIVE_MASK);
	if (obj == NULL)
		return (NULL);

	DMA_POOL_LOCK(pool);
	if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
		DMA_POOL_UNLOCK(pool);
		uma_zfree_arg(pool->pool_zone, obj, pool);
		return (NULL);
	}
	DMA_POOL_UNLOCK(pool);

	*handle = obj->dma_addr;
	return (obj->vaddr);
}

void
linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
{
	struct linux_dma_obj *obj;

	DMA_POOL_LOCK(pool);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
	if (obj == NULL) {
		DMA_POOL_UNLOCK(pool);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
	DMA_POOL_UNLOCK(pool);

	uma_zfree_arg(pool->pool_zone, obj, pool);
}

static int
linux_backlight_get_status(device_t dev, struct backlight_props *props)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	props->brightness = pdev->dev.bd->props.brightness;
	props->brightness = props->brightness * 100 / pdev->dev.bd->props.max_brightness;
	props->nlevels = 0;

	return (0);
}

static int
linux_backlight_get_info(device_t dev, struct backlight_info *info)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	info->type = BACKLIGHT_TYPE_PANEL;
	strlcpy(info->name, pdev->dev.bd->name, BACKLIGHTMAXNAMELENGTH);
	return (0);
}

static int
linux_backlight_update_status(device_t dev, struct backlight_props *props)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	pdev->dev.bd->props.brightness = pdev->dev.bd->props.max_brightness *
	    props->brightness / 100;
	pdev->dev.bd->props.power = props->brightness == 0 ?
	    4/* FB_BLANK_POWERDOWN */ : 0/* FB_BLANK_UNBLANK */;
	return (pdev->dev.bd->ops->update_status(pdev->dev.bd));
}

struct backlight_device *
linux_backlight_device_register(const char *name, struct device *dev,
    void *data, const struct backlight_ops *ops, struct backlight_properties *props)
{

	dev->bd = malloc(sizeof(*dev->bd), M_DEVBUF, M_WAITOK | M_ZERO);
	dev->bd->ops = ops;
	dev->bd->props.type = props->type;
	dev->bd->props.max_brightness = props->max_brightness;
	dev->bd->props.brightness = props->brightness;
	dev->bd->props.power = props->power;
	dev->bd->data = data;
	dev->bd->dev = dev;
	dev->bd->name = strdup(name, M_DEVBUF);

	dev->backlight_dev = backlight_register(name, dev->bsddev);

	return (dev->bd);
}

void
linux_backlight_device_unregister(struct backlight_device *bd)
{

	backlight_destroy(bd->dev->backlight_dev);
	free(bd->name, M_DEVBUF);
	free(bd, M_DEVBUF);
}