1 /*- 2 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd. 3 * All rights reserved. 4 * Copyright (c) 2020-2022 The FreeBSD Foundation 5 * 6 * Portions of this software were developed by Björn Zeeb 7 * under sponsorship from the FreeBSD Foundation. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice unmodified, this list of conditions, and the following 14 * disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 */ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include <sys/param.h> 35 #include <sys/systm.h> 36 #include <sys/bus.h> 37 #include <sys/malloc.h> 38 #include <sys/kernel.h> 39 #include <sys/sysctl.h> 40 #include <sys/lock.h> 41 #include <sys/mutex.h> 42 #include <sys/fcntl.h> 43 #include <sys/file.h> 44 #include <sys/filio.h> 45 #include <sys/pciio.h> 46 #include <sys/pctrie.h> 47 #include <sys/rwlock.h> 48 49 #include <vm/vm.h> 50 #include <vm/pmap.h> 51 52 #include <machine/stdarg.h> 53 54 #include <dev/pci/pcivar.h> 55 #include <dev/pci/pci_private.h> 56 #include <dev/pci/pci_iov.h> 57 #include <dev/backlight/backlight.h> 58 59 #include <linux/kernel.h> 60 #include <linux/kobject.h> 61 #include <linux/device.h> 62 #include <linux/slab.h> 63 #include <linux/module.h> 64 #include <linux/cdev.h> 65 #include <linux/file.h> 66 #include <linux/sysfs.h> 67 #include <linux/mm.h> 68 #include <linux/io.h> 69 #include <linux/vmalloc.h> 70 #include <linux/pci.h> 71 #include <linux/compat.h> 72 73 #include <linux/backlight.h> 74 75 #include "backlight_if.h" 76 #include "pcib_if.h" 77 78 /* Undef the linux function macro defined in linux/pci.h */ 79 #undef pci_get_class 80 81 extern int linuxkpi_debug; 82 83 SYSCTL_DECL(_compat_linuxkpi); 84 85 static counter_u64_t lkpi_pci_nseg1_fail; 86 SYSCTL_COUNTER_U64(_compat_linuxkpi, OID_AUTO, lkpi_pci_nseg1_fail, CTLFLAG_RD, 87 &lkpi_pci_nseg1_fail, "Count of busdma mapping failures of single-segment"); 88 89 static device_probe_t linux_pci_probe; 90 static device_attach_t linux_pci_attach; 91 static device_detach_t linux_pci_detach; 92 static device_suspend_t linux_pci_suspend; 93 static device_resume_t linux_pci_resume; 94 static device_shutdown_t linux_pci_shutdown; 95 static pci_iov_init_t linux_pci_iov_init; 96 static pci_iov_uninit_t linux_pci_iov_uninit; 97 static pci_iov_add_vf_t linux_pci_iov_add_vf; 98 static int linux_backlight_get_status(device_t dev, struct backlight_props *props); 99 static int linux_backlight_update_status(device_t dev, struct backlight_props *props); 100 static int linux_backlight_get_info(device_t dev, struct backlight_info *info); 101 102 static device_method_t pci_methods[] = { 103 DEVMETHOD(device_probe, linux_pci_probe), 104 DEVMETHOD(device_attach, linux_pci_attach), 105 DEVMETHOD(device_detach, linux_pci_detach), 106 DEVMETHOD(device_suspend, linux_pci_suspend), 107 DEVMETHOD(device_resume, linux_pci_resume), 108 DEVMETHOD(device_shutdown, linux_pci_shutdown), 109 DEVMETHOD(pci_iov_init, linux_pci_iov_init), 110 DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit), 111 DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf), 112 113 /* backlight interface */ 114 DEVMETHOD(backlight_update_status, linux_backlight_update_status), 115 DEVMETHOD(backlight_get_status, linux_backlight_get_status), 116 DEVMETHOD(backlight_get_info, linux_backlight_get_info), 117 DEVMETHOD_END 118 }; 119 120 const char *pci_power_names[] = { 121 "UNKNOWN", "D0", "D1", "D2", "D3hot", "D3cold" 122 }; 123 124 struct linux_dma_priv { 125 uint64_t dma_mask; 126 bus_dma_tag_t dmat; 127 uint64_t dma_coherent_mask; 128 bus_dma_tag_t dmat_coherent; 129 struct mtx lock; 130 struct pctrie ptree; 131 }; 132 #define DMA_PRIV_LOCK(priv) mtx_lock(&(priv)->lock) 133 #define DMA_PRIV_UNLOCK(priv) mtx_unlock(&(priv)->lock) 134 135 static int 136 linux_pdev_dma_uninit(struct pci_dev *pdev) 137 { 138 struct linux_dma_priv *priv; 139 140 priv = pdev->dev.dma_priv; 141 if (priv->dmat) 142 bus_dma_tag_destroy(priv->dmat); 143 if (priv->dmat_coherent) 144 bus_dma_tag_destroy(priv->dmat_coherent); 145 mtx_destroy(&priv->lock); 146 pdev->dev.dma_priv = NULL; 147 free(priv, M_DEVBUF); 148 return (0); 149 } 150 151 static int 152 linux_pdev_dma_init(struct pci_dev *pdev) 153 { 154 struct linux_dma_priv *priv; 155 int error; 156 157 priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO); 158 159 mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF); 160 pctrie_init(&priv->ptree); 161 162 pdev->dev.dma_priv = priv; 163 164 /* Create a default DMA tags. */ 165 error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64)); 166 if (error != 0) 167 goto err; 168 /* Coherent is lower 32bit only by default in Linux. */ 169 error = linux_dma_tag_init_coherent(&pdev->dev, DMA_BIT_MASK(32)); 170 if (error != 0) 171 goto err; 172 173 return (error); 174 175 err: 176 linux_pdev_dma_uninit(pdev); 177 return (error); 178 } 179 180 int 181 linux_dma_tag_init(struct device *dev, u64 dma_mask) 182 { 183 struct linux_dma_priv *priv; 184 int error; 185 186 priv = dev->dma_priv; 187 188 if (priv->dmat) { 189 if (priv->dma_mask == dma_mask) 190 return (0); 191 192 bus_dma_tag_destroy(priv->dmat); 193 } 194 195 priv->dma_mask = dma_mask; 196 197 error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev), 198 1, 0, /* alignment, boundary */ 199 dma_mask, /* lowaddr */ 200 BUS_SPACE_MAXADDR, /* highaddr */ 201 NULL, NULL, /* filtfunc, filtfuncarg */ 202 BUS_SPACE_MAXSIZE, /* maxsize */ 203 1, /* nsegments */ 204 BUS_SPACE_MAXSIZE, /* maxsegsz */ 205 0, /* flags */ 206 NULL, NULL, /* lockfunc, lockfuncarg */ 207 &priv->dmat); 208 return (-error); 209 } 210 211 int 212 linux_dma_tag_init_coherent(struct device *dev, u64 dma_mask) 213 { 214 struct linux_dma_priv *priv; 215 int error; 216 217 priv = dev->dma_priv; 218 219 if (priv->dmat_coherent) { 220 if (priv->dma_coherent_mask == dma_mask) 221 return (0); 222 223 bus_dma_tag_destroy(priv->dmat_coherent); 224 } 225 226 priv->dma_coherent_mask = dma_mask; 227 228 error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev), 229 1, 0, /* alignment, boundary */ 230 dma_mask, /* lowaddr */ 231 BUS_SPACE_MAXADDR, /* highaddr */ 232 NULL, NULL, /* filtfunc, filtfuncarg */ 233 BUS_SPACE_MAXSIZE, /* maxsize */ 234 1, /* nsegments */ 235 BUS_SPACE_MAXSIZE, /* maxsegsz */ 236 0, /* flags */ 237 NULL, NULL, /* lockfunc, lockfuncarg */ 238 &priv->dmat_coherent); 239 return (-error); 240 } 241 242 static struct pci_driver * 243 linux_pci_find(device_t dev, const struct pci_device_id **idp) 244 { 245 const struct pci_device_id *id; 246 struct pci_driver *pdrv; 247 uint16_t vendor; 248 uint16_t device; 249 uint16_t subvendor; 250 uint16_t subdevice; 251 252 vendor = pci_get_vendor(dev); 253 device = pci_get_device(dev); 254 subvendor = pci_get_subvendor(dev); 255 subdevice = pci_get_subdevice(dev); 256 257 spin_lock(&pci_lock); 258 list_for_each_entry(pdrv, &pci_drivers, node) { 259 for (id = pdrv->id_table; id->vendor != 0; id++) { 260 if (vendor == id->vendor && 261 (PCI_ANY_ID == id->device || device == id->device) && 262 (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) && 263 (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) { 264 *idp = id; 265 spin_unlock(&pci_lock); 266 return (pdrv); 267 } 268 } 269 } 270 spin_unlock(&pci_lock); 271 return (NULL); 272 } 273 274 static void 275 lkpi_pci_dev_release(struct device *dev) 276 { 277 278 lkpi_devres_release_free_list(dev); 279 spin_lock_destroy(&dev->devres_lock); 280 } 281 282 static void 283 lkpifill_pci_dev(device_t dev, struct pci_dev *pdev) 284 { 285 286 pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev)); 287 pdev->vendor = pci_get_vendor(dev); 288 pdev->device = pci_get_device(dev); 289 pdev->subsystem_vendor = pci_get_subvendor(dev); 290 pdev->subsystem_device = pci_get_subdevice(dev); 291 pdev->class = pci_get_class(dev); 292 pdev->revision = pci_get_revid(dev); 293 pdev->bus = malloc(sizeof(*pdev->bus), M_DEVBUF, M_WAITOK | M_ZERO); 294 /* 295 * This should be the upstream bridge; pci_upstream_bridge() 296 * handles that case on demand as otherwise we'll shadow the 297 * entire PCI hierarchy. 298 */ 299 pdev->bus->self = pdev; 300 pdev->bus->number = pci_get_bus(dev); 301 pdev->bus->domain = pci_get_domain(dev); 302 pdev->dev.bsddev = dev; 303 pdev->dev.parent = &linux_root_device; 304 pdev->dev.release = lkpi_pci_dev_release; 305 INIT_LIST_HEAD(&pdev->dev.irqents); 306 kobject_init(&pdev->dev.kobj, &linux_dev_ktype); 307 kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev)); 308 kobject_add(&pdev->dev.kobj, &linux_root_device.kobj, 309 kobject_name(&pdev->dev.kobj)); 310 spin_lock_init(&pdev->dev.devres_lock); 311 INIT_LIST_HEAD(&pdev->dev.devres_head); 312 } 313 314 static void 315 lkpinew_pci_dev_release(struct device *dev) 316 { 317 struct pci_dev *pdev; 318 319 pdev = to_pci_dev(dev); 320 if (pdev->root != NULL) 321 pci_dev_put(pdev->root); 322 if (pdev->bus->self != pdev) 323 pci_dev_put(pdev->bus->self); 324 free(pdev->bus, M_DEVBUF); 325 free(pdev, M_DEVBUF); 326 } 327 328 struct pci_dev * 329 lkpinew_pci_dev(device_t dev) 330 { 331 struct pci_dev *pdev; 332 333 pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK|M_ZERO); 334 lkpifill_pci_dev(dev, pdev); 335 pdev->dev.release = lkpinew_pci_dev_release; 336 337 return (pdev); 338 } 339 340 struct pci_dev * 341 lkpi_pci_get_class(unsigned int class, struct pci_dev *from) 342 { 343 device_t dev; 344 device_t devfrom = NULL; 345 struct pci_dev *pdev; 346 347 if (from != NULL) 348 devfrom = from->dev.bsddev; 349 350 dev = pci_find_class_from(class >> 16, (class >> 8) & 0xFF, devfrom); 351 if (dev == NULL) 352 return (NULL); 353 354 pdev = lkpinew_pci_dev(dev); 355 return (pdev); 356 } 357 358 struct pci_dev * 359 lkpi_pci_get_domain_bus_and_slot(int domain, unsigned int bus, 360 unsigned int devfn) 361 { 362 device_t dev; 363 struct pci_dev *pdev; 364 365 dev = pci_find_dbsf(domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); 366 if (dev == NULL) 367 return (NULL); 368 369 pdev = lkpinew_pci_dev(dev); 370 return (pdev); 371 } 372 373 static int 374 linux_pci_probe(device_t dev) 375 { 376 const struct pci_device_id *id; 377 struct pci_driver *pdrv; 378 379 if ((pdrv = linux_pci_find(dev, &id)) == NULL) 380 return (ENXIO); 381 if (device_get_driver(dev) != &pdrv->bsddriver) 382 return (ENXIO); 383 device_set_desc(dev, pdrv->name); 384 385 /* Assume BSS initialized (should never return BUS_PROBE_SPECIFIC). */ 386 if (pdrv->bsd_probe_return == 0) 387 return (BUS_PROBE_DEFAULT); 388 else 389 return (pdrv->bsd_probe_return); 390 } 391 392 static int 393 linux_pci_attach(device_t dev) 394 { 395 const struct pci_device_id *id; 396 struct pci_driver *pdrv; 397 struct pci_dev *pdev; 398 399 pdrv = linux_pci_find(dev, &id); 400 pdev = device_get_softc(dev); 401 402 MPASS(pdrv != NULL); 403 MPASS(pdev != NULL); 404 405 return (linux_pci_attach_device(dev, pdrv, id, pdev)); 406 } 407 408 int 409 linux_pci_attach_device(device_t dev, struct pci_driver *pdrv, 410 const struct pci_device_id *id, struct pci_dev *pdev) 411 { 412 struct resource_list_entry *rle; 413 device_t parent; 414 uintptr_t rid; 415 int error; 416 bool isdrm; 417 418 linux_set_current(curthread); 419 420 parent = device_get_parent(dev); 421 isdrm = pdrv != NULL && pdrv->isdrm; 422 423 if (isdrm) { 424 struct pci_devinfo *dinfo; 425 426 dinfo = device_get_ivars(parent); 427 device_set_ivars(dev, dinfo); 428 } 429 430 lkpifill_pci_dev(dev, pdev); 431 if (isdrm) 432 PCI_GET_ID(device_get_parent(parent), parent, PCI_ID_RID, &rid); 433 else 434 PCI_GET_ID(parent, dev, PCI_ID_RID, &rid); 435 pdev->devfn = rid; 436 pdev->pdrv = pdrv; 437 rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0, false); 438 if (rle != NULL) 439 pdev->dev.irq = rle->start; 440 else 441 pdev->dev.irq = LINUX_IRQ_INVALID; 442 pdev->irq = pdev->dev.irq; 443 error = linux_pdev_dma_init(pdev); 444 if (error) 445 goto out_dma_init; 446 447 TAILQ_INIT(&pdev->mmio); 448 449 spin_lock(&pci_lock); 450 list_add(&pdev->links, &pci_devices); 451 spin_unlock(&pci_lock); 452 453 if (pdrv != NULL) { 454 error = pdrv->probe(pdev, id); 455 if (error) 456 goto out_probe; 457 } 458 return (0); 459 460 out_probe: 461 free(pdev->bus, M_DEVBUF); 462 linux_pdev_dma_uninit(pdev); 463 out_dma_init: 464 spin_lock(&pci_lock); 465 list_del(&pdev->links); 466 spin_unlock(&pci_lock); 467 put_device(&pdev->dev); 468 return (-error); 469 } 470 471 static int 472 linux_pci_detach(device_t dev) 473 { 474 struct pci_dev *pdev; 475 476 pdev = device_get_softc(dev); 477 478 MPASS(pdev != NULL); 479 480 device_set_desc(dev, NULL); 481 482 return (linux_pci_detach_device(pdev)); 483 } 484 485 int 486 linux_pci_detach_device(struct pci_dev *pdev) 487 { 488 489 linux_set_current(curthread); 490 491 if (pdev->pdrv != NULL) 492 pdev->pdrv->remove(pdev); 493 494 if (pdev->root != NULL) 495 pci_dev_put(pdev->root); 496 free(pdev->bus, M_DEVBUF); 497 linux_pdev_dma_uninit(pdev); 498 499 spin_lock(&pci_lock); 500 list_del(&pdev->links); 501 spin_unlock(&pci_lock); 502 put_device(&pdev->dev); 503 504 return (0); 505 } 506 507 static int 508 lkpi_pci_disable_dev(struct device *dev) 509 { 510 511 (void) pci_disable_io(dev->bsddev, SYS_RES_MEMORY); 512 (void) pci_disable_io(dev->bsddev, SYS_RES_IOPORT); 513 return (0); 514 } 515 516 struct pci_devres * 517 lkpi_pci_devres_get_alloc(struct pci_dev *pdev) 518 { 519 struct pci_devres *dr; 520 521 dr = lkpi_devres_find(&pdev->dev, lkpi_pci_devres_release, NULL, NULL); 522 if (dr == NULL) { 523 dr = lkpi_devres_alloc(lkpi_pci_devres_release, sizeof(*dr), 524 GFP_KERNEL | __GFP_ZERO); 525 if (dr != NULL) 526 lkpi_devres_add(&pdev->dev, dr); 527 } 528 529 return (dr); 530 } 531 532 void 533 lkpi_pci_devres_release(struct device *dev, void *p) 534 { 535 struct pci_devres *dr; 536 struct pci_dev *pdev; 537 int bar; 538 539 pdev = to_pci_dev(dev); 540 dr = p; 541 542 if (pdev->msix_enabled) 543 lkpi_pci_disable_msix(pdev); 544 if (pdev->msi_enabled) 545 lkpi_pci_disable_msi(pdev); 546 547 if (dr->enable_io && lkpi_pci_disable_dev(dev) == 0) 548 dr->enable_io = false; 549 550 if (dr->region_mask == 0) 551 return; 552 for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) { 553 554 if ((dr->region_mask & (1 << bar)) == 0) 555 continue; 556 pci_release_region(pdev, bar); 557 } 558 } 559 560 struct pcim_iomap_devres * 561 lkpi_pcim_iomap_devres_find(struct pci_dev *pdev) 562 { 563 struct pcim_iomap_devres *dr; 564 565 dr = lkpi_devres_find(&pdev->dev, lkpi_pcim_iomap_table_release, 566 NULL, NULL); 567 if (dr == NULL) { 568 dr = lkpi_devres_alloc(lkpi_pcim_iomap_table_release, 569 sizeof(*dr), GFP_KERNEL | __GFP_ZERO); 570 if (dr != NULL) 571 lkpi_devres_add(&pdev->dev, dr); 572 } 573 574 if (dr == NULL) 575 device_printf(pdev->dev.bsddev, "%s: NULL\n", __func__); 576 577 return (dr); 578 } 579 580 void 581 lkpi_pcim_iomap_table_release(struct device *dev, void *p) 582 { 583 struct pcim_iomap_devres *dr; 584 struct pci_dev *pdev; 585 int bar; 586 587 dr = p; 588 pdev = to_pci_dev(dev); 589 for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) { 590 591 if (dr->mmio_table[bar] == NULL) 592 continue; 593 594 pci_iounmap(pdev, dr->mmio_table[bar]); 595 } 596 } 597 598 static int 599 linux_pci_suspend(device_t dev) 600 { 601 const struct dev_pm_ops *pmops; 602 struct pm_message pm = { }; 603 struct pci_dev *pdev; 604 int error; 605 606 error = 0; 607 linux_set_current(curthread); 608 pdev = device_get_softc(dev); 609 pmops = pdev->pdrv->driver.pm; 610 611 if (pdev->pdrv->suspend != NULL) 612 error = -pdev->pdrv->suspend(pdev, pm); 613 else if (pmops != NULL && pmops->suspend != NULL) { 614 error = -pmops->suspend(&pdev->dev); 615 if (error == 0 && pmops->suspend_late != NULL) 616 error = -pmops->suspend_late(&pdev->dev); 617 } 618 return (error); 619 } 620 621 static int 622 linux_pci_resume(device_t dev) 623 { 624 const struct dev_pm_ops *pmops; 625 struct pci_dev *pdev; 626 int error; 627 628 error = 0; 629 linux_set_current(curthread); 630 pdev = device_get_softc(dev); 631 pmops = pdev->pdrv->driver.pm; 632 633 if (pdev->pdrv->resume != NULL) 634 error = -pdev->pdrv->resume(pdev); 635 else if (pmops != NULL && pmops->resume != NULL) { 636 if (pmops->resume_early != NULL) 637 error = -pmops->resume_early(&pdev->dev); 638 if (error == 0 && pmops->resume != NULL) 639 error = -pmops->resume(&pdev->dev); 640 } 641 return (error); 642 } 643 644 static int 645 linux_pci_shutdown(device_t dev) 646 { 647 struct pci_dev *pdev; 648 649 linux_set_current(curthread); 650 pdev = device_get_softc(dev); 651 if (pdev->pdrv->shutdown != NULL) 652 pdev->pdrv->shutdown(pdev); 653 return (0); 654 } 655 656 static int 657 linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config) 658 { 659 struct pci_dev *pdev; 660 int error; 661 662 linux_set_current(curthread); 663 pdev = device_get_softc(dev); 664 if (pdev->pdrv->bsd_iov_init != NULL) 665 error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config); 666 else 667 error = EINVAL; 668 return (error); 669 } 670 671 static void 672 linux_pci_iov_uninit(device_t dev) 673 { 674 struct pci_dev *pdev; 675 676 linux_set_current(curthread); 677 pdev = device_get_softc(dev); 678 if (pdev->pdrv->bsd_iov_uninit != NULL) 679 pdev->pdrv->bsd_iov_uninit(dev); 680 } 681 682 static int 683 linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config) 684 { 685 struct pci_dev *pdev; 686 int error; 687 688 linux_set_current(curthread); 689 pdev = device_get_softc(dev); 690 if (pdev->pdrv->bsd_iov_add_vf != NULL) 691 error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config); 692 else 693 error = EINVAL; 694 return (error); 695 } 696 697 static int 698 _linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc) 699 { 700 int error; 701 702 linux_set_current(curthread); 703 spin_lock(&pci_lock); 704 list_add(&pdrv->node, &pci_drivers); 705 spin_unlock(&pci_lock); 706 if (pdrv->bsddriver.name == NULL) 707 pdrv->bsddriver.name = pdrv->name; 708 pdrv->bsddriver.methods = pci_methods; 709 pdrv->bsddriver.size = sizeof(struct pci_dev); 710 711 bus_topo_lock(); 712 error = devclass_add_driver(dc, &pdrv->bsddriver, 713 BUS_PASS_DEFAULT, &pdrv->bsdclass); 714 bus_topo_unlock(); 715 return (-error); 716 } 717 718 int 719 linux_pci_register_driver(struct pci_driver *pdrv) 720 { 721 devclass_t dc; 722 723 dc = devclass_find("pci"); 724 if (dc == NULL) 725 return (-ENXIO); 726 pdrv->isdrm = false; 727 return (_linux_pci_register_driver(pdrv, dc)); 728 } 729 730 struct resource_list_entry * 731 linux_pci_reserve_bar(struct pci_dev *pdev, struct resource_list *rl, 732 int type, int rid) 733 { 734 device_t dev; 735 struct resource *res; 736 737 KASSERT(type == SYS_RES_IOPORT || type == SYS_RES_MEMORY, 738 ("trying to reserve non-BAR type %d", type)); 739 740 dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ? 741 device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev; 742 res = pci_reserve_map(device_get_parent(dev), dev, type, &rid, 0, ~0, 743 1, 1, 0); 744 if (res == NULL) 745 return (NULL); 746 return (resource_list_find(rl, type, rid)); 747 } 748 749 unsigned long 750 pci_resource_start(struct pci_dev *pdev, int bar) 751 { 752 struct resource_list_entry *rle; 753 rman_res_t newstart; 754 device_t dev; 755 int error; 756 757 if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL) 758 return (0); 759 dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ? 760 device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev; 761 error = bus_translate_resource(dev, rle->type, rle->start, &newstart); 762 if (error != 0) { 763 device_printf(pdev->dev.bsddev, 764 "translate of %#jx failed: %d\n", 765 (uintmax_t)rle->start, error); 766 return (0); 767 } 768 return (newstart); 769 } 770 771 unsigned long 772 pci_resource_len(struct pci_dev *pdev, int bar) 773 { 774 struct resource_list_entry *rle; 775 776 if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL) 777 return (0); 778 return (rle->count); 779 } 780 781 int 782 pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) 783 { 784 struct resource *res; 785 struct pci_devres *dr; 786 struct pci_mmio_region *mmio; 787 int rid; 788 int type; 789 790 type = pci_resource_type(pdev, bar); 791 if (type < 0) 792 return (-ENODEV); 793 rid = PCIR_BAR(bar); 794 res = bus_alloc_resource_any(pdev->dev.bsddev, type, &rid, 795 RF_ACTIVE|RF_SHAREABLE); 796 if (res == NULL) { 797 device_printf(pdev->dev.bsddev, "%s: failed to alloc " 798 "bar %d type %d rid %d\n", 799 __func__, bar, type, PCIR_BAR(bar)); 800 return (-ENODEV); 801 } 802 803 /* 804 * It seems there is an implicit devres tracking on these if the device 805 * is managed; otherwise the resources are not automatiaclly freed on 806 * FreeBSD/LinuxKPI tough they should be/are expected to be by Linux 807 * drivers. 808 */ 809 dr = lkpi_pci_devres_find(pdev); 810 if (dr != NULL) { 811 dr->region_mask |= (1 << bar); 812 dr->region_table[bar] = res; 813 } 814 815 /* Even if the device is not managed we need to track it for iomap. */ 816 mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO); 817 mmio->rid = PCIR_BAR(bar); 818 mmio->type = type; 819 mmio->res = res; 820 TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next); 821 822 return (0); 823 } 824 825 struct resource * 826 _lkpi_pci_iomap(struct pci_dev *pdev, int bar, int mmio_size __unused) 827 { 828 struct pci_mmio_region *mmio, *p; 829 int type; 830 831 type = pci_resource_type(pdev, bar); 832 if (type < 0) { 833 device_printf(pdev->dev.bsddev, "%s: bar %d type %d\n", 834 __func__, bar, type); 835 return (NULL); 836 } 837 838 /* 839 * Check for duplicate mappings. 840 * This can happen if a driver calls pci_request_region() first. 841 */ 842 TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) { 843 if (mmio->type == type && mmio->rid == PCIR_BAR(bar)) { 844 return (mmio->res); 845 } 846 } 847 848 mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO); 849 mmio->rid = PCIR_BAR(bar); 850 mmio->type = type; 851 mmio->res = bus_alloc_resource_any(pdev->dev.bsddev, mmio->type, 852 &mmio->rid, RF_ACTIVE|RF_SHAREABLE); 853 if (mmio->res == NULL) { 854 device_printf(pdev->dev.bsddev, "%s: failed to alloc " 855 "bar %d type %d rid %d\n", 856 __func__, bar, type, PCIR_BAR(bar)); 857 free(mmio, M_DEVBUF); 858 return (NULL); 859 } 860 TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next); 861 862 return (mmio->res); 863 } 864 865 int 866 linux_pci_register_drm_driver(struct pci_driver *pdrv) 867 { 868 devclass_t dc; 869 870 dc = devclass_create("vgapci"); 871 if (dc == NULL) 872 return (-ENXIO); 873 pdrv->isdrm = true; 874 pdrv->name = "drmn"; 875 return (_linux_pci_register_driver(pdrv, dc)); 876 } 877 878 void 879 linux_pci_unregister_driver(struct pci_driver *pdrv) 880 { 881 devclass_t bus; 882 883 bus = devclass_find("pci"); 884 885 spin_lock(&pci_lock); 886 list_del(&pdrv->node); 887 spin_unlock(&pci_lock); 888 bus_topo_lock(); 889 if (bus != NULL) 890 devclass_delete_driver(bus, &pdrv->bsddriver); 891 bus_topo_unlock(); 892 } 893 894 void 895 linux_pci_unregister_drm_driver(struct pci_driver *pdrv) 896 { 897 devclass_t bus; 898 899 bus = devclass_find("vgapci"); 900 901 spin_lock(&pci_lock); 902 list_del(&pdrv->node); 903 spin_unlock(&pci_lock); 904 bus_topo_lock(); 905 if (bus != NULL) 906 devclass_delete_driver(bus, &pdrv->bsddriver); 907 bus_topo_unlock(); 908 } 909 910 int 911 pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv, 912 unsigned int flags) 913 { 914 int error; 915 916 if (flags & PCI_IRQ_MSIX) { 917 struct msix_entry *entries; 918 int i; 919 920 entries = kcalloc(maxv, sizeof(*entries), GFP_KERNEL); 921 if (entries == NULL) { 922 error = -ENOMEM; 923 goto out; 924 } 925 for (i = 0; i < maxv; ++i) 926 entries[i].entry = i; 927 error = pci_enable_msix(pdev, entries, maxv); 928 out: 929 kfree(entries); 930 if (error == 0 && pdev->msix_enabled) 931 return (pdev->dev.irq_end - pdev->dev.irq_start); 932 } 933 if (flags & PCI_IRQ_MSI) { 934 error = pci_enable_msi(pdev); 935 if (error == 0 && pdev->msi_enabled) 936 return (pdev->dev.irq_end - pdev->dev.irq_start); 937 } 938 if (flags & PCI_IRQ_LEGACY) { 939 if (pdev->irq) 940 return (1); 941 } 942 943 return (-EINVAL); 944 } 945 946 CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t)); 947 948 struct linux_dma_obj { 949 void *vaddr; 950 uint64_t dma_addr; 951 bus_dmamap_t dmamap; 952 bus_dma_tag_t dmat; 953 }; 954 955 static uma_zone_t linux_dma_trie_zone; 956 static uma_zone_t linux_dma_obj_zone; 957 958 static void 959 linux_dma_init(void *arg) 960 { 961 962 linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie", 963 pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL, 964 UMA_ALIGN_PTR, 0); 965 linux_dma_obj_zone = uma_zcreate("linux_dma_object", 966 sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL, 967 UMA_ALIGN_PTR, 0); 968 lkpi_pci_nseg1_fail = counter_u64_alloc(M_WAITOK); 969 } 970 SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL); 971 972 static void 973 linux_dma_uninit(void *arg) 974 { 975 976 counter_u64_free(lkpi_pci_nseg1_fail); 977 uma_zdestroy(linux_dma_obj_zone); 978 uma_zdestroy(linux_dma_trie_zone); 979 } 980 SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL); 981 982 static void * 983 linux_dma_trie_alloc(struct pctrie *ptree) 984 { 985 986 return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT)); 987 } 988 989 static void 990 linux_dma_trie_free(struct pctrie *ptree, void *node) 991 { 992 993 uma_zfree(linux_dma_trie_zone, node); 994 } 995 996 PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc, 997 linux_dma_trie_free); 998 999 #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) 1000 static dma_addr_t 1001 linux_dma_map_phys_common(struct device *dev, vm_paddr_t phys, size_t len, 1002 bus_dma_tag_t dmat) 1003 { 1004 struct linux_dma_priv *priv; 1005 struct linux_dma_obj *obj; 1006 int error, nseg; 1007 bus_dma_segment_t seg; 1008 1009 priv = dev->dma_priv; 1010 1011 /* 1012 * If the resultant mapping will be entirely 1:1 with the 1013 * physical address, short-circuit the remainder of the 1014 * bus_dma API. This avoids tracking collisions in the pctrie 1015 * with the additional benefit of reducing overhead. 1016 */ 1017 if (bus_dma_id_mapped(dmat, phys, len)) 1018 return (phys); 1019 1020 obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT); 1021 if (obj == NULL) { 1022 return (0); 1023 } 1024 obj->dmat = dmat; 1025 1026 DMA_PRIV_LOCK(priv); 1027 if (bus_dmamap_create(obj->dmat, 0, &obj->dmamap) != 0) { 1028 DMA_PRIV_UNLOCK(priv); 1029 uma_zfree(linux_dma_obj_zone, obj); 1030 return (0); 1031 } 1032 1033 nseg = -1; 1034 if (_bus_dmamap_load_phys(obj->dmat, obj->dmamap, phys, len, 1035 BUS_DMA_NOWAIT, &seg, &nseg) != 0) { 1036 bus_dmamap_destroy(obj->dmat, obj->dmamap); 1037 DMA_PRIV_UNLOCK(priv); 1038 uma_zfree(linux_dma_obj_zone, obj); 1039 counter_u64_add(lkpi_pci_nseg1_fail, 1); 1040 if (linuxkpi_debug) 1041 dump_stack(); 1042 return (0); 1043 } 1044 1045 KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg)); 1046 obj->dma_addr = seg.ds_addr; 1047 1048 error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj); 1049 if (error != 0) { 1050 bus_dmamap_unload(obj->dmat, obj->dmamap); 1051 bus_dmamap_destroy(obj->dmat, obj->dmamap); 1052 DMA_PRIV_UNLOCK(priv); 1053 uma_zfree(linux_dma_obj_zone, obj); 1054 return (0); 1055 } 1056 DMA_PRIV_UNLOCK(priv); 1057 return (obj->dma_addr); 1058 } 1059 #else 1060 static dma_addr_t 1061 linux_dma_map_phys_common(struct device *dev __unused, vm_paddr_t phys, 1062 size_t len __unused, bus_dma_tag_t dmat __unused) 1063 { 1064 return (phys); 1065 } 1066 #endif 1067 1068 dma_addr_t 1069 linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len) 1070 { 1071 struct linux_dma_priv *priv; 1072 1073 priv = dev->dma_priv; 1074 return (linux_dma_map_phys_common(dev, phys, len, priv->dmat)); 1075 } 1076 1077 #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) 1078 void 1079 linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len) 1080 { 1081 struct linux_dma_priv *priv; 1082 struct linux_dma_obj *obj; 1083 1084 priv = dev->dma_priv; 1085 1086 if (pctrie_is_empty(&priv->ptree)) 1087 return; 1088 1089 DMA_PRIV_LOCK(priv); 1090 obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr); 1091 if (obj == NULL) { 1092 DMA_PRIV_UNLOCK(priv); 1093 return; 1094 } 1095 LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr); 1096 bus_dmamap_unload(obj->dmat, obj->dmamap); 1097 bus_dmamap_destroy(obj->dmat, obj->dmamap); 1098 DMA_PRIV_UNLOCK(priv); 1099 1100 uma_zfree(linux_dma_obj_zone, obj); 1101 } 1102 #else 1103 void 1104 linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len) 1105 { 1106 } 1107 #endif 1108 1109 void * 1110 linux_dma_alloc_coherent(struct device *dev, size_t size, 1111 dma_addr_t *dma_handle, gfp_t flag) 1112 { 1113 struct linux_dma_priv *priv; 1114 vm_paddr_t high; 1115 size_t align; 1116 void *mem; 1117 1118 if (dev == NULL || dev->dma_priv == NULL) { 1119 *dma_handle = 0; 1120 return (NULL); 1121 } 1122 priv = dev->dma_priv; 1123 if (priv->dma_coherent_mask) 1124 high = priv->dma_coherent_mask; 1125 else 1126 /* Coherent is lower 32bit only by default in Linux. */ 1127 high = BUS_SPACE_MAXADDR_32BIT; 1128 align = PAGE_SIZE << get_order(size); 1129 /* Always zero the allocation. */ 1130 flag |= M_ZERO; 1131 mem = kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high, 1132 align, 0, VM_MEMATTR_DEFAULT); 1133 if (mem != NULL) { 1134 *dma_handle = linux_dma_map_phys_common(dev, vtophys(mem), size, 1135 priv->dmat_coherent); 1136 if (*dma_handle == 0) { 1137 kmem_free(mem, size); 1138 mem = NULL; 1139 } 1140 } else { 1141 *dma_handle = 0; 1142 } 1143 return (mem); 1144 } 1145 1146 struct lkpi_devres_dmam_coherent { 1147 size_t size; 1148 dma_addr_t *handle; 1149 void *mem; 1150 }; 1151 1152 static void 1153 lkpi_dmam_free_coherent(struct device *dev, void *p) 1154 { 1155 struct lkpi_devres_dmam_coherent *dr; 1156 1157 dr = p; 1158 dma_free_coherent(dev, dr->size, dr->mem, *dr->handle); 1159 } 1160 1161 void * 1162 linuxkpi_dmam_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, 1163 gfp_t flag) 1164 { 1165 struct lkpi_devres_dmam_coherent *dr; 1166 1167 dr = lkpi_devres_alloc(lkpi_dmam_free_coherent, 1168 sizeof(*dr), GFP_KERNEL | __GFP_ZERO); 1169 1170 if (dr == NULL) 1171 return (NULL); 1172 1173 dr->size = size; 1174 dr->mem = linux_dma_alloc_coherent(dev, size, dma_handle, flag); 1175 dr->handle = dma_handle; 1176 if (dr->mem == NULL) { 1177 lkpi_devres_free(dr); 1178 return (NULL); 1179 } 1180 1181 lkpi_devres_add(dev, dr); 1182 return (dr->mem); 1183 } 1184 1185 void 1186 linuxkpi_dma_sync(struct device *dev, dma_addr_t dma_addr, size_t size, 1187 bus_dmasync_op_t op) 1188 { 1189 struct linux_dma_priv *priv; 1190 struct linux_dma_obj *obj; 1191 1192 priv = dev->dma_priv; 1193 1194 if (pctrie_is_empty(&priv->ptree)) 1195 return; 1196 1197 DMA_PRIV_LOCK(priv); 1198 obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr); 1199 if (obj == NULL) { 1200 DMA_PRIV_UNLOCK(priv); 1201 return; 1202 } 1203 1204 bus_dmamap_sync(obj->dmat, obj->dmamap, op); 1205 DMA_PRIV_UNLOCK(priv); 1206 } 1207 1208 int 1209 linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents, 1210 enum dma_data_direction direction, unsigned long attrs __unused) 1211 { 1212 struct linux_dma_priv *priv; 1213 struct scatterlist *sg; 1214 int i, nseg; 1215 bus_dma_segment_t seg; 1216 1217 priv = dev->dma_priv; 1218 1219 DMA_PRIV_LOCK(priv); 1220 1221 /* create common DMA map in the first S/G entry */ 1222 if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) { 1223 DMA_PRIV_UNLOCK(priv); 1224 return (0); 1225 } 1226 1227 /* load all S/G list entries */ 1228 for_each_sg(sgl, sg, nents, i) { 1229 nseg = -1; 1230 if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map, 1231 sg_phys(sg), sg->length, BUS_DMA_NOWAIT, 1232 &seg, &nseg) != 0) { 1233 bus_dmamap_unload(priv->dmat, sgl->dma_map); 1234 bus_dmamap_destroy(priv->dmat, sgl->dma_map); 1235 DMA_PRIV_UNLOCK(priv); 1236 return (0); 1237 } 1238 KASSERT(nseg == 0, 1239 ("More than one segment (nseg=%d)", nseg + 1)); 1240 1241 sg_dma_address(sg) = seg.ds_addr; 1242 } 1243 1244 switch (direction) { 1245 case DMA_BIDIRECTIONAL: 1246 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE); 1247 break; 1248 case DMA_TO_DEVICE: 1249 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD); 1250 break; 1251 case DMA_FROM_DEVICE: 1252 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE); 1253 break; 1254 default: 1255 break; 1256 } 1257 1258 DMA_PRIV_UNLOCK(priv); 1259 1260 return (nents); 1261 } 1262 1263 void 1264 linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl, 1265 int nents __unused, enum dma_data_direction direction, 1266 unsigned long attrs __unused) 1267 { 1268 struct linux_dma_priv *priv; 1269 1270 priv = dev->dma_priv; 1271 1272 DMA_PRIV_LOCK(priv); 1273 1274 switch (direction) { 1275 case DMA_BIDIRECTIONAL: 1276 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD); 1277 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD); 1278 break; 1279 case DMA_TO_DEVICE: 1280 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTWRITE); 1281 break; 1282 case DMA_FROM_DEVICE: 1283 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD); 1284 break; 1285 default: 1286 break; 1287 } 1288 1289 bus_dmamap_unload(priv->dmat, sgl->dma_map); 1290 bus_dmamap_destroy(priv->dmat, sgl->dma_map); 1291 DMA_PRIV_UNLOCK(priv); 1292 } 1293 1294 struct dma_pool { 1295 struct device *pool_device; 1296 uma_zone_t pool_zone; 1297 struct mtx pool_lock; 1298 bus_dma_tag_t pool_dmat; 1299 size_t pool_entry_size; 1300 struct pctrie pool_ptree; 1301 }; 1302 1303 #define DMA_POOL_LOCK(pool) mtx_lock(&(pool)->pool_lock) 1304 #define DMA_POOL_UNLOCK(pool) mtx_unlock(&(pool)->pool_lock) 1305 1306 static inline int 1307 dma_pool_obj_ctor(void *mem, int size, void *arg, int flags) 1308 { 1309 struct linux_dma_obj *obj = mem; 1310 struct dma_pool *pool = arg; 1311 int error, nseg; 1312 bus_dma_segment_t seg; 1313 1314 nseg = -1; 1315 DMA_POOL_LOCK(pool); 1316 error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap, 1317 vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT, 1318 &seg, &nseg); 1319 DMA_POOL_UNLOCK(pool); 1320 if (error != 0) { 1321 return (error); 1322 } 1323 KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg)); 1324 obj->dma_addr = seg.ds_addr; 1325 1326 return (0); 1327 } 1328 1329 static void 1330 dma_pool_obj_dtor(void *mem, int size, void *arg) 1331 { 1332 struct linux_dma_obj *obj = mem; 1333 struct dma_pool *pool = arg; 1334 1335 DMA_POOL_LOCK(pool); 1336 bus_dmamap_unload(pool->pool_dmat, obj->dmamap); 1337 DMA_POOL_UNLOCK(pool); 1338 } 1339 1340 static int 1341 dma_pool_obj_import(void *arg, void **store, int count, int domain __unused, 1342 int flags) 1343 { 1344 struct dma_pool *pool = arg; 1345 struct linux_dma_obj *obj; 1346 int error, i; 1347 1348 for (i = 0; i < count; i++) { 1349 obj = uma_zalloc(linux_dma_obj_zone, flags); 1350 if (obj == NULL) 1351 break; 1352 1353 error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr, 1354 BUS_DMA_NOWAIT, &obj->dmamap); 1355 if (error!= 0) { 1356 uma_zfree(linux_dma_obj_zone, obj); 1357 break; 1358 } 1359 1360 store[i] = obj; 1361 } 1362 1363 return (i); 1364 } 1365 1366 static void 1367 dma_pool_obj_release(void *arg, void **store, int count) 1368 { 1369 struct dma_pool *pool = arg; 1370 struct linux_dma_obj *obj; 1371 int i; 1372 1373 for (i = 0; i < count; i++) { 1374 obj = store[i]; 1375 bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap); 1376 uma_zfree(linux_dma_obj_zone, obj); 1377 } 1378 } 1379 1380 struct dma_pool * 1381 linux_dma_pool_create(char *name, struct device *dev, size_t size, 1382 size_t align, size_t boundary) 1383 { 1384 struct linux_dma_priv *priv; 1385 struct dma_pool *pool; 1386 1387 priv = dev->dma_priv; 1388 1389 pool = kzalloc(sizeof(*pool), GFP_KERNEL); 1390 pool->pool_device = dev; 1391 pool->pool_entry_size = size; 1392 1393 if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev), 1394 align, boundary, /* alignment, boundary */ 1395 priv->dma_mask, /* lowaddr */ 1396 BUS_SPACE_MAXADDR, /* highaddr */ 1397 NULL, NULL, /* filtfunc, filtfuncarg */ 1398 size, /* maxsize */ 1399 1, /* nsegments */ 1400 size, /* maxsegsz */ 1401 0, /* flags */ 1402 NULL, NULL, /* lockfunc, lockfuncarg */ 1403 &pool->pool_dmat)) { 1404 kfree(pool); 1405 return (NULL); 1406 } 1407 1408 pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor, 1409 dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import, 1410 dma_pool_obj_release, pool, 0); 1411 1412 mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF); 1413 pctrie_init(&pool->pool_ptree); 1414 1415 return (pool); 1416 } 1417 1418 void 1419 linux_dma_pool_destroy(struct dma_pool *pool) 1420 { 1421 1422 uma_zdestroy(pool->pool_zone); 1423 bus_dma_tag_destroy(pool->pool_dmat); 1424 mtx_destroy(&pool->pool_lock); 1425 kfree(pool); 1426 } 1427 1428 void 1429 lkpi_dmam_pool_destroy(struct device *dev, void *p) 1430 { 1431 struct dma_pool *pool; 1432 1433 pool = *(struct dma_pool **)p; 1434 LINUX_DMA_PCTRIE_RECLAIM(&pool->pool_ptree); 1435 linux_dma_pool_destroy(pool); 1436 } 1437 1438 void * 1439 linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, 1440 dma_addr_t *handle) 1441 { 1442 struct linux_dma_obj *obj; 1443 1444 obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags & GFP_NATIVE_MASK); 1445 if (obj == NULL) 1446 return (NULL); 1447 1448 DMA_POOL_LOCK(pool); 1449 if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) { 1450 DMA_POOL_UNLOCK(pool); 1451 uma_zfree_arg(pool->pool_zone, obj, pool); 1452 return (NULL); 1453 } 1454 DMA_POOL_UNLOCK(pool); 1455 1456 *handle = obj->dma_addr; 1457 return (obj->vaddr); 1458 } 1459 1460 void 1461 linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr) 1462 { 1463 struct linux_dma_obj *obj; 1464 1465 DMA_POOL_LOCK(pool); 1466 obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr); 1467 if (obj == NULL) { 1468 DMA_POOL_UNLOCK(pool); 1469 return; 1470 } 1471 LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr); 1472 DMA_POOL_UNLOCK(pool); 1473 1474 uma_zfree_arg(pool->pool_zone, obj, pool); 1475 } 1476 1477 static int 1478 linux_backlight_get_status(device_t dev, struct backlight_props *props) 1479 { 1480 struct pci_dev *pdev; 1481 1482 linux_set_current(curthread); 1483 pdev = device_get_softc(dev); 1484 1485 props->brightness = pdev->dev.bd->props.brightness; 1486 props->brightness = props->brightness * 100 / pdev->dev.bd->props.max_brightness; 1487 props->nlevels = 0; 1488 1489 return (0); 1490 } 1491 1492 static int 1493 linux_backlight_get_info(device_t dev, struct backlight_info *info) 1494 { 1495 struct pci_dev *pdev; 1496 1497 linux_set_current(curthread); 1498 pdev = device_get_softc(dev); 1499 1500 info->type = BACKLIGHT_TYPE_PANEL; 1501 strlcpy(info->name, pdev->dev.bd->name, BACKLIGHTMAXNAMELENGTH); 1502 return (0); 1503 } 1504 1505 static int 1506 linux_backlight_update_status(device_t dev, struct backlight_props *props) 1507 { 1508 struct pci_dev *pdev; 1509 1510 linux_set_current(curthread); 1511 pdev = device_get_softc(dev); 1512 1513 pdev->dev.bd->props.brightness = pdev->dev.bd->props.max_brightness * 1514 props->brightness / 100; 1515 pdev->dev.bd->props.power = props->brightness == 0 ? 1516 4/* FB_BLANK_POWERDOWN */ : 0/* FB_BLANK_UNBLANK */; 1517 return (pdev->dev.bd->ops->update_status(pdev->dev.bd)); 1518 } 1519 1520 struct backlight_device * 1521 linux_backlight_device_register(const char *name, struct device *dev, 1522 void *data, const struct backlight_ops *ops, struct backlight_properties *props) 1523 { 1524 1525 dev->bd = malloc(sizeof(*dev->bd), M_DEVBUF, M_WAITOK | M_ZERO); 1526 dev->bd->ops = ops; 1527 dev->bd->props.type = props->type; 1528 dev->bd->props.max_brightness = props->max_brightness; 1529 dev->bd->props.brightness = props->brightness; 1530 dev->bd->props.power = props->power; 1531 dev->bd->data = data; 1532 dev->bd->dev = dev; 1533 dev->bd->name = strdup(name, M_DEVBUF); 1534 1535 dev->backlight_dev = backlight_register(name, dev->bsddev); 1536 1537 return (dev->bd); 1538 } 1539 1540 void 1541 linux_backlight_device_unregister(struct backlight_device *bd) 1542 { 1543 1544 backlight_destroy(bd->dev->backlight_dev); 1545 free(bd->name, M_DEVBUF); 1546 free(bd, M_DEVBUF); 1547 } 1548