1 /*- 2 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd. 3 * All rights reserved. 4 * Copyright (c) 2020-2022 The FreeBSD Foundation 5 * 6 * Portions of this software were developed by Björn Zeeb 7 * under sponsorship from the FreeBSD Foundation. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice unmodified, this list of conditions, and the following 14 * disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 */ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include <sys/param.h> 35 #include <sys/systm.h> 36 #include <sys/bus.h> 37 #include <sys/malloc.h> 38 #include <sys/kernel.h> 39 #include <sys/sysctl.h> 40 #include <sys/lock.h> 41 #include <sys/mutex.h> 42 #include <sys/fcntl.h> 43 #include <sys/file.h> 44 #include <sys/filio.h> 45 #include <sys/pciio.h> 46 #include <sys/pctrie.h> 47 #include <sys/rwlock.h> 48 49 #include <vm/vm.h> 50 #include <vm/pmap.h> 51 52 #include <machine/stdarg.h> 53 54 #include <dev/pci/pcivar.h> 55 #include <dev/pci/pci_private.h> 56 #include <dev/pci/pci_iov.h> 57 #include <dev/backlight/backlight.h> 58 59 #include <linux/kobject.h> 60 #include <linux/device.h> 61 #include <linux/slab.h> 62 #include <linux/module.h> 63 #include <linux/cdev.h> 64 #include <linux/file.h> 65 #include <linux/sysfs.h> 66 #include <linux/mm.h> 67 #include <linux/io.h> 68 #include <linux/vmalloc.h> 69 #include <linux/pci.h> 70 #include <linux/compat.h> 71 72 #include <linux/backlight.h> 73 74 #include "backlight_if.h" 75 #include "pcib_if.h" 76 77 /* Undef the linux function macro defined in linux/pci.h */ 78 #undef pci_get_class 79 80 static device_probe_t linux_pci_probe; 81 static device_attach_t linux_pci_attach; 82 static device_detach_t linux_pci_detach; 83 static device_suspend_t linux_pci_suspend; 84 static device_resume_t linux_pci_resume; 85 static device_shutdown_t linux_pci_shutdown; 86 static pci_iov_init_t linux_pci_iov_init; 87 static pci_iov_uninit_t linux_pci_iov_uninit; 88 static pci_iov_add_vf_t linux_pci_iov_add_vf; 89 static int linux_backlight_get_status(device_t dev, struct backlight_props *props); 90 static int linux_backlight_update_status(device_t dev, struct backlight_props *props); 91 static int linux_backlight_get_info(device_t dev, struct backlight_info *info); 92 93 static device_method_t pci_methods[] = { 94 DEVMETHOD(device_probe, linux_pci_probe), 95 DEVMETHOD(device_attach, linux_pci_attach), 96 DEVMETHOD(device_detach, linux_pci_detach), 97 DEVMETHOD(device_suspend, linux_pci_suspend), 98 DEVMETHOD(device_resume, linux_pci_resume), 99 DEVMETHOD(device_shutdown, linux_pci_shutdown), 100 DEVMETHOD(pci_iov_init, linux_pci_iov_init), 101 DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit), 102 DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf), 103 104 /* backlight interface */ 105 DEVMETHOD(backlight_update_status, linux_backlight_update_status), 106 DEVMETHOD(backlight_get_status, linux_backlight_get_status), 107 DEVMETHOD(backlight_get_info, linux_backlight_get_info), 108 DEVMETHOD_END 109 }; 110 111 struct linux_dma_priv { 112 uint64_t dma_mask; 113 bus_dma_tag_t dmat; 114 uint64_t dma_coherent_mask; 115 bus_dma_tag_t dmat_coherent; 116 struct mtx lock; 117 struct pctrie ptree; 118 }; 119 #define DMA_PRIV_LOCK(priv) mtx_lock(&(priv)->lock) 120 #define DMA_PRIV_UNLOCK(priv) mtx_unlock(&(priv)->lock) 121 122 static int 123 linux_pdev_dma_uninit(struct pci_dev *pdev) 124 { 125 struct linux_dma_priv *priv; 126 127 priv = pdev->dev.dma_priv; 128 if (priv->dmat) 129 bus_dma_tag_destroy(priv->dmat); 130 if (priv->dmat_coherent) 131 bus_dma_tag_destroy(priv->dmat_coherent); 132 mtx_destroy(&priv->lock); 133 pdev->dev.dma_priv = NULL; 134 free(priv, M_DEVBUF); 135 return (0); 136 } 137 138 static int 139 linux_pdev_dma_init(struct pci_dev *pdev) 140 { 141 struct linux_dma_priv *priv; 142 int error; 143 144 priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO); 145 146 mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF); 147 pctrie_init(&priv->ptree); 148 149 pdev->dev.dma_priv = priv; 150 151 /* Create a default DMA tags. */ 152 error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64)); 153 if (error != 0) 154 goto err; 155 /* Coherent is lower 32bit only by default in Linux. */ 156 error = linux_dma_tag_init_coherent(&pdev->dev, DMA_BIT_MASK(32)); 157 if (error != 0) 158 goto err; 159 160 return (error); 161 162 err: 163 linux_pdev_dma_uninit(pdev); 164 return (error); 165 } 166 167 int 168 linux_dma_tag_init(struct device *dev, u64 dma_mask) 169 { 170 struct linux_dma_priv *priv; 171 int error; 172 173 priv = dev->dma_priv; 174 175 if (priv->dmat) { 176 if (priv->dma_mask == dma_mask) 177 return (0); 178 179 bus_dma_tag_destroy(priv->dmat); 180 } 181 182 priv->dma_mask = dma_mask; 183 184 error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev), 185 1, 0, /* alignment, boundary */ 186 dma_mask, /* lowaddr */ 187 BUS_SPACE_MAXADDR, /* highaddr */ 188 NULL, NULL, /* filtfunc, filtfuncarg */ 189 BUS_SPACE_MAXSIZE, /* maxsize */ 190 1, /* nsegments */ 191 BUS_SPACE_MAXSIZE, /* maxsegsz */ 192 0, /* flags */ 193 NULL, NULL, /* lockfunc, lockfuncarg */ 194 &priv->dmat); 195 return (-error); 196 } 197 198 int 199 linux_dma_tag_init_coherent(struct device *dev, u64 dma_mask) 200 { 201 struct linux_dma_priv *priv; 202 int error; 203 204 priv = dev->dma_priv; 205 206 if (priv->dmat_coherent) { 207 if (priv->dma_coherent_mask == dma_mask) 208 return (0); 209 210 bus_dma_tag_destroy(priv->dmat_coherent); 211 } 212 213 priv->dma_coherent_mask = dma_mask; 214 215 error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev), 216 1, 0, /* alignment, boundary */ 217 dma_mask, /* lowaddr */ 218 BUS_SPACE_MAXADDR, /* highaddr */ 219 NULL, NULL, /* filtfunc, filtfuncarg */ 220 BUS_SPACE_MAXSIZE, /* maxsize */ 221 1, /* nsegments */ 222 BUS_SPACE_MAXSIZE, /* maxsegsz */ 223 0, /* flags */ 224 NULL, NULL, /* lockfunc, lockfuncarg */ 225 &priv->dmat_coherent); 226 return (-error); 227 } 228 229 static struct pci_driver * 230 linux_pci_find(device_t dev, const struct pci_device_id **idp) 231 { 232 const struct pci_device_id *id; 233 struct pci_driver *pdrv; 234 uint16_t vendor; 235 uint16_t device; 236 uint16_t subvendor; 237 uint16_t subdevice; 238 239 vendor = pci_get_vendor(dev); 240 device = pci_get_device(dev); 241 subvendor = pci_get_subvendor(dev); 242 subdevice = pci_get_subdevice(dev); 243 244 spin_lock(&pci_lock); 245 list_for_each_entry(pdrv, &pci_drivers, node) { 246 for (id = pdrv->id_table; id->vendor != 0; id++) { 247 if (vendor == id->vendor && 248 (PCI_ANY_ID == id->device || device == id->device) && 249 (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) && 250 (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) { 251 *idp = id; 252 spin_unlock(&pci_lock); 253 return (pdrv); 254 } 255 } 256 } 257 spin_unlock(&pci_lock); 258 return (NULL); 259 } 260 261 static void 262 lkpi_pci_dev_release(struct device *dev) 263 { 264 265 lkpi_devres_release_free_list(dev); 266 spin_lock_destroy(&dev->devres_lock); 267 } 268 269 static void 270 lkpifill_pci_dev(device_t dev, struct pci_dev *pdev) 271 { 272 273 pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev)); 274 pdev->vendor = pci_get_vendor(dev); 275 pdev->device = pci_get_device(dev); 276 pdev->subsystem_vendor = pci_get_subvendor(dev); 277 pdev->subsystem_device = pci_get_subdevice(dev); 278 pdev->class = pci_get_class(dev); 279 pdev->revision = pci_get_revid(dev); 280 pdev->bus = malloc(sizeof(*pdev->bus), M_DEVBUF, M_WAITOK | M_ZERO); 281 /* 282 * This should be the upstream bridge; pci_upstream_bridge() 283 * handles that case on demand as otherwise we'll shadow the 284 * entire PCI hierarchy. 285 */ 286 pdev->bus->self = pdev; 287 pdev->bus->number = pci_get_bus(dev); 288 pdev->bus->domain = pci_get_domain(dev); 289 pdev->dev.bsddev = dev; 290 pdev->dev.parent = &linux_root_device; 291 pdev->dev.release = lkpi_pci_dev_release; 292 INIT_LIST_HEAD(&pdev->dev.irqents); 293 kobject_init(&pdev->dev.kobj, &linux_dev_ktype); 294 kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev)); 295 kobject_add(&pdev->dev.kobj, &linux_root_device.kobj, 296 kobject_name(&pdev->dev.kobj)); 297 spin_lock_init(&pdev->dev.devres_lock); 298 INIT_LIST_HEAD(&pdev->dev.devres_head); 299 } 300 301 static void 302 lkpinew_pci_dev_release(struct device *dev) 303 { 304 struct pci_dev *pdev; 305 306 pdev = to_pci_dev(dev); 307 if (pdev->root != NULL) 308 pci_dev_put(pdev->root); 309 if (pdev->bus->self != pdev) 310 pci_dev_put(pdev->bus->self); 311 free(pdev->bus, M_DEVBUF); 312 free(pdev, M_DEVBUF); 313 } 314 315 struct pci_dev * 316 lkpinew_pci_dev(device_t dev) 317 { 318 struct pci_dev *pdev; 319 320 pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK|M_ZERO); 321 lkpifill_pci_dev(dev, pdev); 322 pdev->dev.release = lkpinew_pci_dev_release; 323 324 return (pdev); 325 } 326 327 struct pci_dev * 328 lkpi_pci_get_class(unsigned int class, struct pci_dev *from) 329 { 330 device_t dev; 331 device_t devfrom = NULL; 332 struct pci_dev *pdev; 333 334 if (from != NULL) 335 devfrom = from->dev.bsddev; 336 337 dev = pci_find_class_from(class >> 16, (class >> 8) & 0xFF, devfrom); 338 if (dev == NULL) 339 return (NULL); 340 341 pdev = lkpinew_pci_dev(dev); 342 return (pdev); 343 } 344 345 struct pci_dev * 346 lkpi_pci_get_domain_bus_and_slot(int domain, unsigned int bus, 347 unsigned int devfn) 348 { 349 device_t dev; 350 struct pci_dev *pdev; 351 352 dev = pci_find_dbsf(domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); 353 if (dev == NULL) 354 return (NULL); 355 356 pdev = lkpinew_pci_dev(dev); 357 return (pdev); 358 } 359 360 static int 361 linux_pci_probe(device_t dev) 362 { 363 const struct pci_device_id *id; 364 struct pci_driver *pdrv; 365 366 if ((pdrv = linux_pci_find(dev, &id)) == NULL) 367 return (ENXIO); 368 if (device_get_driver(dev) != &pdrv->bsddriver) 369 return (ENXIO); 370 device_set_desc(dev, pdrv->name); 371 372 /* Assume BSS initialized (should never return BUS_PROBE_SPECIFIC). */ 373 if (pdrv->bsd_probe_return == 0) 374 return (BUS_PROBE_DEFAULT); 375 else 376 return (pdrv->bsd_probe_return); 377 } 378 379 static int 380 linux_pci_attach(device_t dev) 381 { 382 const struct pci_device_id *id; 383 struct pci_driver *pdrv; 384 struct pci_dev *pdev; 385 386 pdrv = linux_pci_find(dev, &id); 387 pdev = device_get_softc(dev); 388 389 MPASS(pdrv != NULL); 390 MPASS(pdev != NULL); 391 392 return (linux_pci_attach_device(dev, pdrv, id, pdev)); 393 } 394 395 int 396 linux_pci_attach_device(device_t dev, struct pci_driver *pdrv, 397 const struct pci_device_id *id, struct pci_dev *pdev) 398 { 399 struct resource_list_entry *rle; 400 device_t parent; 401 uintptr_t rid; 402 int error; 403 bool isdrm; 404 405 linux_set_current(curthread); 406 407 parent = device_get_parent(dev); 408 isdrm = pdrv != NULL && pdrv->isdrm; 409 410 if (isdrm) { 411 struct pci_devinfo *dinfo; 412 413 dinfo = device_get_ivars(parent); 414 device_set_ivars(dev, dinfo); 415 } 416 417 lkpifill_pci_dev(dev, pdev); 418 if (isdrm) 419 PCI_GET_ID(device_get_parent(parent), parent, PCI_ID_RID, &rid); 420 else 421 PCI_GET_ID(parent, dev, PCI_ID_RID, &rid); 422 pdev->devfn = rid; 423 pdev->pdrv = pdrv; 424 rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0, false); 425 if (rle != NULL) 426 pdev->dev.irq = rle->start; 427 else 428 pdev->dev.irq = LINUX_IRQ_INVALID; 429 pdev->irq = pdev->dev.irq; 430 error = linux_pdev_dma_init(pdev); 431 if (error) 432 goto out_dma_init; 433 434 TAILQ_INIT(&pdev->mmio); 435 436 spin_lock(&pci_lock); 437 list_add(&pdev->links, &pci_devices); 438 spin_unlock(&pci_lock); 439 440 if (pdrv != NULL) { 441 error = pdrv->probe(pdev, id); 442 if (error) 443 goto out_probe; 444 } 445 return (0); 446 447 out_probe: 448 free(pdev->bus, M_DEVBUF); 449 linux_pdev_dma_uninit(pdev); 450 out_dma_init: 451 spin_lock(&pci_lock); 452 list_del(&pdev->links); 453 spin_unlock(&pci_lock); 454 put_device(&pdev->dev); 455 return (-error); 456 } 457 458 static int 459 linux_pci_detach(device_t dev) 460 { 461 struct pci_dev *pdev; 462 463 pdev = device_get_softc(dev); 464 465 MPASS(pdev != NULL); 466 467 device_set_desc(dev, NULL); 468 469 return (linux_pci_detach_device(pdev)); 470 } 471 472 int 473 linux_pci_detach_device(struct pci_dev *pdev) 474 { 475 476 linux_set_current(curthread); 477 478 if (pdev->pdrv != NULL) 479 pdev->pdrv->remove(pdev); 480 481 if (pdev->root != NULL) 482 pci_dev_put(pdev->root); 483 free(pdev->bus, M_DEVBUF); 484 linux_pdev_dma_uninit(pdev); 485 486 spin_lock(&pci_lock); 487 list_del(&pdev->links); 488 spin_unlock(&pci_lock); 489 put_device(&pdev->dev); 490 491 return (0); 492 } 493 494 static int 495 lkpi_pci_disable_dev(struct device *dev) 496 { 497 498 (void) pci_disable_io(dev->bsddev, SYS_RES_MEMORY); 499 (void) pci_disable_io(dev->bsddev, SYS_RES_IOPORT); 500 return (0); 501 } 502 503 void 504 lkpi_pci_devres_release(struct device *dev, void *p) 505 { 506 struct pci_devres *dr; 507 struct pci_dev *pdev; 508 int bar; 509 510 pdev = to_pci_dev(dev); 511 dr = p; 512 513 if (pdev->msix_enabled) 514 lkpi_pci_disable_msix(pdev); 515 if (pdev->msi_enabled) 516 lkpi_pci_disable_msi(pdev); 517 518 if (dr->enable_io && lkpi_pci_disable_dev(dev) == 0) 519 dr->enable_io = false; 520 521 if (dr->region_mask == 0) 522 return; 523 for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) { 524 525 if ((dr->region_mask & (1 << bar)) == 0) 526 continue; 527 pci_release_region(pdev, bar); 528 } 529 } 530 531 void 532 lkpi_pcim_iomap_table_release(struct device *dev, void *p) 533 { 534 struct pcim_iomap_devres *dr; 535 struct pci_dev *pdev; 536 int bar; 537 538 dr = p; 539 pdev = to_pci_dev(dev); 540 for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) { 541 542 if (dr->mmio_table[bar] == NULL) 543 continue; 544 545 pci_iounmap(pdev, dr->mmio_table[bar]); 546 } 547 } 548 549 static int 550 linux_pci_suspend(device_t dev) 551 { 552 const struct dev_pm_ops *pmops; 553 struct pm_message pm = { }; 554 struct pci_dev *pdev; 555 int error; 556 557 error = 0; 558 linux_set_current(curthread); 559 pdev = device_get_softc(dev); 560 pmops = pdev->pdrv->driver.pm; 561 562 if (pdev->pdrv->suspend != NULL) 563 error = -pdev->pdrv->suspend(pdev, pm); 564 else if (pmops != NULL && pmops->suspend != NULL) { 565 error = -pmops->suspend(&pdev->dev); 566 if (error == 0 && pmops->suspend_late != NULL) 567 error = -pmops->suspend_late(&pdev->dev); 568 } 569 return (error); 570 } 571 572 static int 573 linux_pci_resume(device_t dev) 574 { 575 const struct dev_pm_ops *pmops; 576 struct pci_dev *pdev; 577 int error; 578 579 error = 0; 580 linux_set_current(curthread); 581 pdev = device_get_softc(dev); 582 pmops = pdev->pdrv->driver.pm; 583 584 if (pdev->pdrv->resume != NULL) 585 error = -pdev->pdrv->resume(pdev); 586 else if (pmops != NULL && pmops->resume != NULL) { 587 if (pmops->resume_early != NULL) 588 error = -pmops->resume_early(&pdev->dev); 589 if (error == 0 && pmops->resume != NULL) 590 error = -pmops->resume(&pdev->dev); 591 } 592 return (error); 593 } 594 595 static int 596 linux_pci_shutdown(device_t dev) 597 { 598 struct pci_dev *pdev; 599 600 linux_set_current(curthread); 601 pdev = device_get_softc(dev); 602 if (pdev->pdrv->shutdown != NULL) 603 pdev->pdrv->shutdown(pdev); 604 return (0); 605 } 606 607 static int 608 linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config) 609 { 610 struct pci_dev *pdev; 611 int error; 612 613 linux_set_current(curthread); 614 pdev = device_get_softc(dev); 615 if (pdev->pdrv->bsd_iov_init != NULL) 616 error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config); 617 else 618 error = EINVAL; 619 return (error); 620 } 621 622 static void 623 linux_pci_iov_uninit(device_t dev) 624 { 625 struct pci_dev *pdev; 626 627 linux_set_current(curthread); 628 pdev = device_get_softc(dev); 629 if (pdev->pdrv->bsd_iov_uninit != NULL) 630 pdev->pdrv->bsd_iov_uninit(dev); 631 } 632 633 static int 634 linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config) 635 { 636 struct pci_dev *pdev; 637 int error; 638 639 linux_set_current(curthread); 640 pdev = device_get_softc(dev); 641 if (pdev->pdrv->bsd_iov_add_vf != NULL) 642 error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config); 643 else 644 error = EINVAL; 645 return (error); 646 } 647 648 static int 649 _linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc) 650 { 651 int error; 652 653 linux_set_current(curthread); 654 spin_lock(&pci_lock); 655 list_add(&pdrv->node, &pci_drivers); 656 spin_unlock(&pci_lock); 657 if (pdrv->bsddriver.name == NULL) 658 pdrv->bsddriver.name = pdrv->name; 659 pdrv->bsddriver.methods = pci_methods; 660 pdrv->bsddriver.size = sizeof(struct pci_dev); 661 662 bus_topo_lock(); 663 error = devclass_add_driver(dc, &pdrv->bsddriver, 664 BUS_PASS_DEFAULT, &pdrv->bsdclass); 665 bus_topo_unlock(); 666 return (-error); 667 } 668 669 int 670 linux_pci_register_driver(struct pci_driver *pdrv) 671 { 672 devclass_t dc; 673 674 dc = devclass_find("pci"); 675 if (dc == NULL) 676 return (-ENXIO); 677 pdrv->isdrm = false; 678 return (_linux_pci_register_driver(pdrv, dc)); 679 } 680 681 struct resource_list_entry * 682 linux_pci_reserve_bar(struct pci_dev *pdev, struct resource_list *rl, 683 int type, int rid) 684 { 685 device_t dev; 686 struct resource *res; 687 688 KASSERT(type == SYS_RES_IOPORT || type == SYS_RES_MEMORY, 689 ("trying to reserve non-BAR type %d", type)); 690 691 dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ? 692 device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev; 693 res = pci_reserve_map(device_get_parent(dev), dev, type, &rid, 0, ~0, 694 1, 1, 0); 695 if (res == NULL) 696 return (NULL); 697 return (resource_list_find(rl, type, rid)); 698 } 699 700 unsigned long 701 pci_resource_start(struct pci_dev *pdev, int bar) 702 { 703 struct resource_list_entry *rle; 704 rman_res_t newstart; 705 device_t dev; 706 int error; 707 708 if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL) 709 return (0); 710 dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ? 711 device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev; 712 error = bus_translate_resource(dev, rle->type, rle->start, &newstart); 713 if (error != 0) { 714 device_printf(pdev->dev.bsddev, 715 "translate of %#jx failed: %d\n", 716 (uintmax_t)rle->start, error); 717 return (0); 718 } 719 return (newstart); 720 } 721 722 unsigned long 723 pci_resource_len(struct pci_dev *pdev, int bar) 724 { 725 struct resource_list_entry *rle; 726 727 if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL) 728 return (0); 729 return (rle->count); 730 } 731 732 int 733 linux_pci_register_drm_driver(struct pci_driver *pdrv) 734 { 735 devclass_t dc; 736 737 dc = devclass_create("vgapci"); 738 if (dc == NULL) 739 return (-ENXIO); 740 pdrv->isdrm = true; 741 pdrv->name = "drmn"; 742 return (_linux_pci_register_driver(pdrv, dc)); 743 } 744 745 void 746 linux_pci_unregister_driver(struct pci_driver *pdrv) 747 { 748 devclass_t bus; 749 750 bus = devclass_find("pci"); 751 752 spin_lock(&pci_lock); 753 list_del(&pdrv->node); 754 spin_unlock(&pci_lock); 755 bus_topo_lock(); 756 if (bus != NULL) 757 devclass_delete_driver(bus, &pdrv->bsddriver); 758 bus_topo_unlock(); 759 } 760 761 void 762 linux_pci_unregister_drm_driver(struct pci_driver *pdrv) 763 { 764 devclass_t bus; 765 766 bus = devclass_find("vgapci"); 767 768 spin_lock(&pci_lock); 769 list_del(&pdrv->node); 770 spin_unlock(&pci_lock); 771 bus_topo_lock(); 772 if (bus != NULL) 773 devclass_delete_driver(bus, &pdrv->bsddriver); 774 bus_topo_unlock(); 775 } 776 777 CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t)); 778 779 struct linux_dma_obj { 780 void *vaddr; 781 uint64_t dma_addr; 782 bus_dmamap_t dmamap; 783 bus_dma_tag_t dmat; 784 }; 785 786 static uma_zone_t linux_dma_trie_zone; 787 static uma_zone_t linux_dma_obj_zone; 788 789 static void 790 linux_dma_init(void *arg) 791 { 792 793 linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie", 794 pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL, 795 UMA_ALIGN_PTR, 0); 796 linux_dma_obj_zone = uma_zcreate("linux_dma_object", 797 sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL, 798 UMA_ALIGN_PTR, 0); 799 800 } 801 SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL); 802 803 static void 804 linux_dma_uninit(void *arg) 805 { 806 807 uma_zdestroy(linux_dma_obj_zone); 808 uma_zdestroy(linux_dma_trie_zone); 809 } 810 SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL); 811 812 static void * 813 linux_dma_trie_alloc(struct pctrie *ptree) 814 { 815 816 return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT)); 817 } 818 819 static void 820 linux_dma_trie_free(struct pctrie *ptree, void *node) 821 { 822 823 uma_zfree(linux_dma_trie_zone, node); 824 } 825 826 PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc, 827 linux_dma_trie_free); 828 829 #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) 830 static dma_addr_t 831 linux_dma_map_phys_common(struct device *dev, vm_paddr_t phys, size_t len, 832 bus_dma_tag_t dmat) 833 { 834 struct linux_dma_priv *priv; 835 struct linux_dma_obj *obj; 836 int error, nseg; 837 bus_dma_segment_t seg; 838 839 priv = dev->dma_priv; 840 841 /* 842 * If the resultant mapping will be entirely 1:1 with the 843 * physical address, short-circuit the remainder of the 844 * bus_dma API. This avoids tracking collisions in the pctrie 845 * with the additional benefit of reducing overhead. 846 */ 847 if (bus_dma_id_mapped(dmat, phys, len)) 848 return (phys); 849 850 obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT); 851 if (obj == NULL) { 852 return (0); 853 } 854 obj->dmat = dmat; 855 856 DMA_PRIV_LOCK(priv); 857 if (bus_dmamap_create(obj->dmat, 0, &obj->dmamap) != 0) { 858 DMA_PRIV_UNLOCK(priv); 859 uma_zfree(linux_dma_obj_zone, obj); 860 return (0); 861 } 862 863 nseg = -1; 864 if (_bus_dmamap_load_phys(obj->dmat, obj->dmamap, phys, len, 865 BUS_DMA_NOWAIT, &seg, &nseg) != 0) { 866 bus_dmamap_destroy(obj->dmat, obj->dmamap); 867 DMA_PRIV_UNLOCK(priv); 868 uma_zfree(linux_dma_obj_zone, obj); 869 return (0); 870 } 871 872 KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg)); 873 obj->dma_addr = seg.ds_addr; 874 875 error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj); 876 if (error != 0) { 877 bus_dmamap_unload(obj->dmat, obj->dmamap); 878 bus_dmamap_destroy(obj->dmat, obj->dmamap); 879 DMA_PRIV_UNLOCK(priv); 880 uma_zfree(linux_dma_obj_zone, obj); 881 return (0); 882 } 883 DMA_PRIV_UNLOCK(priv); 884 return (obj->dma_addr); 885 } 886 #else 887 static dma_addr_t 888 linux_dma_map_phys_common(struct device *dev __unused, vm_paddr_t phys, 889 size_t len __unused, bus_dma_tag_t dmat __unused) 890 { 891 return (phys); 892 } 893 #endif 894 895 dma_addr_t 896 linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len) 897 { 898 struct linux_dma_priv *priv; 899 900 priv = dev->dma_priv; 901 return (linux_dma_map_phys_common(dev, phys, len, priv->dmat)); 902 } 903 904 #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) 905 void 906 linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len) 907 { 908 struct linux_dma_priv *priv; 909 struct linux_dma_obj *obj; 910 911 priv = dev->dma_priv; 912 913 if (pctrie_is_empty(&priv->ptree)) 914 return; 915 916 DMA_PRIV_LOCK(priv); 917 obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr); 918 if (obj == NULL) { 919 DMA_PRIV_UNLOCK(priv); 920 return; 921 } 922 LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr); 923 bus_dmamap_unload(obj->dmat, obj->dmamap); 924 bus_dmamap_destroy(obj->dmat, obj->dmamap); 925 DMA_PRIV_UNLOCK(priv); 926 927 uma_zfree(linux_dma_obj_zone, obj); 928 } 929 #else 930 void 931 linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len) 932 { 933 } 934 #endif 935 936 void * 937 linux_dma_alloc_coherent(struct device *dev, size_t size, 938 dma_addr_t *dma_handle, gfp_t flag) 939 { 940 struct linux_dma_priv *priv; 941 vm_paddr_t high; 942 size_t align; 943 void *mem; 944 945 if (dev == NULL || dev->dma_priv == NULL) { 946 *dma_handle = 0; 947 return (NULL); 948 } 949 priv = dev->dma_priv; 950 if (priv->dma_coherent_mask) 951 high = priv->dma_coherent_mask; 952 else 953 /* Coherent is lower 32bit only by default in Linux. */ 954 high = BUS_SPACE_MAXADDR_32BIT; 955 align = PAGE_SIZE << get_order(size); 956 /* Always zero the allocation. */ 957 flag |= M_ZERO; 958 mem = (void *)kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high, 959 align, 0, VM_MEMATTR_DEFAULT); 960 if (mem != NULL) { 961 *dma_handle = linux_dma_map_phys_common(dev, vtophys(mem), size, 962 priv->dmat_coherent); 963 if (*dma_handle == 0) { 964 kmem_free((vm_offset_t)mem, size); 965 mem = NULL; 966 } 967 } else { 968 *dma_handle = 0; 969 } 970 return (mem); 971 } 972 973 void 974 linuxkpi_dma_sync(struct device *dev, dma_addr_t dma_addr, size_t size, 975 bus_dmasync_op_t op) 976 { 977 struct linux_dma_priv *priv; 978 struct linux_dma_obj *obj; 979 980 priv = dev->dma_priv; 981 982 if (pctrie_is_empty(&priv->ptree)) 983 return; 984 985 DMA_PRIV_LOCK(priv); 986 obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr); 987 if (obj == NULL) { 988 DMA_PRIV_UNLOCK(priv); 989 return; 990 } 991 992 bus_dmamap_sync(obj->dmat, obj->dmamap, op); 993 DMA_PRIV_UNLOCK(priv); 994 } 995 996 int 997 linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents, 998 enum dma_data_direction direction, unsigned long attrs __unused) 999 { 1000 struct linux_dma_priv *priv; 1001 struct scatterlist *sg; 1002 int i, nseg; 1003 bus_dma_segment_t seg; 1004 1005 priv = dev->dma_priv; 1006 1007 DMA_PRIV_LOCK(priv); 1008 1009 /* create common DMA map in the first S/G entry */ 1010 if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) { 1011 DMA_PRIV_UNLOCK(priv); 1012 return (0); 1013 } 1014 1015 /* load all S/G list entries */ 1016 for_each_sg(sgl, sg, nents, i) { 1017 nseg = -1; 1018 if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map, 1019 sg_phys(sg), sg->length, BUS_DMA_NOWAIT, 1020 &seg, &nseg) != 0) { 1021 bus_dmamap_unload(priv->dmat, sgl->dma_map); 1022 bus_dmamap_destroy(priv->dmat, sgl->dma_map); 1023 DMA_PRIV_UNLOCK(priv); 1024 return (0); 1025 } 1026 KASSERT(nseg == 0, 1027 ("More than one segment (nseg=%d)", nseg + 1)); 1028 1029 sg_dma_address(sg) = seg.ds_addr; 1030 } 1031 1032 switch (direction) { 1033 case DMA_BIDIRECTIONAL: 1034 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE); 1035 break; 1036 case DMA_TO_DEVICE: 1037 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD); 1038 break; 1039 case DMA_FROM_DEVICE: 1040 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE); 1041 break; 1042 default: 1043 break; 1044 } 1045 1046 DMA_PRIV_UNLOCK(priv); 1047 1048 return (nents); 1049 } 1050 1051 void 1052 linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl, 1053 int nents __unused, enum dma_data_direction direction, 1054 unsigned long attrs __unused) 1055 { 1056 struct linux_dma_priv *priv; 1057 1058 priv = dev->dma_priv; 1059 1060 DMA_PRIV_LOCK(priv); 1061 1062 switch (direction) { 1063 case DMA_BIDIRECTIONAL: 1064 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD); 1065 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD); 1066 break; 1067 case DMA_TO_DEVICE: 1068 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTWRITE); 1069 break; 1070 case DMA_FROM_DEVICE: 1071 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD); 1072 break; 1073 default: 1074 break; 1075 } 1076 1077 bus_dmamap_unload(priv->dmat, sgl->dma_map); 1078 bus_dmamap_destroy(priv->dmat, sgl->dma_map); 1079 DMA_PRIV_UNLOCK(priv); 1080 } 1081 1082 struct dma_pool { 1083 struct device *pool_device; 1084 uma_zone_t pool_zone; 1085 struct mtx pool_lock; 1086 bus_dma_tag_t pool_dmat; 1087 size_t pool_entry_size; 1088 struct pctrie pool_ptree; 1089 }; 1090 1091 #define DMA_POOL_LOCK(pool) mtx_lock(&(pool)->pool_lock) 1092 #define DMA_POOL_UNLOCK(pool) mtx_unlock(&(pool)->pool_lock) 1093 1094 static inline int 1095 dma_pool_obj_ctor(void *mem, int size, void *arg, int flags) 1096 { 1097 struct linux_dma_obj *obj = mem; 1098 struct dma_pool *pool = arg; 1099 int error, nseg; 1100 bus_dma_segment_t seg; 1101 1102 nseg = -1; 1103 DMA_POOL_LOCK(pool); 1104 error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap, 1105 vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT, 1106 &seg, &nseg); 1107 DMA_POOL_UNLOCK(pool); 1108 if (error != 0) { 1109 return (error); 1110 } 1111 KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg)); 1112 obj->dma_addr = seg.ds_addr; 1113 1114 return (0); 1115 } 1116 1117 static void 1118 dma_pool_obj_dtor(void *mem, int size, void *arg) 1119 { 1120 struct linux_dma_obj *obj = mem; 1121 struct dma_pool *pool = arg; 1122 1123 DMA_POOL_LOCK(pool); 1124 bus_dmamap_unload(pool->pool_dmat, obj->dmamap); 1125 DMA_POOL_UNLOCK(pool); 1126 } 1127 1128 static int 1129 dma_pool_obj_import(void *arg, void **store, int count, int domain __unused, 1130 int flags) 1131 { 1132 struct dma_pool *pool = arg; 1133 struct linux_dma_obj *obj; 1134 int error, i; 1135 1136 for (i = 0; i < count; i++) { 1137 obj = uma_zalloc(linux_dma_obj_zone, flags); 1138 if (obj == NULL) 1139 break; 1140 1141 error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr, 1142 BUS_DMA_NOWAIT, &obj->dmamap); 1143 if (error!= 0) { 1144 uma_zfree(linux_dma_obj_zone, obj); 1145 break; 1146 } 1147 1148 store[i] = obj; 1149 } 1150 1151 return (i); 1152 } 1153 1154 static void 1155 dma_pool_obj_release(void *arg, void **store, int count) 1156 { 1157 struct dma_pool *pool = arg; 1158 struct linux_dma_obj *obj; 1159 int i; 1160 1161 for (i = 0; i < count; i++) { 1162 obj = store[i]; 1163 bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap); 1164 uma_zfree(linux_dma_obj_zone, obj); 1165 } 1166 } 1167 1168 struct dma_pool * 1169 linux_dma_pool_create(char *name, struct device *dev, size_t size, 1170 size_t align, size_t boundary) 1171 { 1172 struct linux_dma_priv *priv; 1173 struct dma_pool *pool; 1174 1175 priv = dev->dma_priv; 1176 1177 pool = kzalloc(sizeof(*pool), GFP_KERNEL); 1178 pool->pool_device = dev; 1179 pool->pool_entry_size = size; 1180 1181 if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev), 1182 align, boundary, /* alignment, boundary */ 1183 priv->dma_mask, /* lowaddr */ 1184 BUS_SPACE_MAXADDR, /* highaddr */ 1185 NULL, NULL, /* filtfunc, filtfuncarg */ 1186 size, /* maxsize */ 1187 1, /* nsegments */ 1188 size, /* maxsegsz */ 1189 0, /* flags */ 1190 NULL, NULL, /* lockfunc, lockfuncarg */ 1191 &pool->pool_dmat)) { 1192 kfree(pool); 1193 return (NULL); 1194 } 1195 1196 pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor, 1197 dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import, 1198 dma_pool_obj_release, pool, 0); 1199 1200 mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF); 1201 pctrie_init(&pool->pool_ptree); 1202 1203 return (pool); 1204 } 1205 1206 void 1207 linux_dma_pool_destroy(struct dma_pool *pool) 1208 { 1209 1210 uma_zdestroy(pool->pool_zone); 1211 bus_dma_tag_destroy(pool->pool_dmat); 1212 mtx_destroy(&pool->pool_lock); 1213 kfree(pool); 1214 } 1215 1216 void 1217 lkpi_dmam_pool_destroy(struct device *dev, void *p) 1218 { 1219 struct dma_pool *pool; 1220 1221 pool = *(struct dma_pool **)p; 1222 LINUX_DMA_PCTRIE_RECLAIM(&pool->pool_ptree); 1223 linux_dma_pool_destroy(pool); 1224 } 1225 1226 void * 1227 linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, 1228 dma_addr_t *handle) 1229 { 1230 struct linux_dma_obj *obj; 1231 1232 obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags & GFP_NATIVE_MASK); 1233 if (obj == NULL) 1234 return (NULL); 1235 1236 DMA_POOL_LOCK(pool); 1237 if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) { 1238 DMA_POOL_UNLOCK(pool); 1239 uma_zfree_arg(pool->pool_zone, obj, pool); 1240 return (NULL); 1241 } 1242 DMA_POOL_UNLOCK(pool); 1243 1244 *handle = obj->dma_addr; 1245 return (obj->vaddr); 1246 } 1247 1248 void 1249 linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr) 1250 { 1251 struct linux_dma_obj *obj; 1252 1253 DMA_POOL_LOCK(pool); 1254 obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr); 1255 if (obj == NULL) { 1256 DMA_POOL_UNLOCK(pool); 1257 return; 1258 } 1259 LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr); 1260 DMA_POOL_UNLOCK(pool); 1261 1262 uma_zfree_arg(pool->pool_zone, obj, pool); 1263 } 1264 1265 static int 1266 linux_backlight_get_status(device_t dev, struct backlight_props *props) 1267 { 1268 struct pci_dev *pdev; 1269 1270 linux_set_current(curthread); 1271 pdev = device_get_softc(dev); 1272 1273 props->brightness = pdev->dev.bd->props.brightness; 1274 props->brightness = props->brightness * 100 / pdev->dev.bd->props.max_brightness; 1275 props->nlevels = 0; 1276 1277 return (0); 1278 } 1279 1280 static int 1281 linux_backlight_get_info(device_t dev, struct backlight_info *info) 1282 { 1283 struct pci_dev *pdev; 1284 1285 linux_set_current(curthread); 1286 pdev = device_get_softc(dev); 1287 1288 info->type = BACKLIGHT_TYPE_PANEL; 1289 strlcpy(info->name, pdev->dev.bd->name, BACKLIGHTMAXNAMELENGTH); 1290 return (0); 1291 } 1292 1293 static int 1294 linux_backlight_update_status(device_t dev, struct backlight_props *props) 1295 { 1296 struct pci_dev *pdev; 1297 1298 linux_set_current(curthread); 1299 pdev = device_get_softc(dev); 1300 1301 pdev->dev.bd->props.brightness = pdev->dev.bd->props.max_brightness * 1302 props->brightness / 100; 1303 pdev->dev.bd->props.power = props->brightness == 0 ? 1304 4/* FB_BLANK_POWERDOWN */ : 0/* FB_BLANK_UNBLANK */; 1305 return (pdev->dev.bd->ops->update_status(pdev->dev.bd)); 1306 } 1307 1308 struct backlight_device * 1309 linux_backlight_device_register(const char *name, struct device *dev, 1310 void *data, const struct backlight_ops *ops, struct backlight_properties *props) 1311 { 1312 1313 dev->bd = malloc(sizeof(*dev->bd), M_DEVBUF, M_WAITOK | M_ZERO); 1314 dev->bd->ops = ops; 1315 dev->bd->props.type = props->type; 1316 dev->bd->props.max_brightness = props->max_brightness; 1317 dev->bd->props.brightness = props->brightness; 1318 dev->bd->props.power = props->power; 1319 dev->bd->data = data; 1320 dev->bd->dev = dev; 1321 dev->bd->name = strdup(name, M_DEVBUF); 1322 1323 dev->backlight_dev = backlight_register(name, dev->bsddev); 1324 1325 return (dev->bd); 1326 } 1327 1328 void 1329 linux_backlight_device_unregister(struct backlight_device *bd) 1330 { 1331 1332 backlight_destroy(bd->dev->backlight_dev); 1333 free(bd->name, M_DEVBUF); 1334 free(bd, M_DEVBUF); 1335 } 1336