/*-
 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 * Copyright (c) 2020-2021 The FreeBSD Foundation
 *
 * Portions of this software were developed by Björn Zeeb
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/pciio.h>
#include <sys/pctrie.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/stdarg.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <dev/pci/pci_iov.h>
#include <dev/backlight/backlight.h>

#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/compat.h>

#include <linux/backlight.h>

#include "backlight_if.h"
#include "pcib_if.h"

/* Undef the linux function macro defined in linux/pci.h */
#undef pci_get_class

static device_probe_t linux_pci_probe;
static device_attach_t linux_pci_attach;
static device_detach_t linux_pci_detach;
static device_suspend_t linux_pci_suspend;
static device_resume_t linux_pci_resume;
static device_shutdown_t linux_pci_shutdown;
static pci_iov_init_t linux_pci_iov_init;
static pci_iov_uninit_t linux_pci_iov_uninit;
static pci_iov_add_vf_t linux_pci_iov_add_vf;
static int linux_backlight_get_status(device_t dev, struct backlight_props *props);
static int linux_backlight_update_status(device_t dev, struct backlight_props *props);
static int linux_backlight_get_info(device_t dev, struct backlight_info *info);

static device_method_t pci_methods[] = {
	DEVMETHOD(device_probe, linux_pci_probe),
	DEVMETHOD(device_attach, linux_pci_attach),
	DEVMETHOD(device_detach, linux_pci_detach),
	DEVMETHOD(device_suspend, linux_pci_suspend),
	DEVMETHOD(device_resume, linux_pci_resume),
	DEVMETHOD(device_shutdown, linux_pci_shutdown),
	DEVMETHOD(pci_iov_init, linux_pci_iov_init),
	DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf),

	/* backlight interface */
	DEVMETHOD(backlight_update_status, linux_backlight_update_status),
	DEVMETHOD(backlight_get_status, linux_backlight_get_status),
	DEVMETHOD(backlight_get_info, linux_backlight_get_info),
	DEVMETHOD_END
};

struct linux_dma_priv {
	uint64_t	dma_mask;
	bus_dma_tag_t	dmat;
	uint64_t	dma_coherent_mask;
	bus_dma_tag_t	dmat_coherent;
	struct mtx	lock;
	struct pctrie	ptree;
};
#define	DMA_PRIV_LOCK(priv) mtx_lock(&(priv)->lock)
#define	DMA_PRIV_UNLOCK(priv) mtx_unlock(&(priv)->lock)

static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;

	priv = pdev->dev.dma_priv;
	if (priv->dmat)
		bus_dma_tag_destroy(priv->dmat);
	if (priv->dmat_coherent)
		bus_dma_tag_destroy(priv->dmat_coherent);
	mtx_destroy(&priv->lock);
	pdev->dev.dma_priv = NULL;
	free(priv, M_DEVBUF);
	return (0);
}

static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;
	int error;

	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);

	mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);
	pctrie_init(&priv->ptree);

	pdev->dev.dma_priv = priv;

	/* Create the default DMA tags. */
	error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
	if (error != 0)
		goto err;
	/* Coherent memory is 32-bit only by default in Linux. */
	error = linux_dma_tag_init_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (error != 0)
		goto err;

	return (error);

err:
	linux_pdev_dma_uninit(pdev);
	return (error);
}
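
/*
 * Illustrative only: a driver that needs masks other than the 64-bit
 * streaming / 32-bit coherent defaults installed above would typically
 * request them from its probe routine, e.g. (the 40-bit width below is
 * a hypothetical example):
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(40)) != 0)
 *		return (-ENODEV);
 *	if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(40)) != 0)
 *		return (-ENODEV);
 *
 * Such requests are serviced by linux_dma_tag_init() and
 * linux_dma_tag_init_coherent() below, which tear down and re-create
 * the corresponding bus_dma tag.
 */
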
int
linux_dma_tag_init(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat) {
		if (priv->dma_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat);
	}

	priv->dma_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat);
	return (-error);
}

int
linux_dma_tag_init_coherent(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat_coherent) {
		if (priv->dma_coherent_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat_coherent);
	}

	priv->dma_coherent_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat_coherent);
	return (-error);
}
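
/*
 * Sketch (hypothetical IDs): the id_table walked by linux_pci_find()
 * below follows the Linux convention; entries are matched on vendor,
 * device, subvendor and subdevice, with PCI_ANY_ID acting as a
 * wildcard, and the table is terminated by a zeroed entry:
 *
 *	static const struct pci_device_id mydrv_ids[] = {
 *		{ .vendor = 0x8086, .device = 0x1234,
 *		  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID },
 *		{ 0, }
 *	};
 */
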
static struct pci_driver *
linux_pci_find(device_t dev, const struct pci_device_id **idp)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;
	uint16_t subdevice;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);
	subvendor = pci_get_subvendor(dev);
	subdevice = pci_get_subdevice(dev);

	spin_lock(&pci_lock);
	list_for_each_entry(pdrv, &pci_drivers, node) {
		for (id = pdrv->id_table; id->vendor != 0; id++) {
			if (vendor == id->vendor &&
			    (PCI_ANY_ID == id->device || device == id->device) &&
			    (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) &&
			    (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) {
				*idp = id;
				spin_unlock(&pci_lock);
				return (pdrv);
			}
		}
	}
	spin_unlock(&pci_lock);
	return (NULL);
}

static void
lkpi_pci_dev_release(struct device *dev)
{

	lkpi_devres_release_free_list(dev);
	spin_lock_destroy(&dev->devres_lock);
}

static void
lkpifill_pci_dev(device_t dev, struct pci_dev *pdev)
{

	pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
	pdev->vendor = pci_get_vendor(dev);
	pdev->device = pci_get_device(dev);
	pdev->subsystem_vendor = pci_get_subvendor(dev);
	pdev->subsystem_device = pci_get_subdevice(dev);
	pdev->class = pci_get_class(dev);
	pdev->revision = pci_get_revid(dev);
	pdev->bus = malloc(sizeof(*pdev->bus), M_DEVBUF, M_WAITOK | M_ZERO);
	pdev->bus->self = pdev;
	pdev->bus->number = pci_get_bus(dev);
	pdev->bus->domain = pci_get_domain(dev);
	pdev->dev.bsddev = dev;
	pdev->dev.parent = &linux_root_device;
	pdev->dev.release = lkpi_pci_dev_release;
	INIT_LIST_HEAD(&pdev->dev.irqents);
	kobject_init(&pdev->dev.kobj, &linux_dev_ktype);
	kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev));
	kobject_add(&pdev->dev.kobj, &linux_root_device.kobj,
	    kobject_name(&pdev->dev.kobj));
	spin_lock_init(&pdev->dev.devres_lock);
	INIT_LIST_HEAD(&pdev->dev.devres_head);
}

static void
lkpinew_pci_dev_release(struct device *dev)
{
	struct pci_dev *pdev;

	pdev = to_pci_dev(dev);
	if (pdev->root != NULL)
		pci_dev_put(pdev->root);
	free(pdev->bus, M_DEVBUF);
	free(pdev, M_DEVBUF);
}

struct pci_dev *
lkpinew_pci_dev(device_t dev)
{
	struct pci_dev *pdev;

	pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK | M_ZERO);
	lkpifill_pci_dev(dev, pdev);
	pdev->dev.release = lkpinew_pci_dev_release;

	return (pdev);
}

struct pci_dev *
lkpi_pci_get_class(unsigned int class, struct pci_dev *from)
{
	device_t dev;
	device_t devfrom = NULL;
	struct pci_dev *pdev;

	if (from != NULL)
		devfrom = from->dev.bsddev;

	dev = pci_find_class_from(class >> 16, (class >> 8) & 0xFF, devfrom);
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

struct pci_dev *
lkpi_pci_get_domain_bus_and_slot(int domain, unsigned int bus,
    unsigned int devfn)
{
	device_t dev;
	struct pci_dev *pdev;

	dev = pci_find_dbsf(domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

static int
linux_pci_probe(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;

	if ((pdrv = linux_pci_find(dev, &id)) == NULL)
		return (ENXIO);
	if (device_get_driver(dev) != &pdrv->bsddriver)
		return (ENXIO);
	device_set_desc(dev, pdrv->name);
	return (0);
}

static int
linux_pci_attach(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	struct pci_dev *pdev;

	pdrv = linux_pci_find(dev, &id);
	pdev = device_get_softc(dev);

	MPASS(pdrv != NULL);
	MPASS(pdev != NULL);

	return (linux_pci_attach_device(dev, pdrv, id, pdev));
}

int
linux_pci_attach_device(device_t dev, struct pci_driver *pdrv,
    const struct pci_device_id *id, struct pci_dev *pdev)
{
	struct resource_list_entry *rle;
	device_t parent;
	uintptr_t rid;
	int error;
	bool isdrm;

	linux_set_current(curthread);

	parent = device_get_parent(dev);
	isdrm = pdrv != NULL && pdrv->isdrm;

	if (isdrm) {
		struct pci_devinfo *dinfo;

		dinfo = device_get_ivars(parent);
		device_set_ivars(dev, dinfo);
	}

	lkpifill_pci_dev(dev, pdev);
	if (isdrm)
		PCI_GET_ID(device_get_parent(parent), parent, PCI_ID_RID, &rid);
	else
		PCI_GET_ID(parent, dev, PCI_ID_RID, &rid);
	pdev->devfn = rid;
	pdev->pdrv = pdrv;
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0, false);
	if (rle != NULL)
		pdev->dev.irq = rle->start;
	else
		pdev->dev.irq = LINUX_IRQ_INVALID;
	pdev->irq = pdev->dev.irq;
	error = linux_pdev_dma_init(pdev);
	if (error)
		goto out_dma_init;

	TAILQ_INIT(&pdev->mmio);

	spin_lock(&pci_lock);
	list_add(&pdev->links, &pci_devices);
	spin_unlock(&pci_lock);

	if (pdrv != NULL) {
		error = pdrv->probe(pdev, id);
		if (error)
			goto out_probe;
	}
	return (0);

out_probe:
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);
out_dma_init:
	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);
	return (-error);
}
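
/*
 * Illustrative only (hypothetical driver): the probe callback invoked
 * from linux_pci_attach_device() above follows the Linux convention of
 * returning 0 or a negative errno:
 *
 *	static int
 *	mydrv_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		int error;
 *
 *		error = pci_enable_device(pdev);
 *		if (error != 0)
 *			return (error);
 *		...
 *		return (0);
 *	}
 *
 * The negative errno is negated again on the way out, so the FreeBSD
 * attach method reports a conventional positive error.
 */
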
static int
linux_pci_detach(device_t dev)
{
	struct pci_dev *pdev;

	pdev = device_get_softc(dev);

	MPASS(pdev != NULL);

	device_set_desc(dev, NULL);

	return (linux_pci_detach_device(pdev));
}

int
linux_pci_detach_device(struct pci_dev *pdev)
{

	linux_set_current(curthread);

	if (pdev->pdrv != NULL)
		pdev->pdrv->remove(pdev);

	if (pdev->root != NULL)
		pci_dev_put(pdev->root);
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);

	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);

	return (0);
}

static int
lkpi_pci_disable_dev(struct device *dev)
{

	(void) pci_disable_io(dev->bsddev, SYS_RES_MEMORY);
	(void) pci_disable_io(dev->bsddev, SYS_RES_IOPORT);
	return (0);
}

void
lkpi_pci_devres_release(struct device *dev, void *p)
{
	struct pci_devres *dr;
	struct pci_dev *pdev;
	int bar;

	pdev = to_pci_dev(dev);
	dr = p;

	if (pdev->msix_enabled)
		lkpi_pci_disable_msix(pdev);
	if (pdev->msi_enabled)
		lkpi_pci_disable_msi(pdev);

	if (dr->enable_io && lkpi_pci_disable_dev(dev) == 0)
		dr->enable_io = false;

	if (dr->region_mask == 0)
		return;
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
		if ((dr->region_mask & (1 << bar)) == 0)
			continue;
		pci_release_region(pdev, bar);
	}
}

void
lkpi_pcim_iomap_table_release(struct device *dev, void *p)
{
	struct pcim_iomap_devres *dr;
	struct pci_dev *pdev;
	int bar;

	dr = p;
	pdev = to_pci_dev(dev);
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
		if (dr->mmio_table[bar] == NULL)
			continue;

		pci_iounmap(pdev, dr->mmio_table[bar]);
	}
}

static int
linux_pci_suspend(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pm_message pm = { };
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->suspend != NULL)
		error = -pdev->pdrv->suspend(pdev, pm);
	else if (pmops != NULL && pmops->suspend != NULL) {
		error = -pmops->suspend(&pdev->dev);
		if (error == 0 && pmops->suspend_late != NULL)
			error = -pmops->suspend_late(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_resume(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->resume != NULL)
		error = -pdev->pdrv->resume(pdev);
	else if (pmops != NULL && pmops->resume != NULL) {
		if (pmops->resume_early != NULL)
			error = -pmops->resume_early(&pdev->dev);
		if (error == 0 && pmops->resume != NULL)
			error = -pmops->resume(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_shutdown(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->shutdown != NULL)
		pdev->pdrv->shutdown(pdev);
	return (0);
}
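
/*
 * Sketch (hypothetical callbacks): the suspend/resume methods above
 * prefer the legacy pci_driver hooks and otherwise fall back to
 * dev_pm_ops, honoring the late/early stages, e.g.:
 *
 *	static const struct dev_pm_ops mydrv_pm_ops = {
 *		.suspend	= mydrv_suspend,
 *		.suspend_late	= mydrv_suspend_late,
 *		.resume_early	= mydrv_resume_early,
 *		.resume		= mydrv_resume,
 *	};
 */
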
static int
linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_init != NULL)
		error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config);
	else
		error = EINVAL;
	return (error);
}

static void
linux_pci_iov_uninit(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_uninit != NULL)
		pdev->pdrv->bsd_iov_uninit(dev);
}

static int
linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_add_vf != NULL)
		error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config);
	else
		error = EINVAL;
	return (error);
}

static int
_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
{
	int error;

	linux_set_current(curthread);
	spin_lock(&pci_lock);
	list_add(&pdrv->node, &pci_drivers);
	spin_unlock(&pci_lock);
	pdrv->bsddriver.name = pdrv->name;
	pdrv->bsddriver.methods = pci_methods;
	pdrv->bsddriver.size = sizeof(struct pci_dev);

	mtx_lock(&Giant);
	error = devclass_add_driver(dc, &pdrv->bsddriver,
	    BUS_PASS_DEFAULT, &pdrv->bsdclass);
	mtx_unlock(&Giant);
	return (-error);
}

int
linux_pci_register_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_find("pci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = false;
	return (_linux_pci_register_driver(pdrv, dc));
}
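
/*
 * Illustrative only (hypothetical names): the registration glue a
 * Linux driver supplies to the functions above:
 *
 *	static struct pci_driver mydrv_pci_driver = {
 *		.name		= "mydrv",
 *		.id_table	= mydrv_ids,
 *		.probe		= mydrv_probe,
 *		.remove		= mydrv_remove,
 *	};
 *
 * linux_pci_register_driver(&mydrv_pci_driver) hooks it up to newbus;
 * linux_pci_unregister_driver() undoes this on module unload.
 */
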
struct resource_list_entry *
linux_pci_reserve_bar(struct pci_dev *pdev, struct resource_list *rl,
    int type, int rid)
{
	device_t dev;
	struct resource *res;

	KASSERT(type == SYS_RES_IOPORT || type == SYS_RES_MEMORY,
	    ("trying to reserve non-BAR type %d", type));

	dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	res = pci_reserve_map(device_get_parent(dev), dev, type, &rid, 0, ~0,
	    1, 1, 0);
	if (res == NULL)
		return (NULL);
	return (resource_list_find(rl, type, rid));
}

unsigned long
pci_resource_start(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;
	rman_res_t newstart;
	device_t dev;

	if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL)
		return (0);
	dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	if (BUS_TRANSLATE_RESOURCE(dev, rle->type, rle->start, &newstart)) {
		device_printf(pdev->dev.bsddev, "translate of %#jx failed\n",
		    (uintmax_t)rle->start);
		return (0);
	}
	return (newstart);
}

unsigned long
pci_resource_len(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;

	if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL)
		return (0);
	return (rle->count);
}

int
linux_pci_register_drm_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_create("vgapci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = true;
	pdrv->name = "drmn";
	return (_linux_pci_register_driver(pdrv, dc));
}

void
linux_pci_unregister_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("pci");

	spin_lock(&pci_lock);
	list_del(&pdrv->node);
	spin_unlock(&pci_lock);
	mtx_lock(&Giant);
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	mtx_unlock(&Giant);
}

void
linux_pci_unregister_drm_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("vgapci");

	spin_lock(&pci_lock);
	list_del(&pdrv->node);
	spin_unlock(&pci_lock);
	mtx_lock(&Giant);
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	mtx_unlock(&Giant);
}

CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));

struct linux_dma_obj {
	void		*vaddr;
	uint64_t	dma_addr;
	bus_dmamap_t	dmamap;
	bus_dma_tag_t	dmat;
};

static uma_zone_t linux_dma_trie_zone;
static uma_zone_t linux_dma_obj_zone;

static void
linux_dma_init(void *arg)
{

	linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
	    pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
	    UMA_ALIGN_PTR, 0);
	linux_dma_obj_zone = uma_zcreate("linux_dma_object",
	    sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
}
SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);

static void
linux_dma_uninit(void *arg)
{

	uma_zdestroy(linux_dma_obj_zone);
	uma_zdestroy(linux_dma_trie_zone);
}
SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);

static void *
linux_dma_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT));
}

static void
linux_dma_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(linux_dma_trie_zone, node);
}

PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
    linux_dma_trie_free);
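
/*
 * Sketch of the lookup protocol (no new API; simply how the pieces
 * below fit together): addresses handed out by linux_dma_map_phys()
 * are keyed by dma_addr in the per-device pctrie so that
 * linux_dma_unmap() can later recover the bus_dmamap_t:
 *
 *	dma_addr = linux_dma_map_phys(dev, phys, len);
 *	...
 *	linux_dma_unmap(dev, dma_addr, len);
 */
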
#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
static dma_addr_t
linux_dma_map_phys_common(struct device *dev, vm_paddr_t phys, size_t len,
    bus_dma_tag_t dmat)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	/*
	 * If the resultant mapping will be entirely 1:1 with the
	 * physical address, short-circuit the remainder of the
	 * bus_dma API.  This avoids tracking collisions in the pctrie
	 * with the additional benefit of reducing overhead.
	 */
	if (bus_dma_id_mapped(dmat, phys, len))
		return (phys);

	obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT);
	if (obj == NULL)
		return (0);
	obj->dmat = dmat;

	DMA_PRIV_LOCK(priv);
	if (bus_dmamap_create(obj->dmat, 0, &obj->dmamap) != 0) {
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	nseg = -1;
	if (_bus_dmamap_load_phys(obj->dmat, obj->dmamap, phys, len,
	    BUS_DMA_NOWAIT, &seg, &nseg) != 0) {
		bus_dmamap_destroy(obj->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
	if (error != 0) {
		bus_dmamap_unload(obj->dmat, obj->dmamap);
		bus_dmamap_destroy(obj->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}
	DMA_PRIV_UNLOCK(priv);
	return (obj->dma_addr);
}
#else
static dma_addr_t
linux_dma_map_phys_common(struct device *dev __unused, vm_paddr_t phys,
    size_t len __unused, bus_dma_tag_t dmat __unused)
{

	return (phys);
}
#endif

dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;
	return (linux_dma_map_phys_common(dev, phys, len, priv->dmat));
}

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);
	bus_dmamap_unload(obj->dmat, obj->dmamap);
	bus_dmamap_destroy(obj->dmat, obj->dmamap);
	DMA_PRIV_UNLOCK(priv);

	uma_zfree(linux_dma_obj_zone, obj);
}
#else
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
}
#endif
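
/*
 * Illustrative only: the Linux dma_map_single()/dma_unmap_single()
 * style entry points are expected to funnel into the helpers above,
 * roughly:
 *
 *	dma_addr = dma_map_single(dev, vaddr, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma_addr))
 *		... bail ...
 *	dma_unmap_single(dev, dma_addr, len, DMA_TO_DEVICE);
 */
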
void *
linux_dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t flag)
{
	struct linux_dma_priv *priv;
	vm_paddr_t high;
	size_t align;
	void *mem;

	if (dev == NULL || dev->dma_priv == NULL) {
		*dma_handle = 0;
		return (NULL);
	}
	priv = dev->dma_priv;
	if (priv->dma_coherent_mask)
		high = priv->dma_coherent_mask;
	else
		/* Coherent memory is 32-bit only by default in Linux. */
		high = BUS_SPACE_MAXADDR_32BIT;
	align = PAGE_SIZE << get_order(size);
	/* Always zero the allocation. */
	flag |= M_ZERO;
	mem = (void *)kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high,
	    align, 0, VM_MEMATTR_DEFAULT);
	if (mem != NULL) {
		*dma_handle = linux_dma_map_phys_common(dev, vtophys(mem), size,
		    priv->dmat_coherent);
		if (*dma_handle == 0) {
			kmem_free((vm_offset_t)mem, size);
			mem = NULL;
		}
	} else {
		*dma_handle = 0;
	}
	return (mem);
}

int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction dir __unused, unsigned long attrs __unused)
{
	struct linux_dma_priv *priv;
	struct scatterlist *sg;
	int i, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	/* create common DMA map in the first S/G entry */
	if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
		DMA_PRIV_UNLOCK(priv);
		return (0);
	}

	/* load all S/G list entries */
	for_each_sg(sgl, sg, nents, i) {
		nseg = -1;
		if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
		    sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
		    &seg, &nseg) != 0) {
			bus_dmamap_unload(priv->dmat, sgl->dma_map);
			bus_dmamap_destroy(priv->dmat, sgl->dma_map);
			DMA_PRIV_UNLOCK(priv);
			return (0);
		}
		KASSERT(nseg == 0,
		    ("More than one segment (nseg=%d)", nseg + 1));

		sg_dma_address(sg) = seg.ds_addr;
	}
	DMA_PRIV_UNLOCK(priv);

	return (nents);
}

void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents __unused, enum dma_data_direction dir __unused,
    unsigned long attrs __unused)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);
	bus_dmamap_unload(priv->dmat, sgl->dma_map);
	bus_dmamap_destroy(priv->dmat, sgl->dma_map);
	DMA_PRIV_UNLOCK(priv);
}
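
/*
 * Sketch (hypothetical buffer setup): a scatter/gather table is mapped
 * and unmapped through the two functions above; note that a single
 * common DMA map is stored in the first entry of the list:
 *
 *	nmapped = dma_map_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);
 *	if (nmapped == 0)
 *		... error ...
 *	dma_unmap_sg(dev, sgl, nents, DMA_BIDIRECTIONAL);
 */
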
struct dma_pool {
	struct device	*pool_device;
	uma_zone_t	pool_zone;
	struct mtx	pool_lock;
	bus_dma_tag_t	pool_dmat;
	size_t		pool_entry_size;
	struct pctrie	pool_ptree;
};

#define	DMA_POOL_LOCK(pool) mtx_lock(&(pool)->pool_lock)
#define	DMA_POOL_UNLOCK(pool) mtx_unlock(&(pool)->pool_lock)

static inline int
dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;
	int error, nseg;
	bus_dma_segment_t seg;

	nseg = -1;
	DMA_POOL_LOCK(pool);
	error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
	    vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
	    &seg, &nseg);
	DMA_POOL_UNLOCK(pool);
	if (error != 0)
		return (error);
	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	return (0);
}

static void
dma_pool_obj_dtor(void *mem, int size, void *arg)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;

	DMA_POOL_LOCK(pool);
	bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
	DMA_POOL_UNLOCK(pool);
}

static int
dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
	struct dma_pool *pool = arg;
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, i;

	priv = pool->pool_device->dma_priv;
	for (i = 0; i < count; i++) {
		obj = uma_zalloc(linux_dma_obj_zone, flags);
		if (obj == NULL)
			break;

		error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
		    BUS_DMA_NOWAIT, &obj->dmamap);
		if (error != 0) {
			uma_zfree(linux_dma_obj_zone, obj);
			break;
		}

		store[i] = obj;
	}

	return (i);
}

static void
dma_pool_obj_release(void *arg, void **store, int count)
{
	struct dma_pool *pool = arg;
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int i;

	priv = pool->pool_device->dma_priv;
	for (i = 0; i < count; i++) {
		obj = store[i];
		bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
		uma_zfree(linux_dma_obj_zone, obj);
	}
}

struct dma_pool *
linux_dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
{
	struct linux_dma_priv *priv;
	struct dma_pool *pool;

	priv = dev->dma_priv;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	pool->pool_device = dev;
	pool->pool_entry_size = size;

	if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    align, boundary,		/* alignment, boundary */
	    priv->dma_mask,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &pool->pool_dmat)) {
		kfree(pool);
		return (NULL);
	}

	pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,
	    dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,
	    dma_pool_obj_release, pool, 0);

	mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF);
	pctrie_init(&pool->pool_ptree);

	return (pool);
}

void
linux_dma_pool_destroy(struct dma_pool *pool)
{

	uma_zdestroy(pool->pool_zone);
	bus_dma_tag_destroy(pool->pool_dmat);
	mtx_destroy(&pool->pool_lock);
	kfree(pool);
}

void
lkpi_dmam_pool_destroy(struct device *dev, void *p)
{
	struct dma_pool *pool;

	pool = *(struct dma_pool **)p;
	LINUX_DMA_PCTRIE_RECLAIM(&pool->pool_ptree);
	linux_dma_pool_destroy(pool);
}

void *
linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    dma_addr_t *handle)
{
	struct linux_dma_obj *obj;

	obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags & GFP_NATIVE_MASK);
	if (obj == NULL)
		return (NULL);

	DMA_POOL_LOCK(pool);
	if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
		DMA_POOL_UNLOCK(pool);
		uma_zfree_arg(pool->pool_zone, obj, pool);
		return (NULL);
	}
	DMA_POOL_UNLOCK(pool);

	*handle = obj->dma_addr;
	return (obj->vaddr);
}

void
linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
{
	struct linux_dma_obj *obj;

	DMA_POOL_LOCK(pool);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
	if (obj == NULL) {
		DMA_POOL_UNLOCK(pool);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
	DMA_POOL_UNLOCK(pool);

	uma_zfree_arg(pool->pool_zone, obj, pool);
}
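
/*
 * Illustrative usage of the pool API above (the name and sizes are
 * hypothetical):
 *
 *	pool = linux_dma_pool_create("mydrv_pool", &pdev->dev, 256, 256, 0);
 *	buf = linux_dma_pool_alloc(pool, GFP_KERNEL, &busaddr);
 *	...
 *	linux_dma_pool_free(pool, buf, busaddr);
 *	linux_dma_pool_destroy(pool);
 */
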
static int
linux_backlight_get_status(device_t dev, struct backlight_props *props)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	props->brightness = pdev->dev.bd->props.brightness;
	props->brightness = props->brightness * 100 /
	    pdev->dev.bd->props.max_brightness;
	props->nlevels = 0;

	return (0);
}

static int
linux_backlight_get_info(device_t dev, struct backlight_info *info)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	info->type = BACKLIGHT_TYPE_PANEL;
	strlcpy(info->name, pdev->dev.bd->name, BACKLIGHTMAXNAMELENGTH);
	return (0);
}

static int
linux_backlight_update_status(device_t dev, struct backlight_props *props)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	pdev->dev.bd->props.brightness = pdev->dev.bd->props.max_brightness *
	    props->brightness / 100;
	pdev->dev.bd->props.power = props->brightness == 0 ?
	    4/* FB_BLANK_POWERDOWN */ : 0/* FB_BLANK_UNBLANK */;
	return (pdev->dev.bd->ops->update_status(pdev->dev.bd));
}

struct backlight_device *
linux_backlight_device_register(const char *name, struct device *dev,
    void *data, const struct backlight_ops *ops,
    struct backlight_properties *props)
{

	dev->bd = malloc(sizeof(*dev->bd), M_DEVBUF, M_WAITOK | M_ZERO);
	dev->bd->ops = ops;
	dev->bd->props.type = props->type;
	dev->bd->props.max_brightness = props->max_brightness;
	dev->bd->props.brightness = props->brightness;
	dev->bd->props.power = props->power;
	dev->bd->data = data;
	dev->bd->dev = dev;
	dev->bd->name = strdup(name, M_DEVBUF);

	dev->backlight_dev = backlight_register(name, dev->bsddev);

	return (dev->bd);
}

void
linux_backlight_device_unregister(struct backlight_device *bd)
{

	backlight_destroy(bd->dev->backlight_dev);
	free(bd->name, M_DEVBUF);
	free(bd, M_DEVBUF);
}
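
/*
 * Illustrative only (hypothetical ops): how a driver ties into the
 * backlight glue above; update_status is the one callback the code
 * above requires to be non-NULL:
 *
 *	static const struct backlight_ops mydrv_bl_ops = {
 *		.update_status	= mydrv_bl_update_status,
 *	};
 *
 *	bd = linux_backlight_device_register("mydrv_bl", &pdev->dev, sc,
 *	    &mydrv_bl_ops, &props);
 *	...
 *	linux_backlight_device_unregister(bd);
 */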