/*-
 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 * Copyright (c) 2020-2021 The FreeBSD Foundation
 *
 * Portions of this software were developed by Björn Zeeb
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/pciio.h>
#include <sys/pctrie.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/stdarg.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <dev/pci/pci_iov.h>
#include <dev/backlight/backlight.h>

#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/compat.h>

#include <linux/backlight.h>

#include "backlight_if.h"
#include "pcib_if.h"

/* Undef the linux function macro defined in linux/pci.h */
#undef pci_get_class

static device_probe_t linux_pci_probe;
static device_attach_t linux_pci_attach;
static device_detach_t linux_pci_detach;
static device_suspend_t linux_pci_suspend;
static device_resume_t linux_pci_resume;
static device_shutdown_t linux_pci_shutdown;
static pci_iov_init_t linux_pci_iov_init;
static pci_iov_uninit_t linux_pci_iov_uninit;
static pci_iov_add_vf_t linux_pci_iov_add_vf;
static int linux_backlight_get_status(device_t dev, struct backlight_props *props);
static int linux_backlight_update_status(device_t dev, struct backlight_props *props);
static int linux_backlight_get_info(device_t dev, struct backlight_info *info);
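/*
 * The method table below maps the Linux pci_driver life cycle onto
 * newbus: it is installed for every driver registered through
 * linux_pci_register_driver()/linux_pci_register_drm_driver(), and also
 * wires up the PCI SR-IOV and backlight kernel interfaces.
 */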
static device_method_t pci_methods[] = {
	DEVMETHOD(device_probe, linux_pci_probe),
	DEVMETHOD(device_attach, linux_pci_attach),
	DEVMETHOD(device_detach, linux_pci_detach),
	DEVMETHOD(device_suspend, linux_pci_suspend),
	DEVMETHOD(device_resume, linux_pci_resume),
	DEVMETHOD(device_shutdown, linux_pci_shutdown),
	DEVMETHOD(pci_iov_init, linux_pci_iov_init),
	DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf),

	/* backlight interface */
	DEVMETHOD(backlight_update_status, linux_backlight_update_status),
	DEVMETHOD(backlight_get_status, linux_backlight_get_status),
	DEVMETHOD(backlight_get_info, linux_backlight_get_info),
	DEVMETHOD_END
};

struct linux_dma_priv {
	uint64_t	dma_mask;
	bus_dma_tag_t	dmat;
	uint64_t	dma_coherent_mask;
	bus_dma_tag_t	dmat_coherent;
	struct mtx	lock;
	struct pctrie	ptree;
};
#define	DMA_PRIV_LOCK(priv) mtx_lock(&(priv)->lock)
#define	DMA_PRIV_UNLOCK(priv) mtx_unlock(&(priv)->lock)

static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;

	priv = pdev->dev.dma_priv;
	if (priv->dmat)
		bus_dma_tag_destroy(priv->dmat);
	if (priv->dmat_coherent)
		bus_dma_tag_destroy(priv->dmat_coherent);
	mtx_destroy(&priv->lock);
	pdev->dev.dma_priv = NULL;
	free(priv, M_DEVBUF);
	return (0);
}
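/*
 * Set up the per-device DMA state: a 64-bit streaming tag and, matching
 * the Linux default, a coherent tag restricted to the lower 32 bits.
 * Drivers may replace either mask later through the dma_set_mask()
 * family, which lands in linux_dma_tag_init()/linux_dma_tag_init_coherent().
 */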
static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;
	int error;

	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);

	mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);
	pctrie_init(&priv->ptree);

	pdev->dev.dma_priv = priv;

	/* Create the default DMA tags. */
	error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
	if (error != 0)
		goto err;
	/* Coherent mappings are limited to the lower 32 bits by default in Linux. */
	error = linux_dma_tag_init_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (error != 0)
		goto err;

	return (error);

err:
	linux_pdev_dma_uninit(pdev);
	return (error);
}

int
linux_dma_tag_init(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat) {
		if (priv->dma_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat);
	}

	priv->dma_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat);
	return (-error);
}

int
linux_dma_tag_init_coherent(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat_coherent) {
		if (priv->dma_coherent_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat_coherent);
	}

	priv->dma_coherent_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat_coherent);
	return (-error);
}

static struct pci_driver *
linux_pci_find(device_t dev, const struct pci_device_id **idp)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;
	uint16_t subdevice;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);
	subvendor = pci_get_subvendor(dev);
	subdevice = pci_get_subdevice(dev);

	spin_lock(&pci_lock);
	list_for_each_entry(pdrv, &pci_drivers, node) {
		for (id = pdrv->id_table; id->vendor != 0; id++) {
			if (vendor == id->vendor &&
			    (PCI_ANY_ID == id->device || device == id->device) &&
			    (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) &&
			    (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) {
				*idp = id;
				spin_unlock(&pci_lock);
				return (pdrv);
			}
		}
	}
	spin_unlock(&pci_lock);
	return (NULL);
}

static void
lkpi_pci_dev_release(struct device *dev)
{

	lkpi_devres_release_free_list(dev);
	spin_lock_destroy(&dev->devres_lock);
}
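/*
 * Populate a struct pci_dev from an attached newbus device_t.  A
 * minimal struct pci_bus is faked up as well, since LinuxKPI consumers
 * expect pdev->bus->{self,number,domain} to be valid.
 */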
static void
lkpifill_pci_dev(device_t dev, struct pci_dev *pdev)
{

	pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
	pdev->vendor = pci_get_vendor(dev);
	pdev->device = pci_get_device(dev);
	pdev->subsystem_vendor = pci_get_subvendor(dev);
	pdev->subsystem_device = pci_get_subdevice(dev);
	pdev->class = pci_get_class(dev);
	pdev->revision = pci_get_revid(dev);
	pdev->bus = malloc(sizeof(*pdev->bus), M_DEVBUF, M_WAITOK | M_ZERO);
	pdev->bus->self = pdev;
	pdev->bus->number = pci_get_bus(dev);
	pdev->bus->domain = pci_get_domain(dev);
	pdev->dev.bsddev = dev;
	pdev->dev.parent = &linux_root_device;
	pdev->dev.release = lkpi_pci_dev_release;
	INIT_LIST_HEAD(&pdev->dev.irqents);
	kobject_init(&pdev->dev.kobj, &linux_dev_ktype);
	kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev));
	kobject_add(&pdev->dev.kobj, &linux_root_device.kobj,
	    kobject_name(&pdev->dev.kobj));
	spin_lock_init(&pdev->dev.devres_lock);
	INIT_LIST_HEAD(&pdev->dev.devres_head);
}

static void
lkpinew_pci_dev_release(struct device *dev)
{
	struct pci_dev *pdev;

	pdev = to_pci_dev(dev);
	if (pdev->root != NULL)
		pci_dev_put(pdev->root);
	free(pdev->bus, M_DEVBUF);
	free(pdev, M_DEVBUF);
}

struct pci_dev *
lkpinew_pci_dev(device_t dev)
{
	struct pci_dev *pdev;

	pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK | M_ZERO);
	lkpifill_pci_dev(dev, pdev);
	pdev->dev.release = lkpinew_pci_dev_release;

	return (pdev);
}

struct pci_dev *
lkpi_pci_get_class(unsigned int class, struct pci_dev *from)
{
	device_t dev;
	device_t devfrom = NULL;
	struct pci_dev *pdev;

	if (from != NULL)
		devfrom = from->dev.bsddev;

	dev = pci_find_class_from(class >> 16, (class >> 8) & 0xFF, devfrom);
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

struct pci_dev *
lkpi_pci_get_domain_bus_and_slot(int domain, unsigned int bus,
    unsigned int devfn)
{
	device_t dev;
	struct pci_dev *pdev;

	dev = pci_find_dbsf(domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

static int
linux_pci_probe(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;

	if ((pdrv = linux_pci_find(dev, &id)) == NULL)
		return (ENXIO);
	if (device_get_driver(dev) != &pdrv->bsddriver)
		return (ENXIO);
	device_set_desc(dev, pdrv->name);
	return (0);
}

static int
linux_pci_attach(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	struct pci_dev *pdev;

	pdrv = linux_pci_find(dev, &id);
	pdev = device_get_softc(dev);

	MPASS(pdrv != NULL);
	MPASS(pdev != NULL);

	return (linux_pci_attach_device(dev, pdrv, id, pdev));
}
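/*
 * Common attach path, shared by regular PCI drivers and DRM drivers.
 * DRM devices attach below vgapci(4), so their PCI ivars and RID must
 * be taken from the parent; everything else (IRQ, DMA tags, the global
 * pci_devices list) is set up here before the Linux probe callback runs.
 */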
int
linux_pci_attach_device(device_t dev, struct pci_driver *pdrv,
    const struct pci_device_id *id, struct pci_dev *pdev)
{
	struct resource_list_entry *rle;
	device_t parent;
	uintptr_t rid;
	int error;
	bool isdrm;

	linux_set_current(curthread);

	parent = device_get_parent(dev);
	isdrm = pdrv != NULL && pdrv->isdrm;

	if (isdrm) {
		struct pci_devinfo *dinfo;

		dinfo = device_get_ivars(parent);
		device_set_ivars(dev, dinfo);
	}

	lkpifill_pci_dev(dev, pdev);
	if (isdrm)
		PCI_GET_ID(device_get_parent(parent), parent, PCI_ID_RID, &rid);
	else
		PCI_GET_ID(parent, dev, PCI_ID_RID, &rid);
	pdev->devfn = rid;
	pdev->pdrv = pdrv;
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0, false);
	if (rle != NULL)
		pdev->dev.irq = rle->start;
	else
		pdev->dev.irq = LINUX_IRQ_INVALID;
	pdev->irq = pdev->dev.irq;
	error = linux_pdev_dma_init(pdev);
	if (error)
		goto out_dma_init;

	TAILQ_INIT(&pdev->mmio);

	spin_lock(&pci_lock);
	list_add(&pdev->links, &pci_devices);
	spin_unlock(&pci_lock);

	if (pdrv != NULL) {
		error = pdrv->probe(pdev, id);
		if (error)
			goto out_probe;
	}
	return (0);

out_probe:
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);
out_dma_init:
	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);
	return (-error);
}

static int
linux_pci_detach(device_t dev)
{
	struct pci_dev *pdev;

	pdev = device_get_softc(dev);

	MPASS(pdev != NULL);

	device_set_desc(dev, NULL);

	return (linux_pci_detach_device(pdev));
}

int
linux_pci_detach_device(struct pci_dev *pdev)
{

	linux_set_current(curthread);

	if (pdev->pdrv != NULL)
		pdev->pdrv->remove(pdev);

	if (pdev->root != NULL)
		pci_dev_put(pdev->root);
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);

	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);

	return (0);
}

static int
lkpi_pci_disable_dev(struct device *dev)
{

	(void) pci_disable_io(dev->bsddev, SYS_RES_MEMORY);
	(void) pci_disable_io(dev->bsddev, SYS_RES_IOPORT);
	return (0);
}

void
lkpi_pci_devres_release(struct device *dev, void *p)
{
	struct pci_devres *dr;
	struct pci_dev *pdev;
	int bar;

	pdev = to_pci_dev(dev);
	dr = p;

	if (pdev->msix_enabled)
		lkpi_pci_disable_msix(pdev);
	if (pdev->msi_enabled)
		lkpi_pci_disable_msi(pdev);

	if (dr->enable_io && lkpi_pci_disable_dev(dev) == 0)
		dr->enable_io = false;

	if (dr->region_mask == 0)
		return;
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
		if ((dr->region_mask & (1 << bar)) == 0)
			continue;
		pci_release_region(pdev, bar);
	}
}

void
lkpi_pcim_iomap_table_release(struct device *dev, void *p)
{
	struct pcim_iomap_devres *dr;
	struct pci_dev *pdev;
	int bar;

	dr = p;
	pdev = to_pci_dev(dev);
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
		if (dr->mmio_table[bar] == NULL)
			continue;

		pci_iounmap(pdev, dr->mmio_table[bar]);
	}
}

static int
linux_pci_suspend(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pm_message pm = { };
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->suspend != NULL)
		error = -pdev->pdrv->suspend(pdev, pm);
	else if (pmops != NULL && pmops->suspend != NULL) {
		error = -pmops->suspend(&pdev->dev);
		if (error == 0 && pmops->suspend_late != NULL)
			error = -pmops->suspend_late(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_resume(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->resume != NULL)
		error = -pdev->pdrv->resume(pdev);
	else if (pmops != NULL && pmops->resume != NULL) {
		if (pmops->resume_early != NULL)
			error = -pmops->resume_early(&pdev->dev);
		if (error == 0 && pmops->resume != NULL)
			error = -pmops->resume(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_shutdown(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->shutdown != NULL)
		pdev->pdrv->shutdown(pdev);
	return (0);
}
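/*
 * SR-IOV glue: thin wrappers around the optional bsd_iov_* callbacks a
 * LinuxKPI driver may provide in its struct pci_driver; without them,
 * VF creation is simply refused with EINVAL.
 */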
static int
linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_init != NULL)
		error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config);
	else
		error = EINVAL;
	return (error);
}

static void
linux_pci_iov_uninit(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_uninit != NULL)
		pdev->pdrv->bsd_iov_uninit(dev);
}

static int
linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_add_vf != NULL)
		error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config);
	else
		error = EINVAL;
	return (error);
}

static int
_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
{
	int error;

	linux_set_current(curthread);
	spin_lock(&pci_lock);
	list_add(&pdrv->node, &pci_drivers);
	spin_unlock(&pci_lock);
	pdrv->bsddriver.name = pdrv->name;
	pdrv->bsddriver.methods = pci_methods;
	pdrv->bsddriver.size = sizeof(struct pci_dev);

	mtx_lock(&Giant);
	error = devclass_add_driver(dc, &pdrv->bsddriver,
	    BUS_PASS_DEFAULT, &pdrv->bsdclass);
	mtx_unlock(&Giant);
	return (-error);
}

int
linux_pci_register_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_find("pci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = false;
	return (_linux_pci_register_driver(pdrv, dc));
}

struct resource_list_entry *
linux_pci_reserve_bar(struct pci_dev *pdev, struct resource_list *rl,
    int type, int rid)
{
	device_t dev;
	struct resource *res;

	KASSERT(type == SYS_RES_IOPORT || type == SYS_RES_MEMORY,
	    ("trying to reserve non-BAR type %d", type));

	dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	res = pci_reserve_map(device_get_parent(dev), dev, type, &rid, 0, ~0,
	    1, 1, 0);
	if (res == NULL)
		return (NULL);
	return (resource_list_find(rl, type, rid));
}
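/*
 * BAR helpers.  pci_resource_start() returns the bus-translated base of
 * a BAR and pci_resource_len() its size; both return 0 when the BAR is
 * absent or cannot be translated.  A minimal sketch of driver use
 * (hypothetical driver code querying BAR 0):
 *
 *	unsigned long start = pci_resource_start(pdev, 0);
 *	unsigned long len = pci_resource_len(pdev, 0);
 *	if (start == 0 || len == 0)
 *		return (-ENXIO);
 */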
unsigned long
pci_resource_start(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;
	rman_res_t newstart;
	device_t dev;
	int error;

	if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL)
		return (0);
	dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	error = bus_translate_resource(dev, rle->type, rle->start, &newstart);
	if (error != 0) {
		device_printf(pdev->dev.bsddev,
		    "translate of %#jx failed: %d\n",
		    (uintmax_t)rle->start, error);
		return (0);
	}
	return (newstart);
}

unsigned long
pci_resource_len(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;

	if ((rle = linux_pci_get_bar(pdev, bar, true)) == NULL)
		return (0);
	return (rle->count);
}

int
linux_pci_register_drm_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_create("vgapci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = true;
	pdrv->name = "drmn";
	return (_linux_pci_register_driver(pdrv, dc));
}

void
linux_pci_unregister_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("pci");

	spin_lock(&pci_lock);
	list_del(&pdrv->node);
	spin_unlock(&pci_lock);
	mtx_lock(&Giant);
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	mtx_unlock(&Giant);
}

void
linux_pci_unregister_drm_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("vgapci");

	spin_lock(&pci_lock);
	list_del(&pdrv->node);
	spin_unlock(&pci_lock);
	mtx_lock(&Giant);
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	mtx_unlock(&Giant);
}

CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));

struct linux_dma_obj {
	void		*vaddr;
	uint64_t	dma_addr;
	bus_dmamap_t	dmamap;
	bus_dma_tag_t	dmat;
};

static uma_zone_t linux_dma_trie_zone;
static uma_zone_t linux_dma_obj_zone;

static void
linux_dma_init(void *arg)
{

	linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
	    pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
	    UMA_ALIGN_PTR, 0);
	linux_dma_obj_zone = uma_zcreate("linux_dma_object",
	    sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
}
SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);

static void
linux_dma_uninit(void *arg)
{

	uma_zdestroy(linux_dma_obj_zone);
	uma_zdestroy(linux_dma_trie_zone);
}
SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);

static void *
linux_dma_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT));
}

static void
linux_dma_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(linux_dma_trie_zone, node);
}

PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
    linux_dma_trie_free);
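/*
 * Streaming DMA mappings that are not 1:1 with the physical address are
 * tracked in the per-device pctrie, keyed by the bus address handed
 * back to the driver; linux_dma_unmap() uses that key to find the
 * bus_dmamap to unload and destroy.  The tags are created with
 * nsegments = 1, which is why a single segment is asserted below.
 */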
#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
static dma_addr_t
linux_dma_map_phys_common(struct device *dev, vm_paddr_t phys, size_t len,
    bus_dma_tag_t dmat)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	/*
	 * If the resultant mapping will be entirely 1:1 with the
	 * physical address, short-circuit the remainder of the
	 * bus_dma API.  This avoids tracking collisions in the pctrie
	 * with the additional benefit of reducing overhead.
	 */
	if (bus_dma_id_mapped(dmat, phys, len))
		return (phys);

	obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT);
	if (obj == NULL) {
		return (0);
	}
	obj->dmat = dmat;

	DMA_PRIV_LOCK(priv);
	if (bus_dmamap_create(obj->dmat, 0, &obj->dmamap) != 0) {
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	nseg = -1;
	if (_bus_dmamap_load_phys(obj->dmat, obj->dmamap, phys, len,
	    BUS_DMA_NOWAIT, &seg, &nseg) != 0) {
		bus_dmamap_destroy(obj->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
	if (error != 0) {
		bus_dmamap_unload(obj->dmat, obj->dmamap);
		bus_dmamap_destroy(obj->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}
	DMA_PRIV_UNLOCK(priv);
	return (obj->dma_addr);
}
#else
static dma_addr_t
linux_dma_map_phys_common(struct device *dev __unused, vm_paddr_t phys,
    size_t len __unused, bus_dma_tag_t dmat __unused)
{
	return (phys);
}
#endif

dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;
	return (linux_dma_map_phys_common(dev, phys, len, priv->dmat));
}

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);
	bus_dmamap_unload(obj->dmat, obj->dmamap);
	bus_dmamap_destroy(obj->dmat, obj->dmamap);
	DMA_PRIV_UNLOCK(priv);

	uma_zfree(linux_dma_obj_zone, obj);
}
#else
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
}
#endif
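/*
 * Coherent ("consistent") allocations are served from contiguous wired
 * memory via kmem_alloc_contig(), aligned to the allocation's own
 * rounded-up size (PAGE_SIZE << get_order(size)), mirroring Linux
 * behavior, and are then mapped through the coherent DMA tag.
 */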
void *
linux_dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t flag)
{
	struct linux_dma_priv *priv;
	vm_paddr_t high;
	size_t align;
	void *mem;

	if (dev == NULL || dev->dma_priv == NULL) {
		*dma_handle = 0;
		return (NULL);
	}
	priv = dev->dma_priv;
	if (priv->dma_coherent_mask)
		high = priv->dma_coherent_mask;
	else
		/* Coherent mappings are limited to the lower 32 bits by default in Linux. */
		high = BUS_SPACE_MAXADDR_32BIT;
	align = PAGE_SIZE << get_order(size);
	/* Always zero the allocation. */
	flag |= M_ZERO;
	mem = (void *)kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high,
	    align, 0, VM_MEMATTR_DEFAULT);
	if (mem != NULL) {
		*dma_handle = linux_dma_map_phys_common(dev, vtophys(mem), size,
		    priv->dmat_coherent);
		if (*dma_handle == 0) {
			kmem_free((vm_offset_t)mem, size);
			mem = NULL;
		}
	} else {
		*dma_handle = 0;
	}
	return (mem);
}

int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction dir __unused, unsigned long attrs __unused)
{
	struct linux_dma_priv *priv;
	struct scatterlist *sg;
	int i, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	/* create common DMA map in the first S/G entry */
	if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
		DMA_PRIV_UNLOCK(priv);
		return (0);
	}

	/* load all S/G list entries */
	for_each_sg(sgl, sg, nents, i) {
		nseg = -1;
		if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
		    sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
		    &seg, &nseg) != 0) {
			bus_dmamap_unload(priv->dmat, sgl->dma_map);
			bus_dmamap_destroy(priv->dmat, sgl->dma_map);
			DMA_PRIV_UNLOCK(priv);
			return (0);
		}
		KASSERT(nseg == 0,
		    ("More than one segment (nseg=%d)", nseg + 1));

		sg_dma_address(sg) = seg.ds_addr;
	}
	DMA_PRIV_UNLOCK(priv);

	return (nents);
}

void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents __unused, enum dma_data_direction dir __unused,
    unsigned long attrs __unused)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);
	bus_dmamap_unload(priv->dmat, sgl->dma_map);
	bus_dmamap_destroy(priv->dmat, sgl->dma_map);
	DMA_PRIV_UNLOCK(priv);
}

struct dma_pool {
	struct device  *pool_device;
	uma_zone_t	pool_zone;
	struct mtx	pool_lock;
	bus_dma_tag_t	pool_dmat;
	size_t		pool_entry_size;
	struct pctrie	pool_ptree;
};

#define	DMA_POOL_LOCK(pool) mtx_lock(&(pool)->pool_lock)
#define	DMA_POOL_UNLOCK(pool) mtx_unlock(&(pool)->pool_lock)

static inline int
dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;
	int error, nseg;
	bus_dma_segment_t seg;

	nseg = -1;
	DMA_POOL_LOCK(pool);
	error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
	    vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
	    &seg, &nseg);
	DMA_POOL_UNLOCK(pool);
	if (error != 0) {
		return (error);
	}
	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	return (0);
}

static void
dma_pool_obj_dtor(void *mem, int size, void *arg)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;

	DMA_POOL_LOCK(pool);
	bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
	DMA_POOL_UNLOCK(pool);
}
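/*
 * The dma_pool implementation is backed by a UMA cache zone:
 * dma_pool_obj_import()/dma_pool_obj_release() fill and drain the cache
 * with DMA-able buffers obtained from bus_dmamem_alloc(), while the
 * ctor/dtor above load and unload the physical mapping whenever an
 * object moves between the cache and a consumer.  Illustrative use
 * through the Linux-side dmapool API (a sketch, not taken from a real
 * driver):
 *
 *	pool = dma_pool_create("mydrv", &pdev->dev, 512, 512, 0);
 *	buf = dma_pool_alloc(pool, GFP_KERNEL, &busaddr);
 *	...
 *	dma_pool_free(pool, buf, busaddr);
 *	dma_pool_destroy(pool);
 */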
static int
dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
	struct dma_pool *pool = arg;
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, i;

	priv = pool->pool_device->dma_priv;
	for (i = 0; i < count; i++) {
		obj = uma_zalloc(linux_dma_obj_zone, flags);
		if (obj == NULL)
			break;

		error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
		    BUS_DMA_NOWAIT, &obj->dmamap);
		if (error != 0) {
			uma_zfree(linux_dma_obj_zone, obj);
			break;
		}

		store[i] = obj;
	}

	return (i);
}

static void
dma_pool_obj_release(void *arg, void **store, int count)
{
	struct dma_pool *pool = arg;
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int i;

	priv = pool->pool_device->dma_priv;
	for (i = 0; i < count; i++) {
		obj = store[i];
		bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
		uma_zfree(linux_dma_obj_zone, obj);
	}
}

struct dma_pool *
linux_dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
{
	struct linux_dma_priv *priv;
	struct dma_pool *pool;

	priv = dev->dma_priv;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	pool->pool_device = dev;
	pool->pool_entry_size = size;

	if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    align, boundary,		/* alignment, boundary */
	    priv->dma_mask,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &pool->pool_dmat)) {
		kfree(pool);
		return (NULL);
	}

	pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,
	    dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,
	    dma_pool_obj_release, pool, 0);

	mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF);
	pctrie_init(&pool->pool_ptree);

	return (pool);
}

void
linux_dma_pool_destroy(struct dma_pool *pool)
{

	uma_zdestroy(pool->pool_zone);
	bus_dma_tag_destroy(pool->pool_dmat);
	mtx_destroy(&pool->pool_lock);
	kfree(pool);
}

void
lkpi_dmam_pool_destroy(struct device *dev, void *p)
{
	struct dma_pool *pool;

	pool = *(struct dma_pool **)p;
	LINUX_DMA_PCTRIE_RECLAIM(&pool->pool_ptree);
	linux_dma_pool_destroy(pool);
}

void *
linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    dma_addr_t *handle)
{
	struct linux_dma_obj *obj;

	obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags & GFP_NATIVE_MASK);
	if (obj == NULL)
		return (NULL);

	DMA_POOL_LOCK(pool);
	if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
		DMA_POOL_UNLOCK(pool);
		uma_zfree_arg(pool->pool_zone, obj, pool);
		return (NULL);
	}
	DMA_POOL_UNLOCK(pool);

	*handle = obj->dma_addr;
	return (obj->vaddr);
}

void
linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
{
	struct linux_dma_obj *obj;

	DMA_POOL_LOCK(pool);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
	if (obj == NULL) {
		DMA_POOL_UNLOCK(pool);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
	DMA_POOL_UNLOCK(pool);

	uma_zfree_arg(pool->pool_zone, obj, pool);
}
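/*
 * Backlight glue: bridge a LinuxKPI backlight_device to backlight(9).
 * FreeBSD expresses brightness as a 0-100 percentage, so values are
 * scaled against the Linux driver's max_brightness in both directions.
 */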
static int
linux_backlight_get_status(device_t dev, struct backlight_props *props)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	props->brightness = pdev->dev.bd->props.brightness;
	props->brightness =
	    props->brightness * 100 / pdev->dev.bd->props.max_brightness;
	props->nlevels = 0;

	return (0);
}

static int
linux_backlight_get_info(device_t dev, struct backlight_info *info)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	info->type = BACKLIGHT_TYPE_PANEL;
	strlcpy(info->name, pdev->dev.bd->name, BACKLIGHTMAXNAMELENGTH);
	return (0);
}

static int
linux_backlight_update_status(device_t dev, struct backlight_props *props)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	pdev->dev.bd->props.brightness = pdev->dev.bd->props.max_brightness *
	    props->brightness / 100;
	pdev->dev.bd->props.power = props->brightness == 0 ?
	    4/* FB_BLANK_POWERDOWN */ : 0/* FB_BLANK_UNBLANK */;
	return (pdev->dev.bd->ops->update_status(pdev->dev.bd));
}

struct backlight_device *
linux_backlight_device_register(const char *name, struct device *dev,
    void *data, const struct backlight_ops *ops, struct backlight_properties *props)
{

	dev->bd = malloc(sizeof(*dev->bd), M_DEVBUF, M_WAITOK | M_ZERO);
	dev->bd->ops = ops;
	dev->bd->props.type = props->type;
	dev->bd->props.max_brightness = props->max_brightness;
	dev->bd->props.brightness = props->brightness;
	dev->bd->props.power = props->power;
	dev->bd->data = data;
	dev->bd->dev = dev;
	dev->bd->name = strdup(name, M_DEVBUF);

	dev->backlight_dev = backlight_register(name, dev->bsddev);

	return (dev->bd);
}

void
linux_backlight_device_unregister(struct backlight_device *bd)
{

	backlight_destroy(bd->dev->backlight_dev);
	free(bd->name, M_DEVBUF);
	free(bd, M_DEVBUF);
}