/*-
 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 * Copyright (c) 2020-2021 The FreeBSD Foundation
 *
 * Portions of this software were developed by Björn Zeeb
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/pciio.h>
#include <sys/pctrie.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/stdarg.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <dev/pci/pci_iov.h>
#include <dev/backlight/backlight.h>

#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/compat.h>

#include <linux/backlight.h>

#include "backlight_if.h"
#include "pcib_if.h"

/* Undef the linux function macro defined in linux/pci.h */
#undef pci_get_class

static device_probe_t linux_pci_probe;
static device_attach_t linux_pci_attach;
static device_detach_t linux_pci_detach;
static device_suspend_t linux_pci_suspend;
static device_resume_t linux_pci_resume;
static device_shutdown_t linux_pci_shutdown;
static pci_iov_init_t linux_pci_iov_init;
static pci_iov_uninit_t linux_pci_iov_uninit;
static pci_iov_add_vf_t linux_pci_iov_add_vf;
static int linux_backlight_get_status(device_t dev, struct backlight_props *props);
static int linux_backlight_update_status(device_t dev, struct backlight_props *props);
static int linux_backlight_get_info(device_t dev, struct backlight_info *info);

static device_method_t pci_methods[] = {
        DEVMETHOD(device_probe, linux_pci_probe),
        DEVMETHOD(device_attach, linux_pci_attach),
        DEVMETHOD(device_detach, linux_pci_detach),
        DEVMETHOD(device_suspend, linux_pci_suspend),
        DEVMETHOD(device_resume, linux_pci_resume),
        DEVMETHOD(device_shutdown, linux_pci_shutdown),
        DEVMETHOD(pci_iov_init, linux_pci_iov_init),
        DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit),
        DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf),

        /* backlight interface */
        DEVMETHOD(backlight_update_status, linux_backlight_update_status),
        DEVMETHOD(backlight_get_status, linux_backlight_get_status),
        DEVMETHOD(backlight_get_info, linux_backlight_get_info),
        DEVMETHOD_END
};

struct linux_dma_priv {
        uint64_t dma_mask;
        struct mtx lock;
        bus_dma_tag_t dmat;
        struct pctrie ptree;
};
#define DMA_PRIV_LOCK(priv) mtx_lock(&(priv)->lock)
#define DMA_PRIV_UNLOCK(priv) mtx_unlock(&(priv)->lock)

static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
        struct linux_dma_priv *priv;
        int error;

        priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);
        pdev->dev.dma_priv = priv;

        mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);

        pctrie_init(&priv->ptree);

        /* create a default DMA tag */
        error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
        if (error) {
                mtx_destroy(&priv->lock);
                free(priv, M_DEVBUF);
                pdev->dev.dma_priv = NULL;
        }
        return (error);
}

static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
        struct linux_dma_priv *priv;

        priv = pdev->dev.dma_priv;
        if (priv->dmat)
                bus_dma_tag_destroy(priv->dmat);
        mtx_destroy(&priv->lock);
        free(priv, M_DEVBUF);
        pdev->dev.dma_priv = NULL;
        return (0);
}

int
linux_dma_tag_init(struct device *dev, u64 dma_mask)
{
        struct linux_dma_priv *priv;
        int error;

        priv = dev->dma_priv;

        if (priv->dmat) {
                if (priv->dma_mask == dma_mask)
                        return (0);

                bus_dma_tag_destroy(priv->dmat);
        }

        priv->dma_mask = dma_mask;

        error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
            1, 0,                       /* alignment, boundary */
            dma_mask,                   /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filtfunc, filtfuncarg */
            BUS_SPACE_MAXSIZE,          /* maxsize */
            1,                          /* nsegments */
            BUS_SPACE_MAXSIZE,          /* maxsegsz */
            0,                          /* flags */
            NULL, NULL,                 /* lockfunc, lockfuncarg */
            &priv->dmat);
        return (-error);
}
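
/*
 * Usage sketch (not part of the original file): drivers normally reach
 * linux_dma_tag_init() through the Linux dma_set_mask()-style wrappers
 * rather than calling it directly.  All "mydrv" names below are
 * hypothetical.
 *
 *	static int
 *	mydrv_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(48)) != 0)
 *			return (-ENXIO);
 *		...
 *	}
 *
 * Re-initializing with the mask already in force is a no-op above, so
 * repeated calls with the same mask are cheap.
 */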

static struct pci_driver *
linux_pci_find(device_t dev, const struct pci_device_id **idp)
{
        const struct pci_device_id *id;
        struct pci_driver *pdrv;
        uint16_t vendor;
        uint16_t device;
        uint16_t subvendor;
        uint16_t subdevice;

        vendor = pci_get_vendor(dev);
        device = pci_get_device(dev);
        subvendor = pci_get_subvendor(dev);
        subdevice = pci_get_subdevice(dev);

        spin_lock(&pci_lock);
        list_for_each_entry(pdrv, &pci_drivers, links) {
                for (id = pdrv->id_table; id->vendor != 0; id++) {
                        if (vendor == id->vendor &&
                            (PCI_ANY_ID == id->device || device == id->device) &&
                            (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) &&
                            (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) {
                                *idp = id;
                                spin_unlock(&pci_lock);
                                return (pdrv);
                        }
                }
        }
        spin_unlock(&pci_lock);
        return (NULL);
}

static void
lkpi_pci_dev_release(struct device *dev)
{

        lkpi_devres_release_free_list(dev);
        spin_lock_destroy(&dev->devres_lock);
}

static void
lkpifill_pci_dev(device_t dev, struct pci_dev *pdev)
{

        pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
        pdev->vendor = pci_get_vendor(dev);
        pdev->device = pci_get_device(dev);
        pdev->subsystem_vendor = pci_get_subvendor(dev);
        pdev->subsystem_device = pci_get_subdevice(dev);
        pdev->class = pci_get_class(dev);
        pdev->revision = pci_get_revid(dev);
        pdev->bus = malloc(sizeof(*pdev->bus), M_DEVBUF, M_WAITOK | M_ZERO);
        pdev->bus->self = pdev;
        pdev->bus->number = pci_get_bus(dev);
        pdev->bus->domain = pci_get_domain(dev);
        pdev->dev.bsddev = dev;
        pdev->dev.parent = &linux_root_device;
        pdev->dev.release = lkpi_pci_dev_release;
        INIT_LIST_HEAD(&pdev->dev.irqents);
        kobject_init(&pdev->dev.kobj, &linux_dev_ktype);
        kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev));
        kobject_add(&pdev->dev.kobj, &linux_root_device.kobj,
            kobject_name(&pdev->dev.kobj));
        spin_lock_init(&pdev->dev.devres_lock);
        INIT_LIST_HEAD(&pdev->dev.devres_head);
}

static void
lkpinew_pci_dev_release(struct device *dev)
{
        struct pci_dev *pdev;

        pdev = to_pci_dev(dev);
        if (pdev->root != NULL)
                pci_dev_put(pdev->root);
        free(pdev->bus, M_DEVBUF);
        free(pdev, M_DEVBUF);
}

struct pci_dev *
lkpinew_pci_dev(device_t dev)
{
        struct pci_dev *pdev;

        pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK|M_ZERO);
        lkpifill_pci_dev(dev, pdev);
        pdev->dev.release = lkpinew_pci_dev_release;

        return (pdev);
}

struct pci_dev *
lkpi_pci_get_class(unsigned int class, struct pci_dev *from)
{
        device_t dev;
        device_t devfrom = NULL;
        struct pci_dev *pdev;

        if (from != NULL)
                devfrom = from->dev.bsddev;

        dev = pci_find_class_from(class >> 16, (class >> 8) & 0xFF, devfrom);
        if (dev == NULL)
                return (NULL);

        pdev = lkpinew_pci_dev(dev);
        return (pdev);
}

struct pci_dev *
lkpi_pci_get_domain_bus_and_slot(int domain, unsigned int bus,
    unsigned int devfn)
{
        device_t dev;
        struct pci_dev *pdev;

        dev = pci_find_dbsf(domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
        if (dev == NULL)
                return (NULL);

        pdev = lkpinew_pci_dev(dev);
        return (pdev);
}
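
/*
 * Lookup sketch (illustrative, not from the original file): both
 * helpers above return a freshly allocated pci_dev whose release
 * callback is lkpinew_pci_dev_release(), so the caller drops the
 * reference with pci_dev_put() when done.
 *
 *	struct pci_dev *pdev;
 *
 *	pdev = lkpi_pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(2, 0));
 *	if (pdev != NULL) {
 *		... inspect pdev->vendor, pdev->device ...
 *		pci_dev_put(pdev);
 *	}
 */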

static int
linux_pci_probe(device_t dev)
{
        const struct pci_device_id *id;
        struct pci_driver *pdrv;

        if ((pdrv = linux_pci_find(dev, &id)) == NULL)
                return (ENXIO);
        if (device_get_driver(dev) != &pdrv->bsddriver)
                return (ENXIO);
        device_set_desc(dev, pdrv->name);
        return (0);
}

static int
linux_pci_attach(device_t dev)
{
        const struct pci_device_id *id;
        struct pci_driver *pdrv;
        struct pci_dev *pdev;

        pdrv = linux_pci_find(dev, &id);
        pdev = device_get_softc(dev);

        MPASS(pdrv != NULL);
        MPASS(pdev != NULL);

        return (linux_pci_attach_device(dev, pdrv, id, pdev));
}

int
linux_pci_attach_device(device_t dev, struct pci_driver *pdrv,
    const struct pci_device_id *id, struct pci_dev *pdev)
{
        struct resource_list_entry *rle;
        device_t parent;
        uintptr_t rid;
        int error;
        bool isdrm;

        linux_set_current(curthread);

        parent = device_get_parent(dev);
        isdrm = pdrv != NULL && pdrv->isdrm;

        if (isdrm) {
                struct pci_devinfo *dinfo;

                dinfo = device_get_ivars(parent);
                device_set_ivars(dev, dinfo);
        }

        lkpifill_pci_dev(dev, pdev);
        /*
         * A DRM device attaches below the interposing vgapci device, so
         * its RID must be fetched for the parent (via the PCI bus, the
         * grandparent) rather than for the drmn child itself.
         */
        if (isdrm)
                PCI_GET_ID(device_get_parent(parent), parent, PCI_ID_RID, &rid);
        else
                PCI_GET_ID(parent, dev, PCI_ID_RID, &rid);
        pdev->devfn = rid;
        pdev->pdrv = pdrv;
        rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0);
        if (rle != NULL)
                pdev->dev.irq = rle->start;
        else
                pdev->dev.irq = LINUX_IRQ_INVALID;
        pdev->irq = pdev->dev.irq;
        error = linux_pdev_dma_init(pdev);
        if (error)
                goto out_dma_init;

        TAILQ_INIT(&pdev->mmio);

        spin_lock(&pci_lock);
        list_add(&pdev->links, &pci_devices);
        spin_unlock(&pci_lock);

        if (pdrv != NULL) {
                error = pdrv->probe(pdev, id);
                if (error)
                        goto out_probe;
        }
        return (0);

out_probe:
        free(pdev->bus, M_DEVBUF);
        linux_pdev_dma_uninit(pdev);
out_dma_init:
        spin_lock(&pci_lock);
        list_del(&pdev->links);
        spin_unlock(&pci_lock);
        put_device(&pdev->dev);
        return (-error);
}

static int
linux_pci_detach(device_t dev)
{
        struct pci_dev *pdev;

        pdev = device_get_softc(dev);

        MPASS(pdev != NULL);

        device_set_desc(dev, NULL);

        return (linux_pci_detach_device(pdev));
}

int
linux_pci_detach_device(struct pci_dev *pdev)
{

        linux_set_current(curthread);

        if (pdev->pdrv != NULL)
                pdev->pdrv->remove(pdev);

        if (pdev->root != NULL)
                pci_dev_put(pdev->root);
        free(pdev->bus, M_DEVBUF);
        linux_pdev_dma_uninit(pdev);

        spin_lock(&pci_lock);
        list_del(&pdev->links);
        spin_unlock(&pci_lock);
        put_device(&pdev->dev);

        return (0);
}

static int
lkpi_pci_disable_dev(struct device *dev)
{

        (void) pci_disable_io(dev->bsddev, SYS_RES_MEMORY);
        (void) pci_disable_io(dev->bsddev, SYS_RES_IOPORT);
        return (0);
}

void
lkpi_pci_devres_release(struct device *dev, void *p)
{
        struct pci_devres *dr;
        struct pci_dev *pdev;
        int bar;

        pdev = to_pci_dev(dev);
        dr = p;

        if (pdev->msix_enabled)
                lkpi_pci_disable_msix(pdev);
        if (pdev->msi_enabled)
                lkpi_pci_disable_msi(pdev);

        if (dr->enable_io && lkpi_pci_disable_dev(dev) == 0)
                dr->enable_io = false;

        if (dr->region_mask == 0)
                return;
        for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {

                if ((dr->region_mask & (1 << bar)) == 0)
                        continue;
                pci_release_region(pdev, bar);
        }
}

void
lkpi_pcim_iomap_table_release(struct device *dev, void *p)
{
        struct pcim_iomap_devres *dr;
        struct pci_dev *pdev;
        int bar;

        dr = p;
        pdev = to_pci_dev(dev);
        for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {

                if (dr->mmio_table[bar] == NULL)
                        continue;

                pci_iounmap(pdev, dr->mmio_table[bar]);
        }
}

static int
linux_pci_suspend(device_t dev)
{
        const struct dev_pm_ops *pmops;
        struct pm_message pm = { };
        struct pci_dev *pdev;
        int error;

        error = 0;
        linux_set_current(curthread);
        pdev = device_get_softc(dev);
        pmops = pdev->pdrv->driver.pm;

        if (pdev->pdrv->suspend != NULL)
                error = -pdev->pdrv->suspend(pdev, pm);
        else if (pmops != NULL && pmops->suspend != NULL) {
                error = -pmops->suspend(&pdev->dev);
                if (error == 0 && pmops->suspend_late != NULL)
                        error = -pmops->suspend_late(&pdev->dev);
        }
        return (error);
}
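
/*
 * PM ordering sketch (illustrative; the mydrv_* names are hypothetical):
 * a legacy ->suspend() hook takes precedence, otherwise the dev_pm_ops
 * callbacks run as suspend then suspend_late, and on the resume side
 * resume_early then resume, a reduced form of the Linux PM phases.
 *
 *	static const struct dev_pm_ops mydrv_pm_ops = {
 *		.suspend	= mydrv_suspend,
 *		.suspend_late	= mydrv_suspend_late,
 *		.resume_early	= mydrv_resume_early,
 *		.resume		= mydrv_resume,
 *	};
 */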

static int
linux_pci_resume(device_t dev)
{
        const struct dev_pm_ops *pmops;
        struct pci_dev *pdev;
        int error;

        error = 0;
        linux_set_current(curthread);
        pdev = device_get_softc(dev);
        pmops = pdev->pdrv->driver.pm;

        if (pdev->pdrv->resume != NULL)
                error = -pdev->pdrv->resume(pdev);
        else if (pmops != NULL && pmops->resume != NULL) {
                if (pmops->resume_early != NULL)
                        error = -pmops->resume_early(&pdev->dev);
                if (error == 0 && pmops->resume != NULL)
                        error = -pmops->resume(&pdev->dev);
        }
        return (error);
}

static int
linux_pci_shutdown(device_t dev)
{
        struct pci_dev *pdev;

        linux_set_current(curthread);
        pdev = device_get_softc(dev);
        if (pdev->pdrv->shutdown != NULL)
                pdev->pdrv->shutdown(pdev);
        return (0);
}

static int
linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config)
{
        struct pci_dev *pdev;
        int error;

        linux_set_current(curthread);
        pdev = device_get_softc(dev);
        if (pdev->pdrv->bsd_iov_init != NULL)
                error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config);
        else
                error = EINVAL;
        return (error);
}

static void
linux_pci_iov_uninit(device_t dev)
{
        struct pci_dev *pdev;

        linux_set_current(curthread);
        pdev = device_get_softc(dev);
        if (pdev->pdrv->bsd_iov_uninit != NULL)
                pdev->pdrv->bsd_iov_uninit(dev);
}

static int
linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)
{
        struct pci_dev *pdev;
        int error;

        linux_set_current(curthread);
        pdev = device_get_softc(dev);
        if (pdev->pdrv->bsd_iov_add_vf != NULL)
                error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config);
        else
                error = EINVAL;
        return (error);
}

static int
_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
{
        int error;

        linux_set_current(curthread);
        spin_lock(&pci_lock);
        list_add(&pdrv->links, &pci_drivers);
        spin_unlock(&pci_lock);
        pdrv->bsddriver.name = pdrv->name;
        pdrv->bsddriver.methods = pci_methods;
        pdrv->bsddriver.size = sizeof(struct pci_dev);

        mtx_lock(&Giant);
        error = devclass_add_driver(dc, &pdrv->bsddriver,
            BUS_PASS_DEFAULT, &pdrv->bsdclass);
        mtx_unlock(&Giant);
        return (-error);
}

int
linux_pci_register_driver(struct pci_driver *pdrv)
{
        devclass_t dc;

        dc = devclass_find("pci");
        if (dc == NULL)
                return (-ENXIO);
        pdrv->isdrm = false;
        return (_linux_pci_register_driver(pdrv, dc));
}
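
/*
 * Registration sketch (all "mydrv" identifiers are hypothetical).  Note
 * that linux_pci_find() stops scanning an id_table at the first entry
 * with a zero vendor, so the table must be zero-terminated.
 *
 *	static const struct pci_device_id mydrv_ids[] = {
 *		{ .vendor = 0x15b3, .device = PCI_ANY_ID,
 *		  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID },
 *		{ 0, }
 *	};
 *
 *	static struct pci_driver mydrv_driver = {
 *		.name		= "mydrv",
 *		.id_table	= mydrv_ids,
 *		.probe		= mydrv_probe,
 *		.remove		= mydrv_remove,
 *	};
 *
 *	error = linux_pci_register_driver(&mydrv_driver);
 */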

unsigned long
pci_resource_start(struct pci_dev *pdev, int bar)
{
        struct resource_list_entry *rle;
        rman_res_t newstart;
        device_t dev;

        if ((rle = linux_pci_get_bar(pdev, bar)) == NULL)
                return (0);
        dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
            device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
        if (BUS_TRANSLATE_RESOURCE(dev, rle->type, rle->start, &newstart)) {
                device_printf(pdev->dev.bsddev, "translate of %#jx failed\n",
                    (uintmax_t)rle->start);
                return (0);
        }
        return (newstart);
}

unsigned long
pci_resource_len(struct pci_dev *pdev, int bar)
{
        struct resource_list_entry *rle;

        if ((rle = linux_pci_get_bar(pdev, bar)) == NULL)
                return (0);
        return (rle->count);
}

int
linux_pci_register_drm_driver(struct pci_driver *pdrv)
{
        devclass_t dc;

        dc = devclass_create("vgapci");
        if (dc == NULL)
                return (-ENXIO);
        pdrv->isdrm = true;
        pdrv->name = "drmn";
        return (_linux_pci_register_driver(pdrv, dc));
}

void
linux_pci_unregister_driver(struct pci_driver *pdrv)
{
        devclass_t bus;

        bus = devclass_find("pci");

        spin_lock(&pci_lock);
        list_del(&pdrv->links);
        spin_unlock(&pci_lock);
        mtx_lock(&Giant);
        if (bus != NULL)
                devclass_delete_driver(bus, &pdrv->bsddriver);
        mtx_unlock(&Giant);
}

void
linux_pci_unregister_drm_driver(struct pci_driver *pdrv)
{
        devclass_t bus;

        bus = devclass_find("vgapci");

        spin_lock(&pci_lock);
        list_del(&pdrv->links);
        spin_unlock(&pci_lock);
        mtx_lock(&Giant);
        if (bus != NULL)
                devclass_delete_driver(bus, &pdrv->bsddriver);
        mtx_unlock(&Giant);
}

CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));

struct linux_dma_obj {
        void *vaddr;
        uint64_t dma_addr;
        bus_dmamap_t dmamap;
};

static uma_zone_t linux_dma_trie_zone;
static uma_zone_t linux_dma_obj_zone;

static void
linux_dma_init(void *arg)
{

        linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
            pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
            UMA_ALIGN_PTR, 0);
        linux_dma_obj_zone = uma_zcreate("linux_dma_object",
            sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
            UMA_ALIGN_PTR, 0);
}
SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);

static void
linux_dma_uninit(void *arg)
{

        uma_zdestroy(linux_dma_obj_zone);
        uma_zdestroy(linux_dma_trie_zone);
}
SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);

static void *
linux_dma_trie_alloc(struct pctrie *ptree)
{

        return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT));
}

static void
linux_dma_trie_free(struct pctrie *ptree, void *node)
{

        uma_zfree(linux_dma_trie_zone, node);
}

PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
    linux_dma_trie_free);

void *
linux_dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t flag)
{
        struct linux_dma_priv *priv;
        vm_paddr_t high;
        size_t align;
        void *mem;

        if (dev == NULL || dev->dma_priv == NULL) {
                *dma_handle = 0;
                return (NULL);
        }
        priv = dev->dma_priv;
        if (priv->dma_mask)
                high = priv->dma_mask;
        else if (flag & GFP_DMA32)
                high = BUS_SPACE_MAXADDR_32BIT;
        else
                high = BUS_SPACE_MAXADDR;
        align = PAGE_SIZE << get_order(size);
        mem = (void *)kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high,
            align, 0, VM_MEMATTR_DEFAULT);
        if (mem != NULL) {
                *dma_handle = linux_dma_map_phys(dev, vtophys(mem), size);
                if (*dma_handle == 0) {
                        kmem_free((vm_offset_t)mem, size);
                        mem = NULL;
                }
        } else {
                *dma_handle = 0;
        }
        return (mem);
}
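
/*
 * Allocation sketch (illustrative): a single-segment, physically
 * contiguous buffer plus its bus address.  Sub-page sizes still consume
 * at least a page because the alignment is PAGE_SIZE << get_order(size).
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = linux_dma_alloc_coherent(&pdev->dev, PAGE_SIZE, &ring_dma,
 *	    GFP_KERNEL);
 *	if (ring == NULL)
 *		return (-ENOMEM);
 */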

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
        struct linux_dma_priv *priv;
        struct linux_dma_obj *obj;
        int error, nseg;
        bus_dma_segment_t seg;

        priv = dev->dma_priv;

        /*
         * If the resultant mapping will be entirely 1:1 with the
         * physical address, short-circuit the remainder of the
         * bus_dma API.  This avoids tracking collisions in the pctrie
         * with the additional benefit of reducing overhead.
         */
        if (bus_dma_id_mapped(priv->dmat, phys, len))
                return (phys);

        obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT);
        if (obj == NULL) {
                return (0);
        }

        DMA_PRIV_LOCK(priv);
        if (bus_dmamap_create(priv->dmat, 0, &obj->dmamap) != 0) {
                DMA_PRIV_UNLOCK(priv);
                uma_zfree(linux_dma_obj_zone, obj);
                return (0);
        }

        nseg = -1;
        if (_bus_dmamap_load_phys(priv->dmat, obj->dmamap, phys, len,
            BUS_DMA_NOWAIT, &seg, &nseg) != 0) {
                bus_dmamap_destroy(priv->dmat, obj->dmamap);
                DMA_PRIV_UNLOCK(priv);
                uma_zfree(linux_dma_obj_zone, obj);
                return (0);
        }

        KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
        obj->dma_addr = seg.ds_addr;

        error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
        if (error != 0) {
                bus_dmamap_unload(priv->dmat, obj->dmamap);
                bus_dmamap_destroy(priv->dmat, obj->dmamap);
                DMA_PRIV_UNLOCK(priv);
                uma_zfree(linux_dma_obj_zone, obj);
                return (0);
        }
        DMA_PRIV_UNLOCK(priv);
        return (obj->dma_addr);
}
#else
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
        return (phys);
}
#endif

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
        struct linux_dma_priv *priv;
        struct linux_dma_obj *obj;

        priv = dev->dma_priv;

        if (pctrie_is_empty(&priv->ptree))
                return;

        DMA_PRIV_LOCK(priv);
        obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
        if (obj == NULL) {
                DMA_PRIV_UNLOCK(priv);
                return;
        }
        LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);
        bus_dmamap_unload(priv->dmat, obj->dmamap);
        bus_dmamap_destroy(priv->dmat, obj->dmamap);
        DMA_PRIV_UNLOCK(priv);

        uma_zfree(linux_dma_obj_zone, obj);
}
#else
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
}
#endif

int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction dir __unused, unsigned long attrs __unused)
{
        struct linux_dma_priv *priv;
        struct scatterlist *sg;
        int i, nseg;
        bus_dma_segment_t seg;

        priv = dev->dma_priv;

        DMA_PRIV_LOCK(priv);

        /* create common DMA map in the first S/G entry */
        if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
                DMA_PRIV_UNLOCK(priv);
                return (0);
        }

        /* load all S/G list entries */
        for_each_sg(sgl, sg, nents, i) {
                nseg = -1;
                if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
                    sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
                    &seg, &nseg) != 0) {
                        bus_dmamap_unload(priv->dmat, sgl->dma_map);
                        bus_dmamap_destroy(priv->dmat, sgl->dma_map);
                        DMA_PRIV_UNLOCK(priv);
                        return (0);
                }
                KASSERT(nseg == 0,
                    ("More than one segment (nseg=%d)", nseg + 1));

                sg_dma_address(sg) = seg.ds_addr;
        }
        DMA_PRIV_UNLOCK(priv);

        return (nents);
}
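
/*
 * S/G sketch (illustrative): all entries are loaded through the single
 * DMA map stored in the first scatterlist entry, so the unmap below
 * must be handed the same sgl head that was mapped.
 *
 *	if (linux_dma_map_sg_attrs(&pdev->dev, sgl, nents,
 *	    DMA_TO_DEVICE, 0) == 0)
 *		return (-ENOMEM);
 *	...
 *	linux_dma_unmap_sg_attrs(&pdev->dev, sgl, nents,
 *	    DMA_TO_DEVICE, 0);
 */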

void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents __unused, enum dma_data_direction dir __unused,
    unsigned long attrs __unused)
{
        struct linux_dma_priv *priv;

        priv = dev->dma_priv;

        DMA_PRIV_LOCK(priv);
        bus_dmamap_unload(priv->dmat, sgl->dma_map);
        bus_dmamap_destroy(priv->dmat, sgl->dma_map);
        DMA_PRIV_UNLOCK(priv);
}

struct dma_pool {
        struct device *pool_device;
        uma_zone_t pool_zone;
        struct mtx pool_lock;
        bus_dma_tag_t pool_dmat;
        size_t pool_entry_size;
        struct pctrie pool_ptree;
};

#define DMA_POOL_LOCK(pool) mtx_lock(&(pool)->pool_lock)
#define DMA_POOL_UNLOCK(pool) mtx_unlock(&(pool)->pool_lock)

static inline int
dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
{
        struct linux_dma_obj *obj = mem;
        struct dma_pool *pool = arg;
        int error, nseg;
        bus_dma_segment_t seg;

        nseg = -1;
        DMA_POOL_LOCK(pool);
        error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
            vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
            &seg, &nseg);
        DMA_POOL_UNLOCK(pool);
        if (error != 0) {
                return (error);
        }
        KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
        obj->dma_addr = seg.ds_addr;

        return (0);
}

static void
dma_pool_obj_dtor(void *mem, int size, void *arg)
{
        struct linux_dma_obj *obj = mem;
        struct dma_pool *pool = arg;

        DMA_POOL_LOCK(pool);
        bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
        DMA_POOL_UNLOCK(pool);
}

static int
dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
        struct dma_pool *pool = arg;
        struct linux_dma_priv *priv;
        struct linux_dma_obj *obj;
        int error, i;

        priv = pool->pool_device->dma_priv;
        for (i = 0; i < count; i++) {
                obj = uma_zalloc(linux_dma_obj_zone, flags);
                if (obj == NULL)
                        break;

                error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
                    BUS_DMA_NOWAIT, &obj->dmamap);
                if (error != 0) {
                        uma_zfree(linux_dma_obj_zone, obj);
                        break;
                }

                store[i] = obj;
        }

        return (i);
}

static void
dma_pool_obj_release(void *arg, void **store, int count)
{
        struct dma_pool *pool = arg;
        struct linux_dma_priv *priv;
        struct linux_dma_obj *obj;
        int i;

        priv = pool->pool_device->dma_priv;
        for (i = 0; i < count; i++) {
                obj = store[i];
                bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
                uma_zfree(linux_dma_obj_zone, obj);
        }
}

struct dma_pool *
linux_dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
{
        struct linux_dma_priv *priv;
        struct dma_pool *pool;

        priv = dev->dma_priv;

        pool = kzalloc(sizeof(*pool), GFP_KERNEL);
        pool->pool_device = dev;
        pool->pool_entry_size = size;

        if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
            align, boundary,            /* alignment, boundary */
            priv->dma_mask,             /* lowaddr */
            BUS_SPACE_MAXADDR,          /* highaddr */
            NULL, NULL,                 /* filtfunc, filtfuncarg */
            size,                       /* maxsize */
            1,                          /* nsegments */
            size,                       /* maxsegsz */
            0,                          /* flags */
            NULL, NULL,                 /* lockfunc, lockfuncarg */
            &pool->pool_dmat)) {
                kfree(pool);
                return (NULL);
        }

        pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,
            dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,
            dma_pool_obj_release, pool, 0);

        mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF);
        pctrie_init(&pool->pool_ptree);

        return (pool);
}
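
/*
 * Pool lifecycle sketch ("mydrv_desc" is hypothetical): fixed-size DMA
 * buffers cached in a UMA zone, each carrying its pre-loaded bus
 * address.
 *
 *	struct dma_pool *pool;
 *	dma_addr_t desc_dma;
 *	void *desc;
 *
 *	pool = linux_dma_pool_create("mydrv_desc", &pdev->dev,
 *	    sizeof(struct mydrv_desc), 64, 0);
 *	desc = linux_dma_pool_alloc(pool, GFP_KERNEL, &desc_dma);
 *	...
 *	linux_dma_pool_free(pool, desc, desc_dma);
 *	linux_dma_pool_destroy(pool);
 */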

void
linux_dma_pool_destroy(struct dma_pool *pool)
{

        uma_zdestroy(pool->pool_zone);
        bus_dma_tag_destroy(pool->pool_dmat);
        mtx_destroy(&pool->pool_lock);
        kfree(pool);
}

void
lkpi_dmam_pool_destroy(struct device *dev, void *p)
{
        struct dma_pool *pool;

        pool = *(struct dma_pool **)p;
        LINUX_DMA_PCTRIE_RECLAIM(&pool->pool_ptree);
        linux_dma_pool_destroy(pool);
}

void *
linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    dma_addr_t *handle)
{
        struct linux_dma_obj *obj;

        obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags & GFP_NATIVE_MASK);
        if (obj == NULL)
                return (NULL);

        DMA_POOL_LOCK(pool);
        if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
                DMA_POOL_UNLOCK(pool);
                uma_zfree_arg(pool->pool_zone, obj, pool);
                return (NULL);
        }
        DMA_POOL_UNLOCK(pool);

        *handle = obj->dma_addr;
        return (obj->vaddr);
}

void
linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
{
        struct linux_dma_obj *obj;

        DMA_POOL_LOCK(pool);
        obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
        if (obj == NULL) {
                DMA_POOL_UNLOCK(pool);
                return;
        }
        LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
        DMA_POOL_UNLOCK(pool);

        uma_zfree_arg(pool->pool_zone, obj, pool);
}

static int
linux_backlight_get_status(device_t dev, struct backlight_props *props)
{
        struct pci_dev *pdev;

        linux_set_current(curthread);
        pdev = device_get_softc(dev);

        props->brightness = pdev->dev.bd->props.brightness;
        props->brightness = props->brightness * 100 / pdev->dev.bd->props.max_brightness;
        props->nlevels = 0;

        return (0);
}

static int
linux_backlight_get_info(device_t dev, struct backlight_info *info)
{
        struct pci_dev *pdev;

        linux_set_current(curthread);
        pdev = device_get_softc(dev);

        info->type = BACKLIGHT_TYPE_PANEL;
        strlcpy(info->name, pdev->dev.bd->name, BACKLIGHTMAXNAMELENGTH);
        return (0);
}

static int
linux_backlight_update_status(device_t dev, struct backlight_props *props)
{
        struct pci_dev *pdev;

        linux_set_current(curthread);
        pdev = device_get_softc(dev);

        pdev->dev.bd->props.brightness = pdev->dev.bd->props.max_brightness *
            props->brightness / 100;
        return (pdev->dev.bd->ops->update_status(pdev->dev.bd));
}
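
/*
 * Scaling note: backlight(9) speaks percent (0-100) while the Linux
 * side uses 0..max_brightness, so the handlers above convert in both
 * directions.  For example, with max_brightness = 255 a 50% request
 * programs 255 * 50 / 100 = 127.
 */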

struct backlight_device *
linux_backlight_device_register(const char *name, struct device *dev,
    void *data, const struct backlight_ops *ops, struct backlight_properties *props)
{

        dev->bd = malloc(sizeof(*dev->bd), M_DEVBUF, M_WAITOK | M_ZERO);
        dev->bd->ops = ops;
        dev->bd->props.type = props->type;
        dev->bd->props.max_brightness = props->max_brightness;
        dev->bd->props.brightness = props->brightness;
        dev->bd->props.power = props->power;
        dev->bd->data = data;
        dev->bd->dev = dev;
        dev->bd->name = strdup(name, M_DEVBUF);

        dev->backlight_dev = backlight_register(name, dev->bsddev);

        return (dev->bd);
}

void
linux_backlight_device_unregister(struct backlight_device *bd)
{

        backlight_destroy(bd->dev->backlight_dev);
        free(bd->name, M_DEVBUF);
        free(bd, M_DEVBUF);
}
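
/*
 * Registration sketch (assumptions: the "mydrv" names and property
 * values are illustrative).  ops->update_status must be non-NULL since
 * linux_backlight_update_status() invokes it unconditionally, and
 * props.max_brightness must be non-zero or the percentage conversion in
 * linux_backlight_get_status() divides by zero.
 *
 *	static const struct backlight_ops mydrv_bl_ops = {
 *		.update_status = mydrv_bl_update_status,
 *	};
 *	struct backlight_properties props = {
 *		.type = BACKLIGHT_RAW,
 *		.max_brightness = 255,
 *		.brightness = 255,
 *	};
 *
 *	bd = linux_backlight_device_register("mydrv_bl", &pdev->dev, sc,
 *	    &mydrv_bl_ops, &props);
 */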