1 /*- 2 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd. 3 * All rights reserved. 4 * Copyright (c) 2020-2022 The FreeBSD Foundation 5 * 6 * Portions of this software were developed by Björn Zeeb 7 * under sponsorship from the FreeBSD Foundation. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice unmodified, this list of conditions, and the following 14 * disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 */ 30 31 #include <sys/param.h> 32 #include <sys/systm.h> 33 #include <sys/bus.h> 34 #include <sys/malloc.h> 35 #include <sys/kernel.h> 36 #include <sys/sysctl.h> 37 #include <sys/lock.h> 38 #include <sys/mutex.h> 39 #include <sys/fcntl.h> 40 #include <sys/file.h> 41 #include <sys/filio.h> 42 #include <sys/pciio.h> 43 #include <sys/pctrie.h> 44 #include <sys/rman.h> 45 #include <sys/rwlock.h> 46 47 #include <vm/vm.h> 48 #include <vm/pmap.h> 49 50 #include <machine/bus.h> 51 #include <machine/resource.h> 52 #include <machine/stdarg.h> 53 54 #include <dev/pci/pcivar.h> 55 #include <dev/pci/pci_private.h> 56 #include <dev/pci/pci_iov.h> 57 #include <dev/backlight/backlight.h> 58 59 #include <linux/kernel.h> 60 #include <linux/kobject.h> 61 #include <linux/device.h> 62 #include <linux/slab.h> 63 #include <linux/module.h> 64 #include <linux/cdev.h> 65 #include <linux/file.h> 66 #include <linux/sysfs.h> 67 #include <linux/mm.h> 68 #include <linux/io.h> 69 #include <linux/vmalloc.h> 70 #include <linux/pci.h> 71 #include <linux/compat.h> 72 73 #include <linux/backlight.h> 74 75 #include "backlight_if.h" 76 #include "pcib_if.h" 77 78 /* Undef the linux function macro defined in linux/pci.h */ 79 #undef pci_get_class 80 81 extern int linuxkpi_debug; 82 83 SYSCTL_DECL(_compat_linuxkpi); 84 85 static counter_u64_t lkpi_pci_nseg1_fail; 86 SYSCTL_COUNTER_U64(_compat_linuxkpi, OID_AUTO, lkpi_pci_nseg1_fail, CTLFLAG_RD, 87 &lkpi_pci_nseg1_fail, "Count of busdma mapping failures of single-segment"); 88 89 static device_probe_t linux_pci_probe; 90 static device_attach_t linux_pci_attach; 91 static device_detach_t linux_pci_detach; 92 static device_suspend_t linux_pci_suspend; 93 static device_resume_t linux_pci_resume; 94 static device_shutdown_t linux_pci_shutdown; 95 static pci_iov_init_t linux_pci_iov_init; 96 static pci_iov_uninit_t linux_pci_iov_uninit; 97 static pci_iov_add_vf_t linux_pci_iov_add_vf; 98 static int linux_backlight_get_status(device_t dev, struct backlight_props 
		*props);
static int	linux_backlight_update_status(device_t dev, struct backlight_props *props);
static int	linux_backlight_get_info(device_t dev, struct backlight_info *info);
static void	lkpi_pcim_iomap_table_release(struct device *, void *);

static device_method_t pci_methods[] = {
	DEVMETHOD(device_probe, linux_pci_probe),
	DEVMETHOD(device_attach, linux_pci_attach),
	DEVMETHOD(device_detach, linux_pci_detach),
	DEVMETHOD(device_suspend, linux_pci_suspend),
	DEVMETHOD(device_resume, linux_pci_resume),
	DEVMETHOD(device_shutdown, linux_pci_shutdown),
	DEVMETHOD(pci_iov_init, linux_pci_iov_init),
	DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf),

	/* backlight interface */
	DEVMETHOD(backlight_update_status, linux_backlight_update_status),
	DEVMETHOD(backlight_get_status, linux_backlight_get_status),
	DEVMETHOD(backlight_get_info, linux_backlight_get_info),
	DEVMETHOD_END
};

const char *pci_power_names[] = {
	"UNKNOWN", "D0", "D1", "D2", "D3hot", "D3cold"
};

/* We need some meta-struct to keep track of these for devres. */
struct pci_devres {
	bool		enable_io;
	/* PCIR_MAX_BAR_0 + 1 = 6 => BIT(0..5). */
	uint8_t		region_mask;
	struct resource	*region_table[PCIR_MAX_BAR_0 + 1]; /* Not needed. */
};
struct pcim_iomap_devres {
	void		*mmio_table[PCIR_MAX_BAR_0 + 1];
	struct resource	*res_table[PCIR_MAX_BAR_0 + 1];
};

struct linux_dma_priv {
	uint64_t	dma_mask;
	bus_dma_tag_t	dmat;
	uint64_t	dma_coherent_mask;
	bus_dma_tag_t	dmat_coherent;
	struct mtx	lock;
	struct pctrie	ptree;
};
#define	DMA_PRIV_LOCK(priv)	mtx_lock(&(priv)->lock)
#define	DMA_PRIV_UNLOCK(priv)	mtx_unlock(&(priv)->lock)

static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;

	priv = pdev->dev.dma_priv;
	if (priv->dmat)
		bus_dma_tag_destroy(priv->dmat);
	if (priv->dmat_coherent)
		bus_dma_tag_destroy(priv->dmat_coherent);
	mtx_destroy(&priv->lock);
	pdev->dev.dma_priv = NULL;
	free(priv, M_DEVBUF);
	return (0);
}

static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;
	int error;

	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);

	mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);
	pctrie_init(&priv->ptree);

	pdev->dev.dma_priv = priv;

	/* Create the default DMA tags. */
	error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
	if (error != 0)
		goto err;
	/* Coherent is lower 32bit only by default in Linux.
*/ 182 error = linux_dma_tag_init_coherent(&pdev->dev, DMA_BIT_MASK(32)); 183 if (error != 0) 184 goto err; 185 186 return (error); 187 188 err: 189 linux_pdev_dma_uninit(pdev); 190 return (error); 191 } 192 193 int 194 linux_dma_tag_init(struct device *dev, u64 dma_mask) 195 { 196 struct linux_dma_priv *priv; 197 int error; 198 199 priv = dev->dma_priv; 200 201 if (priv->dmat) { 202 if (priv->dma_mask == dma_mask) 203 return (0); 204 205 bus_dma_tag_destroy(priv->dmat); 206 } 207 208 priv->dma_mask = dma_mask; 209 210 error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev), 211 1, 0, /* alignment, boundary */ 212 dma_mask, /* lowaddr */ 213 BUS_SPACE_MAXADDR, /* highaddr */ 214 NULL, NULL, /* filtfunc, filtfuncarg */ 215 BUS_SPACE_MAXSIZE, /* maxsize */ 216 1, /* nsegments */ 217 BUS_SPACE_MAXSIZE, /* maxsegsz */ 218 0, /* flags */ 219 NULL, NULL, /* lockfunc, lockfuncarg */ 220 &priv->dmat); 221 return (-error); 222 } 223 224 int 225 linux_dma_tag_init_coherent(struct device *dev, u64 dma_mask) 226 { 227 struct linux_dma_priv *priv; 228 int error; 229 230 priv = dev->dma_priv; 231 232 if (priv->dmat_coherent) { 233 if (priv->dma_coherent_mask == dma_mask) 234 return (0); 235 236 bus_dma_tag_destroy(priv->dmat_coherent); 237 } 238 239 priv->dma_coherent_mask = dma_mask; 240 241 error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev), 242 1, 0, /* alignment, boundary */ 243 dma_mask, /* lowaddr */ 244 BUS_SPACE_MAXADDR, /* highaddr */ 245 NULL, NULL, /* filtfunc, filtfuncarg */ 246 BUS_SPACE_MAXSIZE, /* maxsize */ 247 1, /* nsegments */ 248 BUS_SPACE_MAXSIZE, /* maxsegsz */ 249 0, /* flags */ 250 NULL, NULL, /* lockfunc, lockfuncarg */ 251 &priv->dmat_coherent); 252 return (-error); 253 } 254 255 static struct pci_driver * 256 linux_pci_find(device_t dev, const struct pci_device_id **idp) 257 { 258 const struct pci_device_id *id; 259 struct pci_driver *pdrv; 260 uint16_t vendor; 261 uint16_t device; 262 uint16_t subvendor; 263 uint16_t subdevice; 264 265 vendor = pci_get_vendor(dev); 266 device = pci_get_device(dev); 267 subvendor = pci_get_subvendor(dev); 268 subdevice = pci_get_subdevice(dev); 269 270 spin_lock(&pci_lock); 271 list_for_each_entry(pdrv, &pci_drivers, node) { 272 for (id = pdrv->id_table; id->vendor != 0; id++) { 273 if (vendor == id->vendor && 274 (PCI_ANY_ID == id->device || device == id->device) && 275 (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) && 276 (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) { 277 *idp = id; 278 spin_unlock(&pci_lock); 279 return (pdrv); 280 } 281 } 282 } 283 spin_unlock(&pci_lock); 284 return (NULL); 285 } 286 287 struct pci_dev * 288 lkpi_pci_get_device(uint16_t vendor, uint16_t device, struct pci_dev *odev) 289 { 290 struct pci_dev *pdev; 291 292 KASSERT(odev == NULL, ("%s: odev argument not yet supported\n", __func__)); 293 294 spin_lock(&pci_lock); 295 list_for_each_entry(pdev, &pci_devices, links) { 296 if (pdev->vendor == vendor && pdev->device == device) 297 break; 298 } 299 spin_unlock(&pci_lock); 300 301 return (pdev); 302 } 303 304 static void 305 lkpi_pci_dev_release(struct device *dev) 306 { 307 308 lkpi_devres_release_free_list(dev); 309 spin_lock_destroy(&dev->devres_lock); 310 } 311 312 static void 313 lkpifill_pci_dev(device_t dev, struct pci_dev *pdev) 314 { 315 316 pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev)); 317 pdev->vendor = pci_get_vendor(dev); 318 pdev->device = pci_get_device(dev); 319 pdev->subsystem_vendor = pci_get_subvendor(dev); 320 pdev->subsystem_device = 
pci_get_subdevice(dev); 321 pdev->class = pci_get_class(dev); 322 pdev->revision = pci_get_revid(dev); 323 pdev->path_name = kasprintf(GFP_KERNEL, "%04d:%02d:%02d.%d", 324 pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev), 325 pci_get_function(dev)); 326 pdev->bus = malloc(sizeof(*pdev->bus), M_DEVBUF, M_WAITOK | M_ZERO); 327 /* 328 * This should be the upstream bridge; pci_upstream_bridge() 329 * handles that case on demand as otherwise we'll shadow the 330 * entire PCI hierarchy. 331 */ 332 pdev->bus->self = pdev; 333 pdev->bus->number = pci_get_bus(dev); 334 pdev->bus->domain = pci_get_domain(dev); 335 pdev->dev.bsddev = dev; 336 pdev->dev.parent = &linux_root_device; 337 pdev->dev.release = lkpi_pci_dev_release; 338 INIT_LIST_HEAD(&pdev->dev.irqents); 339 340 if (pci_msi_count(dev) > 0) 341 pdev->msi_desc = malloc(pci_msi_count(dev) * 342 sizeof(*pdev->msi_desc), M_DEVBUF, M_WAITOK | M_ZERO); 343 344 kobject_init(&pdev->dev.kobj, &linux_dev_ktype); 345 kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev)); 346 kobject_add(&pdev->dev.kobj, &linux_root_device.kobj, 347 kobject_name(&pdev->dev.kobj)); 348 spin_lock_init(&pdev->dev.devres_lock); 349 INIT_LIST_HEAD(&pdev->dev.devres_head); 350 } 351 352 static void 353 lkpinew_pci_dev_release(struct device *dev) 354 { 355 struct pci_dev *pdev; 356 int i; 357 358 pdev = to_pci_dev(dev); 359 if (pdev->root != NULL) 360 pci_dev_put(pdev->root); 361 if (pdev->bus->self != pdev) 362 pci_dev_put(pdev->bus->self); 363 free(pdev->bus, M_DEVBUF); 364 if (pdev->msi_desc != NULL) { 365 for (i = pci_msi_count(pdev->dev.bsddev) - 1; i >= 0; i--) 366 free(pdev->msi_desc[i], M_DEVBUF); 367 free(pdev->msi_desc, M_DEVBUF); 368 } 369 kfree(pdev->path_name); 370 free(pdev, M_DEVBUF); 371 } 372 373 struct pci_dev * 374 lkpinew_pci_dev(device_t dev) 375 { 376 struct pci_dev *pdev; 377 378 pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK|M_ZERO); 379 lkpifill_pci_dev(dev, pdev); 380 pdev->dev.release = lkpinew_pci_dev_release; 381 382 return (pdev); 383 } 384 385 struct pci_dev * 386 lkpi_pci_get_class(unsigned int class, struct pci_dev *from) 387 { 388 device_t dev; 389 device_t devfrom = NULL; 390 struct pci_dev *pdev; 391 392 if (from != NULL) 393 devfrom = from->dev.bsddev; 394 395 dev = pci_find_class_from(class >> 16, (class >> 8) & 0xFF, devfrom); 396 if (dev == NULL) 397 return (NULL); 398 399 pdev = lkpinew_pci_dev(dev); 400 return (pdev); 401 } 402 403 struct pci_dev * 404 lkpi_pci_get_domain_bus_and_slot(int domain, unsigned int bus, 405 unsigned int devfn) 406 { 407 device_t dev; 408 struct pci_dev *pdev; 409 410 dev = pci_find_dbsf(domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); 411 if (dev == NULL) 412 return (NULL); 413 414 pdev = lkpinew_pci_dev(dev); 415 return (pdev); 416 } 417 418 static int 419 linux_pci_probe(device_t dev) 420 { 421 const struct pci_device_id *id; 422 struct pci_driver *pdrv; 423 424 if ((pdrv = linux_pci_find(dev, &id)) == NULL) 425 return (ENXIO); 426 if (device_get_driver(dev) != &pdrv->bsddriver) 427 return (ENXIO); 428 device_set_desc(dev, pdrv->name); 429 430 /* Assume BSS initialized (should never return BUS_PROBE_SPECIFIC). 
*/ 431 if (pdrv->bsd_probe_return == 0) 432 return (BUS_PROBE_DEFAULT); 433 else 434 return (pdrv->bsd_probe_return); 435 } 436 437 static int 438 linux_pci_attach(device_t dev) 439 { 440 const struct pci_device_id *id; 441 struct pci_driver *pdrv; 442 struct pci_dev *pdev; 443 444 pdrv = linux_pci_find(dev, &id); 445 pdev = device_get_softc(dev); 446 447 MPASS(pdrv != NULL); 448 MPASS(pdev != NULL); 449 450 return (linux_pci_attach_device(dev, pdrv, id, pdev)); 451 } 452 453 static struct resource_list_entry * 454 linux_pci_reserve_bar(struct pci_dev *pdev, struct resource_list *rl, 455 int type, int rid) 456 { 457 device_t dev; 458 struct resource *res; 459 460 KASSERT(type == SYS_RES_IOPORT || type == SYS_RES_MEMORY, 461 ("trying to reserve non-BAR type %d", type)); 462 463 dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ? 464 device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev; 465 res = pci_reserve_map(device_get_parent(dev), dev, type, &rid, 0, ~0, 466 1, 1, 0); 467 if (res == NULL) 468 return (NULL); 469 return (resource_list_find(rl, type, rid)); 470 } 471 472 static struct resource_list_entry * 473 linux_pci_get_rle(struct pci_dev *pdev, int type, int rid, bool reserve_bar) 474 { 475 struct pci_devinfo *dinfo; 476 struct resource_list *rl; 477 struct resource_list_entry *rle; 478 479 dinfo = device_get_ivars(pdev->dev.bsddev); 480 rl = &dinfo->resources; 481 rle = resource_list_find(rl, type, rid); 482 /* Reserve resources for this BAR if needed. */ 483 if (rle == NULL && reserve_bar) 484 rle = linux_pci_reserve_bar(pdev, rl, type, rid); 485 return (rle); 486 } 487 488 int 489 linux_pci_attach_device(device_t dev, struct pci_driver *pdrv, 490 const struct pci_device_id *id, struct pci_dev *pdev) 491 { 492 struct resource_list_entry *rle; 493 device_t parent; 494 uintptr_t rid; 495 int error; 496 bool isdrm; 497 498 linux_set_current(curthread); 499 500 parent = device_get_parent(dev); 501 isdrm = pdrv != NULL && pdrv->isdrm; 502 503 if (isdrm) { 504 struct pci_devinfo *dinfo; 505 506 dinfo = device_get_ivars(parent); 507 device_set_ivars(dev, dinfo); 508 } 509 510 lkpifill_pci_dev(dev, pdev); 511 if (isdrm) 512 PCI_GET_ID(device_get_parent(parent), parent, PCI_ID_RID, &rid); 513 else 514 PCI_GET_ID(parent, dev, PCI_ID_RID, &rid); 515 pdev->devfn = rid; 516 pdev->pdrv = pdrv; 517 rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0, false); 518 if (rle != NULL) 519 pdev->dev.irq = rle->start; 520 else 521 pdev->dev.irq = LINUX_IRQ_INVALID; 522 pdev->irq = pdev->dev.irq; 523 error = linux_pdev_dma_init(pdev); 524 if (error) 525 goto out_dma_init; 526 527 TAILQ_INIT(&pdev->mmio); 528 spin_lock_init(&pdev->pcie_cap_lock); 529 530 spin_lock(&pci_lock); 531 list_add(&pdev->links, &pci_devices); 532 spin_unlock(&pci_lock); 533 534 if (pdrv != NULL) { 535 error = pdrv->probe(pdev, id); 536 if (error) 537 goto out_probe; 538 } 539 return (0); 540 541 out_probe: 542 free(pdev->bus, M_DEVBUF); 543 spin_lock_destroy(&pdev->pcie_cap_lock); 544 linux_pdev_dma_uninit(pdev); 545 out_dma_init: 546 spin_lock(&pci_lock); 547 list_del(&pdev->links); 548 spin_unlock(&pci_lock); 549 put_device(&pdev->dev); 550 return (-error); 551 } 552 553 static int 554 linux_pci_detach(device_t dev) 555 { 556 struct pci_dev *pdev; 557 558 pdev = device_get_softc(dev); 559 560 MPASS(pdev != NULL); 561 562 device_set_desc(dev, NULL); 563 564 return (linux_pci_detach_device(pdev)); 565 } 566 567 int 568 linux_pci_detach_device(struct pci_dev *pdev) 569 { 570 571 linux_set_current(curthread); 572 573 if (pdev->pdrv != NULL) 
574 pdev->pdrv->remove(pdev); 575 576 if (pdev->root != NULL) 577 pci_dev_put(pdev->root); 578 free(pdev->bus, M_DEVBUF); 579 linux_pdev_dma_uninit(pdev); 580 581 spin_lock(&pci_lock); 582 list_del(&pdev->links); 583 spin_unlock(&pci_lock); 584 spin_lock_destroy(&pdev->pcie_cap_lock); 585 put_device(&pdev->dev); 586 587 return (0); 588 } 589 590 static int 591 lkpi_pci_disable_dev(struct device *dev) 592 { 593 594 (void) pci_disable_io(dev->bsddev, SYS_RES_MEMORY); 595 (void) pci_disable_io(dev->bsddev, SYS_RES_IOPORT); 596 return (0); 597 } 598 599 static struct pci_devres * 600 lkpi_pci_devres_get_alloc(struct pci_dev *pdev) 601 { 602 struct pci_devres *dr; 603 604 dr = lkpi_devres_find(&pdev->dev, lkpi_pci_devres_release, NULL, NULL); 605 if (dr == NULL) { 606 dr = lkpi_devres_alloc(lkpi_pci_devres_release, sizeof(*dr), 607 GFP_KERNEL | __GFP_ZERO); 608 if (dr != NULL) 609 lkpi_devres_add(&pdev->dev, dr); 610 } 611 612 return (dr); 613 } 614 615 static struct pci_devres * 616 lkpi_pci_devres_find(struct pci_dev *pdev) 617 { 618 if (!pdev->managed) 619 return (NULL); 620 621 return (lkpi_pci_devres_get_alloc(pdev)); 622 } 623 624 void 625 lkpi_pci_devres_release(struct device *dev, void *p) 626 { 627 struct pci_devres *dr; 628 struct pci_dev *pdev; 629 int bar; 630 631 pdev = to_pci_dev(dev); 632 dr = p; 633 634 if (pdev->msix_enabled) 635 lkpi_pci_disable_msix(pdev); 636 if (pdev->msi_enabled) 637 lkpi_pci_disable_msi(pdev); 638 639 if (dr->enable_io && lkpi_pci_disable_dev(dev) == 0) 640 dr->enable_io = false; 641 642 if (dr->region_mask == 0) 643 return; 644 for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) { 645 646 if ((dr->region_mask & (1 << bar)) == 0) 647 continue; 648 pci_release_region(pdev, bar); 649 } 650 } 651 652 int 653 linuxkpi_pcim_enable_device(struct pci_dev *pdev) 654 { 655 struct pci_devres *dr; 656 int error; 657 658 /* Here we cannot run through the pdev->managed check. */ 659 dr = lkpi_pci_devres_get_alloc(pdev); 660 if (dr == NULL) 661 return (-ENOMEM); 662 663 /* If resources were enabled before do not do it again. */ 664 if (dr->enable_io) 665 return (0); 666 667 error = pci_enable_device(pdev); 668 if (error == 0) 669 dr->enable_io = true; 670 671 /* This device is not managed. */ 672 pdev->managed = true; 673 674 return (error); 675 } 676 677 static struct pcim_iomap_devres * 678 lkpi_pcim_iomap_devres_find(struct pci_dev *pdev) 679 { 680 struct pcim_iomap_devres *dr; 681 682 dr = lkpi_devres_find(&pdev->dev, lkpi_pcim_iomap_table_release, 683 NULL, NULL); 684 if (dr == NULL) { 685 dr = lkpi_devres_alloc(lkpi_pcim_iomap_table_release, 686 sizeof(*dr), GFP_KERNEL | __GFP_ZERO); 687 if (dr != NULL) 688 lkpi_devres_add(&pdev->dev, dr); 689 } 690 691 if (dr == NULL) 692 device_printf(pdev->dev.bsddev, "%s: NULL\n", __func__); 693 694 return (dr); 695 } 696 697 void __iomem ** 698 linuxkpi_pcim_iomap_table(struct pci_dev *pdev) 699 { 700 struct pcim_iomap_devres *dr; 701 702 dr = lkpi_pcim_iomap_devres_find(pdev); 703 if (dr == NULL) 704 return (NULL); 705 706 /* 707 * If the driver has manually set a flag to be able to request the 708 * resource to use bus_read/write_<n>, return the shadow table. 709 */ 710 if (pdev->want_iomap_res) 711 return ((void **)dr->res_table); 712 713 /* This is the Linux default. 
*/ 714 return (dr->mmio_table); 715 } 716 717 static struct resource * 718 _lkpi_pci_iomap(struct pci_dev *pdev, int bar, int mmio_size __unused) 719 { 720 struct pci_mmio_region *mmio, *p; 721 int type; 722 723 type = pci_resource_type(pdev, bar); 724 if (type < 0) { 725 device_printf(pdev->dev.bsddev, "%s: bar %d type %d\n", 726 __func__, bar, type); 727 return (NULL); 728 } 729 730 /* 731 * Check for duplicate mappings. 732 * This can happen if a driver calls pci_request_region() first. 733 */ 734 TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) { 735 if (mmio->type == type && mmio->rid == PCIR_BAR(bar)) { 736 return (mmio->res); 737 } 738 } 739 740 mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO); 741 mmio->rid = PCIR_BAR(bar); 742 mmio->type = type; 743 mmio->res = bus_alloc_resource_any(pdev->dev.bsddev, mmio->type, 744 &mmio->rid, RF_ACTIVE|RF_SHAREABLE); 745 if (mmio->res == NULL) { 746 device_printf(pdev->dev.bsddev, "%s: failed to alloc " 747 "bar %d type %d rid %d\n", 748 __func__, bar, type, PCIR_BAR(bar)); 749 free(mmio, M_DEVBUF); 750 return (NULL); 751 } 752 TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next); 753 754 return (mmio->res); 755 } 756 757 void * 758 linuxkpi_pci_iomap_range(struct pci_dev *pdev, int mmio_bar, 759 unsigned long mmio_off, unsigned long mmio_size) 760 { 761 struct resource *res; 762 763 res = _lkpi_pci_iomap(pdev, mmio_bar, mmio_size); 764 if (res == NULL) 765 return (NULL); 766 /* This is a FreeBSD extension so we can use bus_*(). */ 767 if (pdev->want_iomap_res) 768 return (res); 769 MPASS(mmio_off < rman_get_size(res)); 770 return ((void *)(rman_get_bushandle(res) + mmio_off)); 771 } 772 773 void * 774 linuxkpi_pci_iomap(struct pci_dev *pdev, int mmio_bar, int mmio_size) 775 { 776 return (linuxkpi_pci_iomap_range(pdev, mmio_bar, 0, mmio_size)); 777 } 778 779 void 780 linuxkpi_pci_iounmap(struct pci_dev *pdev, void *res) 781 { 782 struct pci_mmio_region *mmio, *p; 783 bus_space_handle_t bh = (bus_space_handle_t)res; 784 785 TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) { 786 if (pdev->want_iomap_res) { 787 if (res != mmio->res) 788 continue; 789 } else { 790 if (bh < rman_get_bushandle(mmio->res) || 791 bh >= rman_get_bushandle(mmio->res) + 792 rman_get_size(mmio->res)) 793 continue; 794 } 795 bus_release_resource(pdev->dev.bsddev, 796 mmio->type, mmio->rid, mmio->res); 797 TAILQ_REMOVE(&pdev->mmio, mmio, next); 798 free(mmio, M_DEVBUF); 799 return; 800 } 801 } 802 803 int 804 linuxkpi_pcim_iomap_regions(struct pci_dev *pdev, uint32_t mask, const char *name) 805 { 806 struct pcim_iomap_devres *dr; 807 void *res; 808 uint32_t mappings; 809 int bar; 810 811 dr = lkpi_pcim_iomap_devres_find(pdev); 812 if (dr == NULL) 813 return (-ENOMEM); 814 815 /* Now iomap all the requested (by "mask") ones. */ 816 for (bar = mappings = 0; mappings != mask; bar++) { 817 if ((mask & (1 << bar)) == 0) 818 continue; 819 820 /* Request double is not allowed. 
*/ 821 if (dr->mmio_table[bar] != NULL) { 822 device_printf(pdev->dev.bsddev, "%s: bar %d %p\n", 823 __func__, bar, dr->mmio_table[bar]); 824 goto err; 825 } 826 827 res = _lkpi_pci_iomap(pdev, bar, 0); 828 if (res == NULL) 829 goto err; 830 dr->mmio_table[bar] = (void *)rman_get_bushandle(res); 831 dr->res_table[bar] = res; 832 833 mappings |= (1 << bar); 834 } 835 836 return (0); 837 err: 838 for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) { 839 if ((mappings & (1 << bar)) != 0) { 840 res = dr->mmio_table[bar]; 841 if (res == NULL) 842 continue; 843 pci_iounmap(pdev, res); 844 } 845 } 846 847 return (-EINVAL); 848 } 849 850 static void 851 lkpi_pcim_iomap_table_release(struct device *dev, void *p) 852 { 853 struct pcim_iomap_devres *dr; 854 struct pci_dev *pdev; 855 int bar; 856 857 dr = p; 858 pdev = to_pci_dev(dev); 859 for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) { 860 861 if (dr->mmio_table[bar] == NULL) 862 continue; 863 864 pci_iounmap(pdev, dr->mmio_table[bar]); 865 } 866 } 867 868 static int 869 linux_pci_suspend(device_t dev) 870 { 871 const struct dev_pm_ops *pmops; 872 struct pm_message pm = { }; 873 struct pci_dev *pdev; 874 int error; 875 876 error = 0; 877 linux_set_current(curthread); 878 pdev = device_get_softc(dev); 879 pmops = pdev->pdrv->driver.pm; 880 881 if (pdev->pdrv->suspend != NULL) 882 error = -pdev->pdrv->suspend(pdev, pm); 883 else if (pmops != NULL && pmops->suspend != NULL) { 884 error = -pmops->suspend(&pdev->dev); 885 if (error == 0 && pmops->suspend_late != NULL) 886 error = -pmops->suspend_late(&pdev->dev); 887 if (error == 0 && pmops->suspend_noirq != NULL) 888 error = -pmops->suspend_noirq(&pdev->dev); 889 } 890 return (error); 891 } 892 893 static int 894 linux_pci_resume(device_t dev) 895 { 896 const struct dev_pm_ops *pmops; 897 struct pci_dev *pdev; 898 int error; 899 900 error = 0; 901 linux_set_current(curthread); 902 pdev = device_get_softc(dev); 903 pmops = pdev->pdrv->driver.pm; 904 905 if (pdev->pdrv->resume != NULL) 906 error = -pdev->pdrv->resume(pdev); 907 else if (pmops != NULL && pmops->resume != NULL) { 908 if (pmops->resume_early != NULL) 909 error = -pmops->resume_early(&pdev->dev); 910 if (error == 0 && pmops->resume != NULL) 911 error = -pmops->resume(&pdev->dev); 912 } 913 return (error); 914 } 915 916 static int 917 linux_pci_shutdown(device_t dev) 918 { 919 struct pci_dev *pdev; 920 921 linux_set_current(curthread); 922 pdev = device_get_softc(dev); 923 if (pdev->pdrv->shutdown != NULL) 924 pdev->pdrv->shutdown(pdev); 925 return (0); 926 } 927 928 static int 929 linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config) 930 { 931 struct pci_dev *pdev; 932 int error; 933 934 linux_set_current(curthread); 935 pdev = device_get_softc(dev); 936 if (pdev->pdrv->bsd_iov_init != NULL) 937 error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config); 938 else 939 error = EINVAL; 940 return (error); 941 } 942 943 static void 944 linux_pci_iov_uninit(device_t dev) 945 { 946 struct pci_dev *pdev; 947 948 linux_set_current(curthread); 949 pdev = device_get_softc(dev); 950 if (pdev->pdrv->bsd_iov_uninit != NULL) 951 pdev->pdrv->bsd_iov_uninit(dev); 952 } 953 954 static int 955 linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config) 956 { 957 struct pci_dev *pdev; 958 int error; 959 960 linux_set_current(curthread); 961 pdev = device_get_softc(dev); 962 if (pdev->pdrv->bsd_iov_add_vf != NULL) 963 error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config); 964 else 965 error = EINVAL; 966 return (error); 967 } 
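
/*
 * Illustrative sketch (not part of this file's KPI): how a LinuxKPI consumer
 * is expected to feed a pci_driver into the registration path below.  The
 * "mydrv_*" names and ID values are hypothetical; pci_register_driver() is
 * assumed to resolve to linux_pci_register_driver() via linux/pci.h.  Note
 * that the id_table must end with a zeroed entry, because linux_pci_find()
 * above scans it until id->vendor == 0, and probe() follows the Linux
 * convention of returning 0 on success or a negative errno on failure.
 *
 *	static const struct pci_device_id mydrv_ids[] = {
 *		{ .vendor = 0x1234, .device = 0x5678,
 *		  .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID },
 *		{ 0, }					// terminator
 *	};
 *
 *	static int
 *	mydrv_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		return (0);
 *	}
 *
 *	static void
 *	mydrv_remove(struct pci_dev *pdev)
 *	{
 *	}
 *
 *	static struct pci_driver mydrv_driver = {
 *		.name = "mydrv",
 *		.id_table = mydrv_ids,
 *		.probe = mydrv_probe,
 *		.remove = mydrv_remove,
 *	};
 *
 *	// pci_register_driver(&mydrv_driver);
 *	// ...
 *	// pci_unregister_driver(&mydrv_driver);
 */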

static int
_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
{
	int error;

	linux_set_current(curthread);
	spin_lock(&pci_lock);
	list_add(&pdrv->node, &pci_drivers);
	spin_unlock(&pci_lock);
	if (pdrv->bsddriver.name == NULL)
		pdrv->bsddriver.name = pdrv->name;
	pdrv->bsddriver.methods = pci_methods;
	pdrv->bsddriver.size = sizeof(struct pci_dev);

	bus_topo_lock();
	error = devclass_add_driver(dc, &pdrv->bsddriver,
	    BUS_PASS_DEFAULT, &pdrv->bsdclass);
	bus_topo_unlock();
	return (-error);
}

int
linux_pci_register_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	pdrv->isdrm = strcmp(pdrv->name, "drmn") == 0;
	dc = pdrv->isdrm ? devclass_create("vgapci") : devclass_find("pci");
	if (dc == NULL)
		return (-ENXIO);
	return (_linux_pci_register_driver(pdrv, dc));
}

static struct resource_list_entry *
lkpi_pci_get_bar(struct pci_dev *pdev, int bar, bool reserve)
{
	int type;

	type = pci_resource_type(pdev, bar);
	if (type < 0)
		return (NULL);
	bar = PCIR_BAR(bar);
	return (linux_pci_get_rle(pdev, type, bar, reserve));
}

struct device *
lkpi_pci_find_irq_dev(unsigned int irq)
{
	struct pci_dev *pdev;
	struct device *found;

	found = NULL;
	spin_lock(&pci_lock);
	list_for_each_entry(pdev, &pci_devices, links) {
		if (irq == pdev->dev.irq ||
		    (irq >= pdev->dev.irq_start && irq < pdev->dev.irq_end)) {
			found = &pdev->dev;
			break;
		}
	}
	spin_unlock(&pci_lock);
	return (found);
}

unsigned long
pci_resource_start(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;
	rman_res_t newstart;
	device_t dev;
	int error;

	if ((rle = lkpi_pci_get_bar(pdev, bar, true)) == NULL)
		return (0);
	dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	error = bus_translate_resource(dev, rle->type, rle->start, &newstart);
	if (error != 0) {
		device_printf(pdev->dev.bsddev,
		    "translate of %#jx failed: %d\n",
		    (uintmax_t)rle->start, error);
		return (0);
	}
	return (newstart);
}

unsigned long
pci_resource_len(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;

	if ((rle = lkpi_pci_get_bar(pdev, bar, true)) == NULL)
		return (0);
	return (rle->count);
}

int
pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	struct resource *res;
	struct pci_devres *dr;
	struct pci_mmio_region *mmio;
	int rid;
	int type;

	type = pci_resource_type(pdev, bar);
	if (type < 0)
		return (-ENODEV);
	rid = PCIR_BAR(bar);
	res = bus_alloc_resource_any(pdev->dev.bsddev, type, &rid,
	    RF_ACTIVE|RF_SHAREABLE);
	if (res == NULL) {
		device_printf(pdev->dev.bsddev, "%s: failed to alloc "
		    "bar %d type %d rid %d\n",
		    __func__, bar, type, PCIR_BAR(bar));
		return (-ENODEV);
	}

	/*
	 * It seems there is an implicit devres tracking on these if the device
	 * is managed; otherwise the resources are not automatically freed on
	 * FreeBSD/LinuxKPI though they should be/are expected to be by Linux
	 * drivers.
1092 */ 1093 dr = lkpi_pci_devres_find(pdev); 1094 if (dr != NULL) { 1095 dr->region_mask |= (1 << bar); 1096 dr->region_table[bar] = res; 1097 } 1098 1099 /* Even if the device is not managed we need to track it for iomap. */ 1100 mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO); 1101 mmio->rid = PCIR_BAR(bar); 1102 mmio->type = type; 1103 mmio->res = res; 1104 TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next); 1105 1106 return (0); 1107 } 1108 1109 int 1110 linuxkpi_pci_request_regions(struct pci_dev *pdev, const char *res_name) 1111 { 1112 int error; 1113 int i; 1114 1115 for (i = 0; i <= PCIR_MAX_BAR_0; i++) { 1116 error = pci_request_region(pdev, i, res_name); 1117 if (error && error != -ENODEV) { 1118 pci_release_regions(pdev); 1119 return (error); 1120 } 1121 } 1122 return (0); 1123 } 1124 1125 void 1126 linuxkpi_pci_release_region(struct pci_dev *pdev, int bar) 1127 { 1128 struct resource_list_entry *rle; 1129 struct pci_devres *dr; 1130 struct pci_mmio_region *mmio, *p; 1131 1132 if ((rle = lkpi_pci_get_bar(pdev, bar, false)) == NULL) 1133 return; 1134 1135 /* 1136 * As we implicitly track the requests we also need to clear them on 1137 * release. Do clear before resource release. 1138 */ 1139 dr = lkpi_pci_devres_find(pdev); 1140 if (dr != NULL) { 1141 KASSERT(dr->region_table[bar] == rle->res, ("%s: pdev %p bar %d" 1142 " region_table res %p != rel->res %p\n", __func__, pdev, 1143 bar, dr->region_table[bar], rle->res)); 1144 dr->region_table[bar] = NULL; 1145 dr->region_mask &= ~(1 << bar); 1146 } 1147 1148 TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) { 1149 if (rle->res != (void *)rman_get_bushandle(mmio->res)) 1150 continue; 1151 TAILQ_REMOVE(&pdev->mmio, mmio, next); 1152 free(mmio, M_DEVBUF); 1153 } 1154 1155 bus_release_resource(pdev->dev.bsddev, rle->type, rle->rid, rle->res); 1156 } 1157 1158 void 1159 linuxkpi_pci_release_regions(struct pci_dev *pdev) 1160 { 1161 int i; 1162 1163 for (i = 0; i <= PCIR_MAX_BAR_0; i++) 1164 pci_release_region(pdev, i); 1165 } 1166 1167 int 1168 linux_pci_register_drm_driver(struct pci_driver *pdrv) 1169 { 1170 devclass_t dc; 1171 1172 dc = devclass_create("vgapci"); 1173 if (dc == NULL) 1174 return (-ENXIO); 1175 pdrv->isdrm = true; 1176 pdrv->name = "drmn"; 1177 return (_linux_pci_register_driver(pdrv, dc)); 1178 } 1179 1180 void 1181 linux_pci_unregister_driver(struct pci_driver *pdrv) 1182 { 1183 devclass_t bus; 1184 1185 bus = devclass_find(pdrv->isdrm ? 
"vgapci" : "pci"); 1186 1187 spin_lock(&pci_lock); 1188 list_del(&pdrv->node); 1189 spin_unlock(&pci_lock); 1190 bus_topo_lock(); 1191 if (bus != NULL) 1192 devclass_delete_driver(bus, &pdrv->bsddriver); 1193 bus_topo_unlock(); 1194 } 1195 1196 void 1197 linux_pci_unregister_drm_driver(struct pci_driver *pdrv) 1198 { 1199 devclass_t bus; 1200 1201 bus = devclass_find("vgapci"); 1202 1203 spin_lock(&pci_lock); 1204 list_del(&pdrv->node); 1205 spin_unlock(&pci_lock); 1206 bus_topo_lock(); 1207 if (bus != NULL) 1208 devclass_delete_driver(bus, &pdrv->bsddriver); 1209 bus_topo_unlock(); 1210 } 1211 1212 int 1213 linuxkpi_pci_enable_msix(struct pci_dev *pdev, struct msix_entry *entries, 1214 int nreq) 1215 { 1216 struct resource_list_entry *rle; 1217 int error; 1218 int avail; 1219 int i; 1220 1221 avail = pci_msix_count(pdev->dev.bsddev); 1222 if (avail < nreq) { 1223 if (avail == 0) 1224 return -EINVAL; 1225 return avail; 1226 } 1227 avail = nreq; 1228 if ((error = -pci_alloc_msix(pdev->dev.bsddev, &avail)) != 0) 1229 return error; 1230 /* 1231 * Handle case where "pci_alloc_msix()" may allocate less 1232 * interrupts than available and return with no error: 1233 */ 1234 if (avail < nreq) { 1235 pci_release_msi(pdev->dev.bsddev); 1236 return avail; 1237 } 1238 rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false); 1239 pdev->dev.irq_start = rle->start; 1240 pdev->dev.irq_end = rle->start + avail; 1241 for (i = 0; i < nreq; i++) 1242 entries[i].vector = pdev->dev.irq_start + i; 1243 pdev->msix_enabled = true; 1244 return (0); 1245 } 1246 1247 int 1248 _lkpi_pci_enable_msi_range(struct pci_dev *pdev, int minvec, int maxvec) 1249 { 1250 struct resource_list_entry *rle; 1251 int error; 1252 int nvec; 1253 1254 if (maxvec < minvec) 1255 return (-EINVAL); 1256 1257 nvec = pci_msi_count(pdev->dev.bsddev); 1258 if (nvec < 1 || nvec < minvec) 1259 return (-ENOSPC); 1260 1261 nvec = min(nvec, maxvec); 1262 if ((error = -pci_alloc_msi(pdev->dev.bsddev, &nvec)) != 0) 1263 return error; 1264 1265 /* Native PCI might only ever ask for 32 vectors. 
 */
	if (nvec < minvec) {
		pci_release_msi(pdev->dev.bsddev);
		return (-ENOSPC);
	}

	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false);
	pdev->dev.irq_start = rle->start;
	pdev->dev.irq_end = rle->start + nvec;
	pdev->irq = rle->start;
	pdev->msi_enabled = true;
	return (0);
}

int
pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv,
    unsigned int flags)
{
	int error;

	if (flags & PCI_IRQ_MSIX) {
		struct msix_entry *entries;
		int i;

		entries = kcalloc(maxv, sizeof(*entries), GFP_KERNEL);
		if (entries == NULL) {
			error = -ENOMEM;
			goto out;
		}
		for (i = 0; i < maxv; ++i)
			entries[i].entry = i;
		error = pci_enable_msix(pdev, entries, maxv);
out:
		kfree(entries);
		if (error == 0 && pdev->msix_enabled)
			return (pdev->dev.irq_end - pdev->dev.irq_start);
	}
	if (flags & PCI_IRQ_MSI) {
		if (pci_msi_count(pdev->dev.bsddev) < minv)
			return (-ENOSPC);
		error = _lkpi_pci_enable_msi_range(pdev, minv, maxv);
		if (error == 0 && pdev->msi_enabled)
			return (pdev->dev.irq_end - pdev->dev.irq_start);
	}
	if (flags & PCI_IRQ_INTX) {
		if (pdev->irq)
			return (1);
	}

	return (-EINVAL);
}

struct msi_desc *
lkpi_pci_msi_desc_alloc(int irq)
{
	struct device *dev;
	struct pci_dev *pdev;
	struct msi_desc *desc;
	struct pci_devinfo *dinfo;
	struct pcicfg_msi *msi;
	int vec;

	dev = lkpi_pci_find_irq_dev(irq);
	if (dev == NULL)
		return (NULL);

	pdev = to_pci_dev(dev);

	if (pdev->msi_desc == NULL)
		return (NULL);

	if (irq < pdev->dev.irq_start || irq >= pdev->dev.irq_end)
		return (NULL);

	/* Index into msi_desc[] relative to the first allocated vector. */
	vec = irq - pdev->dev.irq_start;

	if (pdev->msi_desc[vec] != NULL)
		return (pdev->msi_desc[vec]);

	dinfo = device_get_ivars(dev->bsddev);
	msi = &dinfo->cfg.msi;

	desc = malloc(sizeof(*desc), M_DEVBUF, M_WAITOK | M_ZERO);

	desc->pci.msi_attrib.is_64 =
	    (msi->msi_ctrl & PCIM_MSICTRL_64BIT) ?
true : false; 1351 desc->msg.data = msi->msi_data; 1352 1353 pdev->msi_desc[vec] = desc; 1354 1355 return (desc); 1356 } 1357 1358 bool 1359 pci_device_is_present(struct pci_dev *pdev) 1360 { 1361 device_t dev; 1362 1363 dev = pdev->dev.bsddev; 1364 1365 return (bus_child_present(dev)); 1366 } 1367 1368 CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t)); 1369 1370 struct linux_dma_obj { 1371 void *vaddr; 1372 uint64_t dma_addr; 1373 bus_dmamap_t dmamap; 1374 bus_dma_tag_t dmat; 1375 }; 1376 1377 static uma_zone_t linux_dma_trie_zone; 1378 static uma_zone_t linux_dma_obj_zone; 1379 1380 static void 1381 linux_dma_init(void *arg) 1382 { 1383 1384 linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie", 1385 pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL, 1386 UMA_ALIGN_PTR, 0); 1387 linux_dma_obj_zone = uma_zcreate("linux_dma_object", 1388 sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL, 1389 UMA_ALIGN_PTR, 0); 1390 lkpi_pci_nseg1_fail = counter_u64_alloc(M_WAITOK); 1391 } 1392 SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL); 1393 1394 static void 1395 linux_dma_uninit(void *arg) 1396 { 1397 1398 counter_u64_free(lkpi_pci_nseg1_fail); 1399 uma_zdestroy(linux_dma_obj_zone); 1400 uma_zdestroy(linux_dma_trie_zone); 1401 } 1402 SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL); 1403 1404 static void * 1405 linux_dma_trie_alloc(struct pctrie *ptree) 1406 { 1407 1408 return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT)); 1409 } 1410 1411 static void 1412 linux_dma_trie_free(struct pctrie *ptree, void *node) 1413 { 1414 1415 uma_zfree(linux_dma_trie_zone, node); 1416 } 1417 1418 PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc, 1419 linux_dma_trie_free); 1420 1421 #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) 1422 static dma_addr_t 1423 linux_dma_map_phys_common(struct device *dev, vm_paddr_t phys, size_t len, 1424 bus_dma_tag_t dmat) 1425 { 1426 struct linux_dma_priv *priv; 1427 struct linux_dma_obj *obj; 1428 int error, nseg; 1429 bus_dma_segment_t seg; 1430 1431 priv = dev->dma_priv; 1432 1433 /* 1434 * If the resultant mapping will be entirely 1:1 with the 1435 * physical address, short-circuit the remainder of the 1436 * bus_dma API. This avoids tracking collisions in the pctrie 1437 * with the additional benefit of reducing overhead. 
1438 */ 1439 if (bus_dma_id_mapped(dmat, phys, len)) 1440 return (phys); 1441 1442 obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT); 1443 if (obj == NULL) { 1444 return (0); 1445 } 1446 obj->dmat = dmat; 1447 1448 DMA_PRIV_LOCK(priv); 1449 if (bus_dmamap_create(obj->dmat, 0, &obj->dmamap) != 0) { 1450 DMA_PRIV_UNLOCK(priv); 1451 uma_zfree(linux_dma_obj_zone, obj); 1452 return (0); 1453 } 1454 1455 nseg = -1; 1456 if (_bus_dmamap_load_phys(obj->dmat, obj->dmamap, phys, len, 1457 BUS_DMA_NOWAIT, &seg, &nseg) != 0) { 1458 bus_dmamap_destroy(obj->dmat, obj->dmamap); 1459 DMA_PRIV_UNLOCK(priv); 1460 uma_zfree(linux_dma_obj_zone, obj); 1461 counter_u64_add(lkpi_pci_nseg1_fail, 1); 1462 if (linuxkpi_debug) 1463 dump_stack(); 1464 return (0); 1465 } 1466 1467 KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg)); 1468 obj->dma_addr = seg.ds_addr; 1469 1470 error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj); 1471 if (error != 0) { 1472 bus_dmamap_unload(obj->dmat, obj->dmamap); 1473 bus_dmamap_destroy(obj->dmat, obj->dmamap); 1474 DMA_PRIV_UNLOCK(priv); 1475 uma_zfree(linux_dma_obj_zone, obj); 1476 return (0); 1477 } 1478 DMA_PRIV_UNLOCK(priv); 1479 return (obj->dma_addr); 1480 } 1481 #else 1482 static dma_addr_t 1483 linux_dma_map_phys_common(struct device *dev __unused, vm_paddr_t phys, 1484 size_t len __unused, bus_dma_tag_t dmat __unused) 1485 { 1486 return (phys); 1487 } 1488 #endif 1489 1490 dma_addr_t 1491 linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len) 1492 { 1493 struct linux_dma_priv *priv; 1494 1495 priv = dev->dma_priv; 1496 return (linux_dma_map_phys_common(dev, phys, len, priv->dmat)); 1497 } 1498 1499 #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) 1500 void 1501 linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len) 1502 { 1503 struct linux_dma_priv *priv; 1504 struct linux_dma_obj *obj; 1505 1506 priv = dev->dma_priv; 1507 1508 if (pctrie_is_empty(&priv->ptree)) 1509 return; 1510 1511 DMA_PRIV_LOCK(priv); 1512 obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr); 1513 if (obj == NULL) { 1514 DMA_PRIV_UNLOCK(priv); 1515 return; 1516 } 1517 LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr); 1518 bus_dmamap_unload(obj->dmat, obj->dmamap); 1519 bus_dmamap_destroy(obj->dmat, obj->dmamap); 1520 DMA_PRIV_UNLOCK(priv); 1521 1522 uma_zfree(linux_dma_obj_zone, obj); 1523 } 1524 #else 1525 void 1526 linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len) 1527 { 1528 } 1529 #endif 1530 1531 void * 1532 linux_dma_alloc_coherent(struct device *dev, size_t size, 1533 dma_addr_t *dma_handle, gfp_t flag) 1534 { 1535 struct linux_dma_priv *priv; 1536 vm_paddr_t high; 1537 size_t align; 1538 void *mem; 1539 1540 if (dev == NULL || dev->dma_priv == NULL) { 1541 *dma_handle = 0; 1542 return (NULL); 1543 } 1544 priv = dev->dma_priv; 1545 if (priv->dma_coherent_mask) 1546 high = priv->dma_coherent_mask; 1547 else 1548 /* Coherent is lower 32bit only by default in Linux. */ 1549 high = BUS_SPACE_MAXADDR_32BIT; 1550 align = PAGE_SIZE << get_order(size); 1551 /* Always zero the allocation. 
*/ 1552 flag |= M_ZERO; 1553 mem = kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high, 1554 align, 0, VM_MEMATTR_DEFAULT); 1555 if (mem != NULL) { 1556 *dma_handle = linux_dma_map_phys_common(dev, vtophys(mem), size, 1557 priv->dmat_coherent); 1558 if (*dma_handle == 0) { 1559 kmem_free(mem, size); 1560 mem = NULL; 1561 } 1562 } else { 1563 *dma_handle = 0; 1564 } 1565 return (mem); 1566 } 1567 1568 struct lkpi_devres_dmam_coherent { 1569 size_t size; 1570 dma_addr_t *handle; 1571 void *mem; 1572 }; 1573 1574 static void 1575 lkpi_dmam_free_coherent(struct device *dev, void *p) 1576 { 1577 struct lkpi_devres_dmam_coherent *dr; 1578 1579 dr = p; 1580 dma_free_coherent(dev, dr->size, dr->mem, *dr->handle); 1581 } 1582 1583 void * 1584 linuxkpi_dmam_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, 1585 gfp_t flag) 1586 { 1587 struct lkpi_devres_dmam_coherent *dr; 1588 1589 dr = lkpi_devres_alloc(lkpi_dmam_free_coherent, 1590 sizeof(*dr), GFP_KERNEL | __GFP_ZERO); 1591 1592 if (dr == NULL) 1593 return (NULL); 1594 1595 dr->size = size; 1596 dr->mem = linux_dma_alloc_coherent(dev, size, dma_handle, flag); 1597 dr->handle = dma_handle; 1598 if (dr->mem == NULL) { 1599 lkpi_devres_free(dr); 1600 return (NULL); 1601 } 1602 1603 lkpi_devres_add(dev, dr); 1604 return (dr->mem); 1605 } 1606 1607 void 1608 linuxkpi_dma_sync(struct device *dev, dma_addr_t dma_addr, size_t size, 1609 bus_dmasync_op_t op) 1610 { 1611 struct linux_dma_priv *priv; 1612 struct linux_dma_obj *obj; 1613 1614 priv = dev->dma_priv; 1615 1616 if (pctrie_is_empty(&priv->ptree)) 1617 return; 1618 1619 DMA_PRIV_LOCK(priv); 1620 obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr); 1621 if (obj == NULL) { 1622 DMA_PRIV_UNLOCK(priv); 1623 return; 1624 } 1625 1626 bus_dmamap_sync(obj->dmat, obj->dmamap, op); 1627 DMA_PRIV_UNLOCK(priv); 1628 } 1629 1630 int 1631 linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents, 1632 enum dma_data_direction direction, unsigned long attrs __unused) 1633 { 1634 struct linux_dma_priv *priv; 1635 struct scatterlist *sg; 1636 int i, nseg; 1637 bus_dma_segment_t seg; 1638 1639 priv = dev->dma_priv; 1640 1641 DMA_PRIV_LOCK(priv); 1642 1643 /* create common DMA map in the first S/G entry */ 1644 if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) { 1645 DMA_PRIV_UNLOCK(priv); 1646 return (0); 1647 } 1648 1649 /* load all S/G list entries */ 1650 for_each_sg(sgl, sg, nents, i) { 1651 nseg = -1; 1652 if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map, 1653 sg_phys(sg), sg->length, BUS_DMA_NOWAIT, 1654 &seg, &nseg) != 0) { 1655 bus_dmamap_unload(priv->dmat, sgl->dma_map); 1656 bus_dmamap_destroy(priv->dmat, sgl->dma_map); 1657 DMA_PRIV_UNLOCK(priv); 1658 return (0); 1659 } 1660 KASSERT(nseg == 0, 1661 ("More than one segment (nseg=%d)", nseg + 1)); 1662 1663 sg_dma_address(sg) = seg.ds_addr; 1664 } 1665 1666 switch (direction) { 1667 case DMA_BIDIRECTIONAL: 1668 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE); 1669 break; 1670 case DMA_TO_DEVICE: 1671 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD); 1672 break; 1673 case DMA_FROM_DEVICE: 1674 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE); 1675 break; 1676 default: 1677 break; 1678 } 1679 1680 DMA_PRIV_UNLOCK(priv); 1681 1682 return (nents); 1683 } 1684 1685 void 1686 linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl, 1687 int nents __unused, enum dma_data_direction direction, 1688 unsigned long attrs __unused) 1689 { 1690 struct 
linux_dma_priv *priv; 1691 1692 priv = dev->dma_priv; 1693 1694 DMA_PRIV_LOCK(priv); 1695 1696 switch (direction) { 1697 case DMA_BIDIRECTIONAL: 1698 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD); 1699 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD); 1700 break; 1701 case DMA_TO_DEVICE: 1702 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTWRITE); 1703 break; 1704 case DMA_FROM_DEVICE: 1705 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD); 1706 break; 1707 default: 1708 break; 1709 } 1710 1711 bus_dmamap_unload(priv->dmat, sgl->dma_map); 1712 bus_dmamap_destroy(priv->dmat, sgl->dma_map); 1713 DMA_PRIV_UNLOCK(priv); 1714 } 1715 1716 struct dma_pool { 1717 struct device *pool_device; 1718 uma_zone_t pool_zone; 1719 struct mtx pool_lock; 1720 bus_dma_tag_t pool_dmat; 1721 size_t pool_entry_size; 1722 struct pctrie pool_ptree; 1723 }; 1724 1725 #define DMA_POOL_LOCK(pool) mtx_lock(&(pool)->pool_lock) 1726 #define DMA_POOL_UNLOCK(pool) mtx_unlock(&(pool)->pool_lock) 1727 1728 static inline int 1729 dma_pool_obj_ctor(void *mem, int size, void *arg, int flags) 1730 { 1731 struct linux_dma_obj *obj = mem; 1732 struct dma_pool *pool = arg; 1733 int error, nseg; 1734 bus_dma_segment_t seg; 1735 1736 nseg = -1; 1737 DMA_POOL_LOCK(pool); 1738 error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap, 1739 vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT, 1740 &seg, &nseg); 1741 DMA_POOL_UNLOCK(pool); 1742 if (error != 0) { 1743 return (error); 1744 } 1745 KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg)); 1746 obj->dma_addr = seg.ds_addr; 1747 1748 return (0); 1749 } 1750 1751 static void 1752 dma_pool_obj_dtor(void *mem, int size, void *arg) 1753 { 1754 struct linux_dma_obj *obj = mem; 1755 struct dma_pool *pool = arg; 1756 1757 DMA_POOL_LOCK(pool); 1758 bus_dmamap_unload(pool->pool_dmat, obj->dmamap); 1759 DMA_POOL_UNLOCK(pool); 1760 } 1761 1762 static int 1763 dma_pool_obj_import(void *arg, void **store, int count, int domain __unused, 1764 int flags) 1765 { 1766 struct dma_pool *pool = arg; 1767 struct linux_dma_obj *obj; 1768 int error, i; 1769 1770 for (i = 0; i < count; i++) { 1771 obj = uma_zalloc(linux_dma_obj_zone, flags); 1772 if (obj == NULL) 1773 break; 1774 1775 error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr, 1776 BUS_DMA_NOWAIT, &obj->dmamap); 1777 if (error!= 0) { 1778 uma_zfree(linux_dma_obj_zone, obj); 1779 break; 1780 } 1781 1782 store[i] = obj; 1783 } 1784 1785 return (i); 1786 } 1787 1788 static void 1789 dma_pool_obj_release(void *arg, void **store, int count) 1790 { 1791 struct dma_pool *pool = arg; 1792 struct linux_dma_obj *obj; 1793 int i; 1794 1795 for (i = 0; i < count; i++) { 1796 obj = store[i]; 1797 bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap); 1798 uma_zfree(linux_dma_obj_zone, obj); 1799 } 1800 } 1801 1802 struct dma_pool * 1803 linux_dma_pool_create(char *name, struct device *dev, size_t size, 1804 size_t align, size_t boundary) 1805 { 1806 struct linux_dma_priv *priv; 1807 struct dma_pool *pool; 1808 1809 priv = dev->dma_priv; 1810 1811 pool = kzalloc(sizeof(*pool), GFP_KERNEL); 1812 pool->pool_device = dev; 1813 pool->pool_entry_size = size; 1814 1815 if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev), 1816 align, boundary, /* alignment, boundary */ 1817 priv->dma_mask, /* lowaddr */ 1818 BUS_SPACE_MAXADDR, /* highaddr */ 1819 NULL, NULL, /* filtfunc, filtfuncarg */ 1820 size, /* maxsize */ 1821 1, /* nsegments */ 1822 size, /* maxsegsz */ 1823 0, /* flags */ 1824 
NULL, NULL, /* lockfunc, lockfuncarg */ 1825 &pool->pool_dmat)) { 1826 kfree(pool); 1827 return (NULL); 1828 } 1829 1830 pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor, 1831 dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import, 1832 dma_pool_obj_release, pool, 0); 1833 1834 mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF); 1835 pctrie_init(&pool->pool_ptree); 1836 1837 return (pool); 1838 } 1839 1840 void 1841 linux_dma_pool_destroy(struct dma_pool *pool) 1842 { 1843 1844 uma_zdestroy(pool->pool_zone); 1845 bus_dma_tag_destroy(pool->pool_dmat); 1846 mtx_destroy(&pool->pool_lock); 1847 kfree(pool); 1848 } 1849 1850 void 1851 lkpi_dmam_pool_destroy(struct device *dev, void *p) 1852 { 1853 struct dma_pool *pool; 1854 1855 pool = *(struct dma_pool **)p; 1856 LINUX_DMA_PCTRIE_RECLAIM(&pool->pool_ptree); 1857 linux_dma_pool_destroy(pool); 1858 } 1859 1860 void * 1861 linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, 1862 dma_addr_t *handle) 1863 { 1864 struct linux_dma_obj *obj; 1865 1866 obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags & GFP_NATIVE_MASK); 1867 if (obj == NULL) 1868 return (NULL); 1869 1870 DMA_POOL_LOCK(pool); 1871 if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) { 1872 DMA_POOL_UNLOCK(pool); 1873 uma_zfree_arg(pool->pool_zone, obj, pool); 1874 return (NULL); 1875 } 1876 DMA_POOL_UNLOCK(pool); 1877 1878 *handle = obj->dma_addr; 1879 return (obj->vaddr); 1880 } 1881 1882 void 1883 linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr) 1884 { 1885 struct linux_dma_obj *obj; 1886 1887 DMA_POOL_LOCK(pool); 1888 obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr); 1889 if (obj == NULL) { 1890 DMA_POOL_UNLOCK(pool); 1891 return; 1892 } 1893 LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr); 1894 DMA_POOL_UNLOCK(pool); 1895 1896 uma_zfree_arg(pool->pool_zone, obj, pool); 1897 } 1898 1899 static int 1900 linux_backlight_get_status(device_t dev, struct backlight_props *props) 1901 { 1902 struct pci_dev *pdev; 1903 1904 linux_set_current(curthread); 1905 pdev = device_get_softc(dev); 1906 1907 props->brightness = pdev->dev.bd->props.brightness; 1908 props->brightness = props->brightness * 100 / pdev->dev.bd->props.max_brightness; 1909 props->nlevels = 0; 1910 1911 return (0); 1912 } 1913 1914 static int 1915 linux_backlight_get_info(device_t dev, struct backlight_info *info) 1916 { 1917 struct pci_dev *pdev; 1918 1919 linux_set_current(curthread); 1920 pdev = device_get_softc(dev); 1921 1922 info->type = BACKLIGHT_TYPE_PANEL; 1923 strlcpy(info->name, pdev->dev.bd->name, BACKLIGHTMAXNAMELENGTH); 1924 return (0); 1925 } 1926 1927 static int 1928 linux_backlight_update_status(device_t dev, struct backlight_props *props) 1929 { 1930 struct pci_dev *pdev; 1931 1932 linux_set_current(curthread); 1933 pdev = device_get_softc(dev); 1934 1935 pdev->dev.bd->props.brightness = pdev->dev.bd->props.max_brightness * 1936 props->brightness / 100; 1937 pdev->dev.bd->props.power = props->brightness == 0 ? 
1938 4/* FB_BLANK_POWERDOWN */ : 0/* FB_BLANK_UNBLANK */; 1939 return (pdev->dev.bd->ops->update_status(pdev->dev.bd)); 1940 } 1941 1942 struct backlight_device * 1943 linux_backlight_device_register(const char *name, struct device *dev, 1944 void *data, const struct backlight_ops *ops, struct backlight_properties *props) 1945 { 1946 1947 dev->bd = malloc(sizeof(*dev->bd), M_DEVBUF, M_WAITOK | M_ZERO); 1948 dev->bd->ops = ops; 1949 dev->bd->props.type = props->type; 1950 dev->bd->props.max_brightness = props->max_brightness; 1951 dev->bd->props.brightness = props->brightness; 1952 dev->bd->props.power = props->power; 1953 dev->bd->data = data; 1954 dev->bd->dev = dev; 1955 dev->bd->name = strdup(name, M_DEVBUF); 1956 1957 dev->backlight_dev = backlight_register(name, dev->bsddev); 1958 1959 return (dev->bd); 1960 } 1961 1962 void 1963 linux_backlight_device_unregister(struct backlight_device *bd) 1964 { 1965 1966 backlight_destroy(bd->dev->backlight_dev); 1967 free(bd->name, M_DEVBUF); 1968 free(bd, M_DEVBUF); 1969 } 1970
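
/*
 * Illustrative sketch (hypothetical driver code, not compiled here): how the
 * backlight shims above are typically exercised.  backlight_device_register()
 * is assumed to resolve to linux_backlight_device_register(); the "mypanel_*"
 * names and values are made up.  linux_backlight_get_status() and
 * linux_backlight_update_status() above scale between the 0..100 range used
 * by backlight(9) and the driver's native 0..max_brightness range, so the
 * driver only ever sees its own units.
 *
 *	static int
 *	mypanel_update_status(struct backlight_device *bd)
 *	{
 *		// program the hardware from bd->props.brightness
 *		return (0);
 *	}
 *
 *	static const struct backlight_ops mypanel_ops = {
 *		.update_status = mypanel_update_status,
 *	};
 *
 *	struct backlight_properties props = {
 *		.type = BACKLIGHT_RAW,
 *		.max_brightness = 255,
 *		.brightness = 255,
 *	};
 *	struct backlight_device *bd;
 *
 *	bd = backlight_device_register("mypanel", &pdev->dev, sc,
 *	    &mypanel_ops, &props);
 *	// ...
 *	// backlight_device_unregister(bd);
 */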