/*-
 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 * Copyright (c) 2020-2022 The FreeBSD Foundation
 *
 * Portions of this software were developed by Björn Zeeb
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/pciio.h>
#include <sys/pctrie.h>
#include <sys/rman.h>
#include <sys/rwlock.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/stdarg.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <dev/pci/pci_iov.h>
#include <dev/backlight/backlight.h>

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/compat.h>

#include <linux/backlight.h>

#include "backlight_if.h"
#include "pcib_if.h"

/* Undef the linux function macro defined in linux/pci.h */
#undef pci_get_class

extern int linuxkpi_debug;

SYSCTL_DECL(_compat_linuxkpi);

static counter_u64_t lkpi_pci_nseg1_fail;
SYSCTL_COUNTER_U64(_compat_linuxkpi, OID_AUTO, lkpi_pci_nseg1_fail, CTLFLAG_RD,
    &lkpi_pci_nseg1_fail, "Count of busdma mapping failures of single-segment");

static device_probe_t linux_pci_probe;
static device_attach_t linux_pci_attach;
static device_detach_t linux_pci_detach;
static device_suspend_t linux_pci_suspend;
static device_resume_t linux_pci_resume;
static device_shutdown_t linux_pci_shutdown;
static pci_iov_init_t linux_pci_iov_init;
static pci_iov_uninit_t linux_pci_iov_uninit;
static pci_iov_add_vf_t linux_pci_iov_add_vf;
static int linux_backlight_get_status(device_t dev, struct backlight_props
    *props);
static int linux_backlight_update_status(device_t dev, struct backlight_props *props);
static int linux_backlight_get_info(device_t dev, struct backlight_info *info);
static void lkpi_pcim_iomap_table_release(struct device *, void *);

static device_method_t pci_methods[] = {
	DEVMETHOD(device_probe, linux_pci_probe),
	DEVMETHOD(device_attach, linux_pci_attach),
	DEVMETHOD(device_detach, linux_pci_detach),
	DEVMETHOD(device_suspend, linux_pci_suspend),
	DEVMETHOD(device_resume, linux_pci_resume),
	DEVMETHOD(device_shutdown, linux_pci_shutdown),
	DEVMETHOD(pci_iov_init, linux_pci_iov_init),
	DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf),

	/* backlight interface */
	DEVMETHOD(backlight_update_status, linux_backlight_update_status),
	DEVMETHOD(backlight_get_status, linux_backlight_get_status),
	DEVMETHOD(backlight_get_info, linux_backlight_get_info),
	DEVMETHOD_END
};

const char *pci_power_names[] = {
	"UNKNOWN", "D0", "D1", "D2", "D3hot", "D3cold"
};

/* We need some meta-struct to keep track of these for devres. */
struct pci_devres {
	bool		enable_io;
	/* PCIR_MAX_BAR_0 + 1 = 6 => BIT(0..5). */
	uint8_t		region_mask;
	struct resource	*region_table[PCIR_MAX_BAR_0 + 1]; /* Not needed. */
};
struct pcim_iomap_devres {
	void		*mmio_table[PCIR_MAX_BAR_0 + 1];
	struct resource	*res_table[PCIR_MAX_BAR_0 + 1];
};

struct linux_dma_priv {
	uint64_t	dma_mask;
	bus_dma_tag_t	dmat;
	uint64_t	dma_coherent_mask;
	bus_dma_tag_t	dmat_coherent;
	struct mtx	lock;
	struct pctrie	ptree;
};
#define	DMA_PRIV_LOCK(priv)	mtx_lock(&(priv)->lock)
#define	DMA_PRIV_UNLOCK(priv)	mtx_unlock(&(priv)->lock)

static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;

	priv = pdev->dev.dma_priv;
	if (priv->dmat)
		bus_dma_tag_destroy(priv->dmat);
	if (priv->dmat_coherent)
		bus_dma_tag_destroy(priv->dmat_coherent);
	mtx_destroy(&priv->lock);
	pdev->dev.dma_priv = NULL;
	free(priv, M_DEVBUF);
	return (0);
}

static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;
	int error;

	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);

	mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);
	pctrie_init(&priv->ptree);

	pdev->dev.dma_priv = priv;

	/* Create default DMA tags. */
	error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
	if (error != 0)
		goto err;
	/* Coherent is lower 32bit only by default in Linux. */
	error = linux_dma_tag_init_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (error != 0)
		goto err;

	return (error);

err:
	linux_pdev_dma_uninit(pdev);
	return (error);
}

int
linux_dma_tag_init(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat) {
		if (priv->dma_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat);
	}

	priv->dma_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat);
	return (-error);
}

int
linux_dma_tag_init_coherent(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat_coherent) {
		if (priv->dma_coherent_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat_coherent);
	}

	priv->dma_coherent_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat_coherent);
	return (-error);
}

static struct pci_driver *
linux_pci_find(device_t dev, const struct pci_device_id **idp)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;
	uint16_t subdevice;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);
	subvendor = pci_get_subvendor(dev);
	subdevice = pci_get_subdevice(dev);

	spin_lock(&pci_lock);
	list_for_each_entry(pdrv, &pci_drivers, node) {
		for (id = pdrv->id_table; id->vendor != 0; id++) {
			if (vendor == id->vendor &&
			    (PCI_ANY_ID == id->device || device == id->device) &&
			    (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) &&
			    (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) {
				*idp = id;
				spin_unlock(&pci_lock);
				return (pdrv);
			}
		}
	}
	spin_unlock(&pci_lock);
	return (NULL);
}

struct pci_dev *
lkpi_pci_get_device(uint16_t vendor, uint16_t device, struct pci_dev *odev)
{
	struct pci_dev *pdev;

	KASSERT(odev == NULL, ("%s: odev argument not yet supported\n", __func__));

	spin_lock(&pci_lock);
	list_for_each_entry(pdev, &pci_devices, links) {
		if (pdev->vendor == vendor && pdev->device == device)
			break;
	}
	spin_unlock(&pci_lock);

	return (pdev);
}

static void
lkpi_pci_dev_release(struct device *dev)
{

	lkpi_devres_release_free_list(dev);
	spin_lock_destroy(&dev->devres_lock);
}

static void
lkpifill_pci_dev(device_t dev, struct pci_dev *pdev)
{

	pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
	pdev->vendor = pci_get_vendor(dev);
	pdev->device = pci_get_device(dev);
	pdev->subsystem_vendor = pci_get_subvendor(dev);
	pdev->subsystem_device = pci_get_subdevice(dev);
	pdev->class = pci_get_class(dev);
	pdev->revision = pci_get_revid(dev);
	pdev->path_name = kasprintf(GFP_KERNEL, "%04d:%02d:%02d.%d",
	    pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
	    pci_get_function(dev));
	pdev->bus = malloc(sizeof(*pdev->bus), M_DEVBUF, M_WAITOK | M_ZERO);
	/*
	 * This should be the upstream bridge; pci_upstream_bridge()
	 * handles that case on demand as otherwise we'll shadow the
	 * entire PCI hierarchy.
	 */
	pdev->bus->self = pdev;
	pdev->bus->number = pci_get_bus(dev);
	pdev->bus->domain = pci_get_domain(dev);
	pdev->dev.bsddev = dev;
	pdev->dev.parent = &linux_root_device;
	pdev->dev.release = lkpi_pci_dev_release;
	INIT_LIST_HEAD(&pdev->dev.irqents);

	if (pci_msi_count(dev) > 0)
		pdev->msi_desc = malloc(pci_msi_count(dev) *
		    sizeof(*pdev->msi_desc), M_DEVBUF, M_WAITOK | M_ZERO);

	kobject_init(&pdev->dev.kobj, &linux_dev_ktype);
	kobject_set_name(&pdev->dev.kobj, device_get_nameunit(dev));
	kobject_add(&pdev->dev.kobj, &linux_root_device.kobj,
	    kobject_name(&pdev->dev.kobj));
	spin_lock_init(&pdev->dev.devres_lock);
	INIT_LIST_HEAD(&pdev->dev.devres_head);
}

static void
lkpinew_pci_dev_release(struct device *dev)
{
	struct pci_dev *pdev;
	int i;

	pdev = to_pci_dev(dev);
	if (pdev->root != NULL)
		pci_dev_put(pdev->root);
	if (pdev->bus->self != pdev)
		pci_dev_put(pdev->bus->self);
	free(pdev->bus, M_DEVBUF);
	if (pdev->msi_desc != NULL) {
		for (i = pci_msi_count(pdev->dev.bsddev) - 1; i >= 0; i--)
			free(pdev->msi_desc[i], M_DEVBUF);
		free(pdev->msi_desc, M_DEVBUF);
	}
	kfree(pdev->path_name);
	free(pdev, M_DEVBUF);
}

struct pci_dev *
lkpinew_pci_dev(device_t dev)
{
	struct pci_dev *pdev;

	pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK|M_ZERO);
	lkpifill_pci_dev(dev, pdev);
	pdev->dev.release = lkpinew_pci_dev_release;

	return (pdev);
}

struct pci_dev *
lkpi_pci_get_class(unsigned int class, struct pci_dev *from)
{
	device_t dev;
	device_t devfrom = NULL;
	struct pci_dev *pdev;

	if (from != NULL)
		devfrom = from->dev.bsddev;

	dev = pci_find_class_from(class >> 16, (class >> 8) & 0xFF, devfrom);
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

struct pci_dev *
lkpi_pci_get_base_class(unsigned int baseclass, struct pci_dev *from)
{
	device_t dev;
	device_t devfrom = NULL;
	struct pci_dev *pdev;

	if (from != NULL)
		devfrom = from->dev.bsddev;

	dev = pci_find_base_class_from(baseclass, devfrom);
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

struct pci_dev *
lkpi_pci_get_domain_bus_and_slot(int domain, unsigned int bus,
    unsigned int devfn)
{
	device_t dev;
	struct pci_dev *pdev;

	dev = pci_find_dbsf(domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

static int
linux_pci_probe(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;

	if ((pdrv = linux_pci_find(dev, &id)) == NULL)
		return (ENXIO);
	if (device_get_driver(dev) != &pdrv->bsddriver)
		return (ENXIO);
	device_set_desc(dev, pdrv->name);

	/* Assume BSS initialized (should never return BUS_PROBE_SPECIFIC). */
	if (pdrv->bsd_probe_return == 0)
		return (BUS_PROBE_DEFAULT);
	else
		return (pdrv->bsd_probe_return);
}

static int
linux_pci_attach(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	struct pci_dev *pdev;

	pdrv = linux_pci_find(dev, &id);
	pdev = device_get_softc(dev);

	MPASS(pdrv != NULL);
	MPASS(pdev != NULL);

	return (linux_pci_attach_device(dev, pdrv, id, pdev));
}

static struct resource_list_entry *
linux_pci_reserve_bar(struct pci_dev *pdev, struct resource_list *rl,
    int type, int rid)
{
	device_t dev;
	struct resource *res;

	KASSERT(type == SYS_RES_IOPORT || type == SYS_RES_MEMORY,
	    ("trying to reserve non-BAR type %d", type));

	dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	res = pci_reserve_map(device_get_parent(dev), dev, type, &rid, 0, ~0,
	    1, 1, 0);
	if (res == NULL)
		return (NULL);
	return (resource_list_find(rl, type, rid));
}

static struct resource_list_entry *
linux_pci_get_rle(struct pci_dev *pdev, int type, int rid, bool reserve_bar)
{
	struct pci_devinfo *dinfo;
	struct resource_list *rl;
	struct resource_list_entry *rle;

	dinfo = device_get_ivars(pdev->dev.bsddev);
	rl = &dinfo->resources;
	rle = resource_list_find(rl, type, rid);
	/* Reserve resources for this BAR if needed. */
	if (rle == NULL && reserve_bar)
		rle = linux_pci_reserve_bar(pdev, rl, type, rid);
	return (rle);
}

int
linux_pci_attach_device(device_t dev, struct pci_driver *pdrv,
    const struct pci_device_id *id, struct pci_dev *pdev)
{
	struct resource_list_entry *rle;
	device_t parent;
	uintptr_t rid;
	int error;
	bool isdrm;

	linux_set_current(curthread);

	parent = device_get_parent(dev);
	isdrm = pdrv != NULL && pdrv->isdrm;

	if (isdrm) {
		struct pci_devinfo *dinfo;

		dinfo = device_get_ivars(parent);
		device_set_ivars(dev, dinfo);
	}

	lkpifill_pci_dev(dev, pdev);
	if (isdrm)
		PCI_GET_ID(device_get_parent(parent), parent, PCI_ID_RID, &rid);
	else
		PCI_GET_ID(parent, dev, PCI_ID_RID, &rid);
	pdev->devfn = rid;
	pdev->pdrv = pdrv;
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0, false);
	if (rle != NULL)
		pdev->dev.irq = rle->start;
	else
		pdev->dev.irq = LINUX_IRQ_INVALID;
	pdev->irq = pdev->dev.irq;
	error = linux_pdev_dma_init(pdev);
	if (error)
		goto out_dma_init;

	TAILQ_INIT(&pdev->mmio);
	spin_lock_init(&pdev->pcie_cap_lock);

	spin_lock(&pci_lock);
	list_add(&pdev->links, &pci_devices);
	spin_unlock(&pci_lock);

	if (pdrv != NULL) {
		error = pdrv->probe(pdev, id);
		if (error)
			goto out_probe;
	}
	return (0);

out_probe:
	free(pdev->bus, M_DEVBUF);
	spin_lock_destroy(&pdev->pcie_cap_lock);
	linux_pdev_dma_uninit(pdev);
out_dma_init:
	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);
	return (-error);
}

static int
linux_pci_detach(device_t dev)
{
	struct pci_dev *pdev;

	pdev = device_get_softc(dev);

	MPASS(pdev != NULL);

	device_set_desc(dev, NULL);

	return (linux_pci_detach_device(pdev));
}

int
linux_pci_detach_device(struct pci_dev *pdev)
{

	linux_set_current(curthread);

	if (pdev->pdrv != NULL)
		pdev->pdrv->remove(pdev);

	if (pdev->root != NULL)
		pci_dev_put(pdev->root);
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);

	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	spin_lock_destroy(&pdev->pcie_cap_lock);
	put_device(&pdev->dev);

	return (0);
}

static int
lkpi_pci_disable_dev(struct device *dev)
{

	(void) pci_disable_io(dev->bsddev, SYS_RES_MEMORY);
	(void) pci_disable_io(dev->bsddev, SYS_RES_IOPORT);
	return (0);
}

static struct pci_devres *
lkpi_pci_devres_get_alloc(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = lkpi_devres_find(&pdev->dev, lkpi_pci_devres_release, NULL, NULL);
	if (dr == NULL) {
		dr = lkpi_devres_alloc(lkpi_pci_devres_release, sizeof(*dr),
		    GFP_KERNEL | __GFP_ZERO);
		if (dr != NULL)
			lkpi_devres_add(&pdev->dev, dr);
	}

	return (dr);
}

static struct pci_devres *
lkpi_pci_devres_find(struct pci_dev *pdev)
{
	if (!pdev->managed)
		return (NULL);

	return (lkpi_pci_devres_get_alloc(pdev));
}

void
lkpi_pci_devres_release(struct device *dev, void *p)
{
	struct pci_devres *dr;
	struct pci_dev *pdev;
	int bar;

	pdev = to_pci_dev(dev);
	dr = p;

	if (pdev->msix_enabled)
		lkpi_pci_disable_msix(pdev);
	if (pdev->msi_enabled)
		lkpi_pci_disable_msi(pdev);

	if (dr->enable_io && lkpi_pci_disable_dev(dev) == 0)
		dr->enable_io = false;

	if (dr->region_mask == 0)
		return;
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
		if ((dr->region_mask & (1 << bar)) == 0)
			continue;
		pci_release_region(pdev, bar);
	}
}

int
linuxkpi_pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int error;

	/* Here we cannot run through the pdev->managed check. */
	dr = lkpi_pci_devres_get_alloc(pdev);
	if (dr == NULL)
		return (-ENOMEM);

	/* If resources were enabled before do not do it again. */
	if (dr->enable_io)
		return (0);

	error = pci_enable_device(pdev);
	if (error == 0)
		dr->enable_io = true;

	/* From now on this device is managed. */
	pdev->managed = true;

	return (error);
}

static struct pcim_iomap_devres *
lkpi_pcim_iomap_devres_find(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr;

	dr = lkpi_devres_find(&pdev->dev, lkpi_pcim_iomap_table_release,
	    NULL, NULL);
	if (dr == NULL) {
		dr = lkpi_devres_alloc(lkpi_pcim_iomap_table_release,
		    sizeof(*dr), GFP_KERNEL | __GFP_ZERO);
		if (dr != NULL)
			lkpi_devres_add(&pdev->dev, dr);
	}

	if (dr == NULL)
		device_printf(pdev->dev.bsddev, "%s: NULL\n", __func__);

	return (dr);
}

void __iomem **
linuxkpi_pcim_iomap_table(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr;

	dr = lkpi_pcim_iomap_devres_find(pdev);
	if (dr == NULL)
		return (NULL);

	/*
	 * If the driver has manually set a flag to be able to request the
	 * resource to use bus_read/write_<n>, return the shadow table.
	 */
	if (pdev->want_iomap_res)
		return ((void **)dr->res_table);

	/* This is the Linux default. */
	return (dr->mmio_table);
}

static struct resource *
_lkpi_pci_iomap(struct pci_dev *pdev, int bar, int mmio_size __unused)
{
	struct pci_mmio_region *mmio, *p;
	int type;

	type = pci_resource_type(pdev, bar);
	if (type < 0) {
		device_printf(pdev->dev.bsddev, "%s: bar %d type %d\n",
		    __func__, bar, type);
		return (NULL);
	}

	/*
	 * Check for duplicate mappings.
	 * This can happen if a driver calls pci_request_region() first.
	 */
	TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
		if (mmio->type == type && mmio->rid == PCIR_BAR(bar)) {
			return (mmio->res);
		}
	}

	mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);
	mmio->rid = PCIR_BAR(bar);
	mmio->type = type;
	mmio->res = bus_alloc_resource_any(pdev->dev.bsddev, mmio->type,
	    &mmio->rid, RF_ACTIVE|RF_SHAREABLE);
	if (mmio->res == NULL) {
		device_printf(pdev->dev.bsddev, "%s: failed to alloc "
		    "bar %d type %d rid %d\n",
		    __func__, bar, type, PCIR_BAR(bar));
		free(mmio, M_DEVBUF);
		return (NULL);
	}
	TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);

	return (mmio->res);
}

void *
linuxkpi_pci_iomap_range(struct pci_dev *pdev, int mmio_bar,
    unsigned long mmio_off, unsigned long mmio_size)
{
	struct resource *res;

	res = _lkpi_pci_iomap(pdev, mmio_bar, mmio_size);
	if (res == NULL)
		return (NULL);
	/* This is a FreeBSD extension so we can use bus_*(). */
	if (pdev->want_iomap_res)
		return (res);
	MPASS(mmio_off < rman_get_size(res));
	return ((void *)(rman_get_bushandle(res) + mmio_off));
}

void *
linuxkpi_pci_iomap(struct pci_dev *pdev, int mmio_bar, int mmio_size)
{
	return (linuxkpi_pci_iomap_range(pdev, mmio_bar, 0, mmio_size));
}

void
linuxkpi_pci_iounmap(struct pci_dev *pdev, void *res)
{
	struct pci_mmio_region *mmio, *p;
	bus_space_handle_t bh = (bus_space_handle_t)res;

	TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
		if (pdev->want_iomap_res) {
			if (res != mmio->res)
				continue;
		} else {
			if (bh < rman_get_bushandle(mmio->res) ||
			    bh >= rman_get_bushandle(mmio->res) +
			    rman_get_size(mmio->res))
				continue;
		}
		bus_release_resource(pdev->dev.bsddev,
		    mmio->type, mmio->rid, mmio->res);
		TAILQ_REMOVE(&pdev->mmio, mmio, next);
		free(mmio, M_DEVBUF);
		return;
	}
}

int
linuxkpi_pcim_iomap_regions(struct pci_dev *pdev, uint32_t mask, const char *name)
{
	struct pcim_iomap_devres *dr;
	void *res;
	uint32_t mappings;
	int bar;

	dr = lkpi_pcim_iomap_devres_find(pdev);
	if (dr == NULL)
		return (-ENOMEM);

	/* Now iomap all the requested (by "mask") ones. */
	for (bar = mappings = 0; mappings != mask; bar++) {
		if ((mask & (1 << bar)) == 0)
			continue;

		/* Requesting the same BAR twice is not allowed. */
		if (dr->mmio_table[bar] != NULL) {
			device_printf(pdev->dev.bsddev, "%s: bar %d %p\n",
			    __func__, bar, dr->mmio_table[bar]);
			goto err;
		}

		res = _lkpi_pci_iomap(pdev, bar, 0);
		if (res == NULL)
			goto err;
		dr->mmio_table[bar] = (void *)rman_get_bushandle(res);
		dr->res_table[bar] = res;

		mappings |= (1 << bar);
	}

	return (0);
err:
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
		if ((mappings & (1 << bar)) != 0) {
			res = dr->mmio_table[bar];
			if (res == NULL)
				continue;
			pci_iounmap(pdev, res);
		}
	}

	return (-EINVAL);
}

static void
lkpi_pcim_iomap_table_release(struct device *dev, void *p)
{
	struct pcim_iomap_devres *dr;
	struct pci_dev *pdev;
	int bar;

	dr = p;
	pdev = to_pci_dev(dev);
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
		if (dr->mmio_table[bar] == NULL)
			continue;

		pci_iounmap(pdev, dr->mmio_table[bar]);
	}
}

static int
linux_pci_suspend(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pm_message pm = { };
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->suspend != NULL)
		error = -pdev->pdrv->suspend(pdev, pm);
	else if (pmops != NULL && pmops->suspend != NULL) {
		error = -pmops->suspend(&pdev->dev);
		if (error == 0 && pmops->suspend_late != NULL)
			error = -pmops->suspend_late(&pdev->dev);
		if (error == 0 && pmops->suspend_noirq != NULL)
			error = -pmops->suspend_noirq(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_resume(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->resume != NULL)
		error = -pdev->pdrv->resume(pdev);
	else if (pmops != NULL && pmops->resume != NULL) {
		if (pmops->resume_early != NULL)
			error = -pmops->resume_early(&pdev->dev);
		if (error == 0 && pmops->resume != NULL)
			error = -pmops->resume(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_shutdown(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->shutdown != NULL)
		pdev->pdrv->shutdown(pdev);
	return (0);
}

static int
linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_init != NULL)
		error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config);
	else
		error = EINVAL;
	return (error);
}

static void
linux_pci_iov_uninit(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_uninit != NULL)
		pdev->pdrv->bsd_iov_uninit(dev);
}

static int
linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_add_vf != NULL)
		error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config);
	else
		error = EINVAL;
	return (error);
}

static int
_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
{
	int error;

	linux_set_current(curthread);
	spin_lock(&pci_lock);
	list_add(&pdrv->node, &pci_drivers);
	spin_unlock(&pci_lock);
	if (pdrv->bsddriver.name == NULL)
		pdrv->bsddriver.name = pdrv->name;
	pdrv->bsddriver.methods = pci_methods;
	pdrv->bsddriver.size = sizeof(struct pci_dev);

	bus_topo_lock();
	error = devclass_add_driver(dc, &pdrv->bsddriver,
	    BUS_PASS_DEFAULT, &pdrv->bsdclass);
	bus_topo_unlock();
	return (-error);
}

int
linux_pci_register_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	pdrv->isdrm = strcmp(pdrv->name, "drmn") == 0;
	dc = pdrv->isdrm ? devclass_create("vgapci") : devclass_find("pci");
	if (dc == NULL)
		return (-ENXIO);
	return (_linux_pci_register_driver(pdrv, dc));
}

static struct resource_list_entry *
lkpi_pci_get_bar(struct pci_dev *pdev, int bar, bool reserve)
{
	int type;

	type = pci_resource_type(pdev, bar);
	if (type < 0)
		return (NULL);
	bar = PCIR_BAR(bar);
	return (linux_pci_get_rle(pdev, type, bar, reserve));
}

struct device *
lkpi_pci_find_irq_dev(unsigned int irq)
{
	struct pci_dev *pdev;
	struct device *found;

	found = NULL;
	spin_lock(&pci_lock);
	list_for_each_entry(pdev, &pci_devices, links) {
		if (irq == pdev->dev.irq ||
		    (irq >= pdev->dev.irq_start && irq < pdev->dev.irq_end)) {
			found = &pdev->dev;
			break;
		}
	}
	spin_unlock(&pci_lock);
	return (found);
}

unsigned long
pci_resource_start(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;
	rman_res_t newstart;
	device_t dev;
	int error;

	if ((rle = lkpi_pci_get_bar(pdev, bar, true)) == NULL)
		return (0);
	dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	error = bus_translate_resource(dev, rle->type, rle->start, &newstart);
	if (error != 0) {
		device_printf(pdev->dev.bsddev,
		    "translate of %#jx failed: %d\n",
		    (uintmax_t)rle->start, error);
		return (0);
	}
	return (newstart);
}

unsigned long
pci_resource_len(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;

	if ((rle = lkpi_pci_get_bar(pdev, bar, true)) == NULL)
		return (0);
	return (rle->count);
}

int
pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	struct resource *res;
	struct pci_devres *dr;
	struct pci_mmio_region *mmio;
	int rid;
	int type;

	type = pci_resource_type(pdev, bar);
	if (type < 0)
		return (-ENODEV);
	rid = PCIR_BAR(bar);
	res = bus_alloc_resource_any(pdev->dev.bsddev, type, &rid,
	    RF_ACTIVE|RF_SHAREABLE);
	if (res == NULL) {
		device_printf(pdev->dev.bsddev, "%s: failed to alloc "
		    "bar %d type %d rid %d\n",
		    __func__, bar, type, PCIR_BAR(bar));
		return (-ENODEV);
	}

	/*
	 * It seems there is an implicit devres tracking on these if the device
	 * is managed; otherwise the resources are not automatically freed on
	 * FreeBSD/LinuxKPI though they should be/are expected to be by Linux
	 * drivers.
	 */
	dr = lkpi_pci_devres_find(pdev);
	if (dr != NULL) {
		dr->region_mask |= (1 << bar);
		dr->region_table[bar] = res;
	}

	/* Even if the device is not managed we need to track it for iomap. */
	mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);
	mmio->rid = PCIR_BAR(bar);
	mmio->type = type;
	mmio->res = res;
	TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);

	return (0);
}

int
linuxkpi_pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
	int error;
	int i;

	for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
		error = pci_request_region(pdev, i, res_name);
		if (error && error != -ENODEV) {
			pci_release_regions(pdev);
			return (error);
		}
	}
	return (0);
}

void
linuxkpi_pci_release_region(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;
	struct pci_devres *dr;
	struct pci_mmio_region *mmio, *p;

	if ((rle = lkpi_pci_get_bar(pdev, bar, false)) == NULL)
		return;

	/*
	 * As we implicitly track the requests we also need to clear them on
	 * release.  Do clear before resource release.
	 */
	dr = lkpi_pci_devres_find(pdev);
	if (dr != NULL) {
		KASSERT(dr->region_table[bar] == rle->res, ("%s: pdev %p bar %d"
		    " region_table res %p != rle->res %p\n", __func__, pdev,
		    bar, dr->region_table[bar], rle->res));
		dr->region_table[bar] = NULL;
		dr->region_mask &= ~(1 << bar);
	}

	TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
		if (rle->res != (void *)rman_get_bushandle(mmio->res))
			continue;
		TAILQ_REMOVE(&pdev->mmio, mmio, next);
		free(mmio, M_DEVBUF);
	}

	bus_release_resource(pdev->dev.bsddev, rle->type, rle->rid, rle->res);
}

void
linuxkpi_pci_release_regions(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i <= PCIR_MAX_BAR_0; i++)
		pci_release_region(pdev, i);
}

int
linux_pci_register_drm_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_create("vgapci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = true;
	pdrv->name = "drmn";
	return (_linux_pci_register_driver(pdrv, dc));
}

void
linux_pci_unregister_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find(pdrv->isdrm ? "vgapci" : "pci");

	spin_lock(&pci_lock);
	list_del(&pdrv->node);
	spin_unlock(&pci_lock);
	bus_topo_lock();
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	bus_topo_unlock();
}

void
linux_pci_unregister_drm_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("vgapci");

	spin_lock(&pci_lock);
	list_del(&pdrv->node);
	spin_unlock(&pci_lock);
	bus_topo_lock();
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	bus_topo_unlock();
}

int
linuxkpi_pci_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
    int nreq)
{
	struct resource_list_entry *rle;
	int error;
	int avail;
	int i;

	avail = pci_msix_count(pdev->dev.bsddev);
	if (avail < nreq) {
		if (avail == 0)
			return -EINVAL;
		return avail;
	}
	avail = nreq;
	if ((error = -pci_alloc_msix(pdev->dev.bsddev, &avail)) != 0)
		return error;
	/*
	 * Handle case where "pci_alloc_msix()" may allocate fewer
	 * interrupts than available and return with no error:
	 */
	if (avail < nreq) {
		pci_release_msi(pdev->dev.bsddev);
		return avail;
	}
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false);
	pdev->dev.irq_start = rle->start;
	pdev->dev.irq_end = rle->start + avail;
	for (i = 0; i < nreq; i++)
		entries[i].vector = pdev->dev.irq_start + i;
	pdev->msix_enabled = true;
	return (0);
}

int
_lkpi_pci_enable_msi_range(struct pci_dev *pdev, int minvec, int maxvec)
{
	struct resource_list_entry *rle;
	int error;
	int nvec;

	if (maxvec < minvec)
		return (-EINVAL);

	nvec = pci_msi_count(pdev->dev.bsddev);
	if (nvec < 1 || nvec < minvec)
		return (-ENOSPC);

	nvec = min(nvec, maxvec);
	if ((error = -pci_alloc_msi(pdev->dev.bsddev, &nvec)) != 0)
		return error;

	/* Native PCI might only ever ask for 32 vectors. */
	if (nvec < minvec) {
		pci_release_msi(pdev->dev.bsddev);
		return (-ENOSPC);
	}

	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false);
	pdev->dev.irq_start = rle->start;
	pdev->dev.irq_end = rle->start + nvec;
	pdev->irq = rle->start;
	pdev->msi_enabled = true;
	return (0);
}

int
pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv,
    unsigned int flags)
{
	int error;

	if (flags & PCI_IRQ_MSIX) {
		struct msix_entry *entries;
		int i;

		entries = kcalloc(maxv, sizeof(*entries), GFP_KERNEL);
		if (entries == NULL) {
			error = -ENOMEM;
			goto out;
		}
		for (i = 0; i < maxv; ++i)
			entries[i].entry = i;
		error = pci_enable_msix(pdev, entries, maxv);
out:
		kfree(entries);
		if (error == 0 && pdev->msix_enabled)
			return (pdev->dev.irq_end - pdev->dev.irq_start);
	}
	if (flags & PCI_IRQ_MSI) {
		if (pci_msi_count(pdev->dev.bsddev) < minv)
			return (-ENOSPC);
		error = _lkpi_pci_enable_msi_range(pdev, minv, maxv);
		if (error == 0 && pdev->msi_enabled)
			return (pdev->dev.irq_end - pdev->dev.irq_start);
	}
	if (flags & PCI_IRQ_INTX) {
		if (pdev->irq)
			return (1);
	}

	return (-EINVAL);
}

struct msi_desc *
lkpi_pci_msi_desc_alloc(int irq)
{
	struct device *dev;
	struct pci_dev *pdev;
	struct msi_desc *desc;
	struct pci_devinfo *dinfo;
	struct pcicfg_msi *msi;
	int vec;

	dev = lkpi_pci_find_irq_dev(irq);
	if (dev == NULL)
		return (NULL);

	pdev = to_pci_dev(dev);

	if (pdev->msi_desc == NULL)
		return (NULL);

	if (irq < pdev->dev.irq_start || irq >= pdev->dev.irq_end)
		return (NULL);

	/* Index of this vector within the allocated IRQ range. */
	vec = irq - pdev->dev.irq_start;

	if (pdev->msi_desc[vec] != NULL)
		return (pdev->msi_desc[vec]);

	dinfo = device_get_ivars(dev->bsddev);
	msi = &dinfo->cfg.msi;

	desc = malloc(sizeof(*desc), M_DEVBUF, M_WAITOK | M_ZERO);

	desc->pci.msi_attrib.is_64 =
	    (msi->msi_ctrl & PCIM_MSICTRL_64BIT) ? true : false;
	desc->msg.data = msi->msi_data;

	pdev->msi_desc[vec] = desc;

	return (desc);
}

bool
pci_device_is_present(struct pci_dev *pdev)
{
	device_t dev;

	dev = pdev->dev.bsddev;

	return (bus_child_present(dev));
}

CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));

struct linux_dma_obj {
	void		*vaddr;
	uint64_t	dma_addr;
	bus_dmamap_t	dmamap;
	bus_dma_tag_t	dmat;
};

static uma_zone_t linux_dma_trie_zone;
static uma_zone_t linux_dma_obj_zone;

static void
linux_dma_init(void *arg)
{

	linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
	    pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
	    UMA_ALIGN_PTR, 0);
	linux_dma_obj_zone = uma_zcreate("linux_dma_object",
	    sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	lkpi_pci_nseg1_fail = counter_u64_alloc(M_WAITOK);
}
SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);

static void
linux_dma_uninit(void *arg)
{

	counter_u64_free(lkpi_pci_nseg1_fail);
	uma_zdestroy(linux_dma_obj_zone);
	uma_zdestroy(linux_dma_trie_zone);
}
SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);

static void *
linux_dma_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT));
}

static void
linux_dma_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(linux_dma_trie_zone, node);
}

PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
    linux_dma_trie_free);

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
static dma_addr_t
linux_dma_map_phys_common(struct device *dev, vm_paddr_t phys, size_t len,
    bus_dma_tag_t dmat)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	/*
	 * If the resultant mapping will be entirely 1:1 with the
	 * physical address, short-circuit the remainder of the
	 * bus_dma API.  This avoids tracking collisions in the pctrie
	 * with the additional benefit of reducing overhead.
	 */
	if (bus_dma_id_mapped(dmat, phys, len))
		return (phys);

	obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT);
	if (obj == NULL) {
		return (0);
	}
	obj->dmat = dmat;

	DMA_PRIV_LOCK(priv);
	if (bus_dmamap_create(obj->dmat, 0, &obj->dmamap) != 0) {
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	nseg = -1;
	if (_bus_dmamap_load_phys(obj->dmat, obj->dmamap, phys, len,
	    BUS_DMA_NOWAIT, &seg, &nseg) != 0) {
		bus_dmamap_destroy(obj->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		counter_u64_add(lkpi_pci_nseg1_fail, 1);
		if (linuxkpi_debug)
			dump_stack();
		return (0);
	}

	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
	if (error != 0) {
		bus_dmamap_unload(obj->dmat, obj->dmamap);
		bus_dmamap_destroy(obj->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}
	DMA_PRIV_UNLOCK(priv);
	return (obj->dma_addr);
}
#else
static dma_addr_t
linux_dma_map_phys_common(struct device *dev __unused, vm_paddr_t phys,
    size_t len __unused, bus_dma_tag_t dmat __unused)
{
	return (phys);
}
#endif

dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;
	return (linux_dma_map_phys_common(dev, phys, len, priv->dmat));
}

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);
	bus_dmamap_unload(obj->dmat, obj->dmamap);
	bus_dmamap_destroy(obj->dmat, obj->dmamap);
	DMA_PRIV_UNLOCK(priv);

	uma_zfree(linux_dma_obj_zone, obj);
}
#else
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
}
#endif

void *
linux_dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t flag)
{
	struct linux_dma_priv *priv;
	vm_paddr_t high;
	size_t align;
	void *mem;

	if (dev == NULL || dev->dma_priv == NULL) {
		*dma_handle = 0;
		return (NULL);
	}
	priv = dev->dma_priv;
	if (priv->dma_coherent_mask)
		high = priv->dma_coherent_mask;
	else
		/* Coherent is lower 32bit only by default in Linux. */
		high = BUS_SPACE_MAXADDR_32BIT;
	align = PAGE_SIZE << get_order(size);
	/* Always zero the allocation. */
	flag |= M_ZERO;
	mem = kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high,
	    align, 0, VM_MEMATTR_DEFAULT);
	if (mem != NULL) {
		*dma_handle = linux_dma_map_phys_common(dev, vtophys(mem), size,
		    priv->dmat_coherent);
		if (*dma_handle == 0) {
			kmem_free(mem, size);
			mem = NULL;
		}
	} else {
		*dma_handle = 0;
	}
	return (mem);
}

struct lkpi_devres_dmam_coherent {
	size_t size;
	dma_addr_t *handle;
	void *mem;
};

static void
lkpi_dmam_free_coherent(struct device *dev, void *p)
{
	struct lkpi_devres_dmam_coherent *dr;

	dr = p;
	dma_free_coherent(dev, dr->size, dr->mem, *dr->handle);
}

void *
linuxkpi_dmam_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
    gfp_t flag)
{
	struct lkpi_devres_dmam_coherent *dr;

	dr = lkpi_devres_alloc(lkpi_dmam_free_coherent,
	    sizeof(*dr), GFP_KERNEL | __GFP_ZERO);

	if (dr == NULL)
		return (NULL);

	dr->size = size;
	dr->mem = linux_dma_alloc_coherent(dev, size, dma_handle, flag);
	dr->handle = dma_handle;
	if (dr->mem == NULL) {
		lkpi_devres_free(dr);
		return (NULL);
	}

	lkpi_devres_add(dev, dr);
	return (dr->mem);
}

void
linuxkpi_dma_sync(struct device *dev, dma_addr_t dma_addr, size_t size,
    bus_dmasync_op_t op)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}

	bus_dmamap_sync(obj->dmat, obj->dmamap, op);
	DMA_PRIV_UNLOCK(priv);
}

int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction direction, unsigned long attrs __unused)
{
	struct linux_dma_priv *priv;
	struct scatterlist *sg;
	int i, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	/* create common DMA map in the first S/G entry */
	if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
		DMA_PRIV_UNLOCK(priv);
		return (0);
	}

	/* load all S/G list entries */
	for_each_sg(sgl, sg, nents, i) {
		nseg = -1;
		if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
		    sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
		    &seg, &nseg) != 0) {
			bus_dmamap_unload(priv->dmat, sgl->dma_map);
			bus_dmamap_destroy(priv->dmat, sgl->dma_map);
			DMA_PRIV_UNLOCK(priv);
			return (0);
		}
		KASSERT(nseg == 0,
		    ("More than one segment (nseg=%d)", nseg + 1));

		sg_dma_address(sg) = seg.ds_addr;
	}

	switch (direction) {
	case DMA_BIDIRECTIONAL:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE);
		break;
	case DMA_TO_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD);
		break;
	case DMA_FROM_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE);
		break;
	default:
		break;
	}

	DMA_PRIV_UNLOCK(priv);

	return (nents);
}

void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents __unused, enum dma_data_direction direction,
    unsigned long attrs __unused)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	switch (direction) {
	case DMA_BIDIRECTIONAL:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD);
		break;
	case DMA_TO_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTWRITE);
		break;
	case DMA_FROM_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD);
		break;
	default:
		break;
	}

	bus_dmamap_unload(priv->dmat, sgl->dma_map);
	bus_dmamap_destroy(priv->dmat, sgl->dma_map);
	DMA_PRIV_UNLOCK(priv);
}

struct dma_pool {
	struct device	*pool_device;
	uma_zone_t	pool_zone;
	struct mtx	pool_lock;
	bus_dma_tag_t	pool_dmat;
	size_t		pool_entry_size;
	struct pctrie	pool_ptree;
};

#define	DMA_POOL_LOCK(pool)	mtx_lock(&(pool)->pool_lock)
#define	DMA_POOL_UNLOCK(pool)	mtx_unlock(&(pool)->pool_lock)

static inline int
dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;
	int error, nseg;
	bus_dma_segment_t seg;

	nseg = -1;
	DMA_POOL_LOCK(pool);
	error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
	    vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
	    &seg, &nseg);
	DMA_POOL_UNLOCK(pool);
	if (error != 0) {
		return (error);
	}
	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	return (0);
}

static void
dma_pool_obj_dtor(void *mem, int size, void *arg)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;

	DMA_POOL_LOCK(pool);
	bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
	DMA_POOL_UNLOCK(pool);
}

static int
dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
	struct dma_pool *pool = arg;
	struct linux_dma_obj *obj;
	int error, i;

	for (i = 0; i < count; i++) {
		obj = uma_zalloc(linux_dma_obj_zone, flags);
		if (obj == NULL)
			break;

		error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
		    BUS_DMA_NOWAIT, &obj->dmamap);
		if (error != 0) {
			uma_zfree(linux_dma_obj_zone, obj);
			break;
		}

		store[i] = obj;
	}

	return (i);
}

static void
dma_pool_obj_release(void *arg, void **store, int count)
{
	struct dma_pool *pool = arg;
	struct linux_dma_obj *obj;
	int i;

	for (i = 0; i < count; i++) {
		obj = store[i];
		bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
		uma_zfree(linux_dma_obj_zone, obj);
	}
}

struct dma_pool *
linux_dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
{
	struct linux_dma_priv *priv;
	struct dma_pool *pool;

	priv = dev->dma_priv;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	pool->pool_device = dev;
	pool->pool_entry_size = size;

	if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    align, boundary,		/* alignment, boundary */
	    priv->dma_mask,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &pool->pool_dmat)) {
		kfree(pool);
		return (NULL);
	}

	pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,
	    dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,
	    dma_pool_obj_release, pool, 0);

	mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF);
	pctrie_init(&pool->pool_ptree);

	return (pool);
}

void
linux_dma_pool_destroy(struct dma_pool *pool)
{

	uma_zdestroy(pool->pool_zone);
	bus_dma_tag_destroy(pool->pool_dmat);
	mtx_destroy(&pool->pool_lock);
	kfree(pool);
}

void
lkpi_dmam_pool_destroy(struct device *dev, void *p)
{
	struct dma_pool *pool;

	pool = *(struct dma_pool **)p;
	LINUX_DMA_PCTRIE_RECLAIM(&pool->pool_ptree);
	linux_dma_pool_destroy(pool);
}

void *
linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    dma_addr_t *handle)
{
	struct linux_dma_obj *obj;

	obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags & GFP_NATIVE_MASK);
	if (obj == NULL)
		return (NULL);

	DMA_POOL_LOCK(pool);
	if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
		DMA_POOL_UNLOCK(pool);
		uma_zfree_arg(pool->pool_zone, obj, pool);
		return (NULL);
	}
	DMA_POOL_UNLOCK(pool);

	*handle = obj->dma_addr;
	return (obj->vaddr);
}

void
linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
{
	struct linux_dma_obj *obj;

	DMA_POOL_LOCK(pool);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
	if (obj == NULL) {
		DMA_POOL_UNLOCK(pool);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
	DMA_POOL_UNLOCK(pool);

	uma_zfree_arg(pool->pool_zone, obj, pool);
}

static int
linux_backlight_get_status(device_t dev, struct backlight_props *props)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	props->brightness = pdev->dev.bd->props.brightness;
	props->brightness = props->brightness * 100 / pdev->dev.bd->props.max_brightness;
	props->nlevels = 0;

	return (0);
}

static int
linux_backlight_get_info(device_t dev, struct backlight_info *info)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	info->type = BACKLIGHT_TYPE_PANEL;
	strlcpy(info->name, pdev->dev.bd->name, BACKLIGHTMAXNAMELENGTH);
	return (0);
}

static int
linux_backlight_update_status(device_t dev, struct backlight_props *props)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	pdev->dev.bd->props.brightness = pdev->dev.bd->props.max_brightness *
	    props->brightness / 100;
	pdev->dev.bd->props.power = props->brightness == 0 ?
	    4/* FB_BLANK_POWERDOWN */ : 0/* FB_BLANK_UNBLANK */;
	return (pdev->dev.bd->ops->update_status(pdev->dev.bd));
}

struct backlight_device *
linux_backlight_device_register(const char *name, struct device *dev,
    void *data, const struct backlight_ops *ops, struct backlight_properties *props)
{

	dev->bd = malloc(sizeof(*dev->bd), M_DEVBUF, M_WAITOK | M_ZERO);
	dev->bd->ops = ops;
	dev->bd->props.type = props->type;
	dev->bd->props.max_brightness = props->max_brightness;
	dev->bd->props.brightness = props->brightness;
	dev->bd->props.power = props->power;
	dev->bd->data = data;
	dev->bd->dev = dev;
	dev->bd->name = strdup(name, M_DEVBUF);

	dev->backlight_dev = backlight_register(name, dev->bsddev);

	return (dev->bd);
}

void
linux_backlight_device_unregister(struct backlight_device *bd)
{

	backlight_destroy(bd->dev->backlight_dev);
	free(bd->name, M_DEVBUF);
	free(bd, M_DEVBUF);
}