1 /*- 2 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd. 3 * All rights reserved. 4 * Copyright (c) 2020-2025 The FreeBSD Foundation 5 * 6 * Portions of this software were developed by Björn Zeeb 7 * under sponsorship from the FreeBSD Foundation. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice unmodified, this list of conditions, and the following 14 * disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 */ 30 31 #include <sys/param.h> 32 #include <sys/systm.h> 33 #include <sys/bus.h> 34 #include <sys/malloc.h> 35 #include <sys/kernel.h> 36 #include <sys/sysctl.h> 37 #include <sys/lock.h> 38 #include <sys/mutex.h> 39 #include <sys/fcntl.h> 40 #include <sys/file.h> 41 #include <sys/filio.h> 42 #include <sys/pciio.h> 43 #include <sys/pctrie.h> 44 #include <sys/rman.h> 45 #include <sys/rwlock.h> 46 #include <sys/stdarg.h> 47 48 #include <vm/vm.h> 49 #include <vm/pmap.h> 50 51 #include <machine/bus.h> 52 #include <machine/resource.h> 53 54 #include <dev/pci/pcivar.h> 55 #include <dev/pci/pci_private.h> 56 #include <dev/pci/pci_iov.h> 57 #include <dev/backlight/backlight.h> 58 59 #include <linux/kernel.h> 60 #include <linux/kobject.h> 61 #include <linux/device.h> 62 #include <linux/slab.h> 63 #include <linux/module.h> 64 #include <linux/cdev.h> 65 #include <linux/file.h> 66 #include <linux/sysfs.h> 67 #include <linux/mm.h> 68 #include <linux/io.h> 69 #include <linux/vmalloc.h> 70 #include <linux/pci.h> 71 #include <linux/compat.h> 72 73 #include <linux/backlight.h> 74 75 #include "backlight_if.h" 76 #include "pcib_if.h" 77 78 /* Undef the linux function macro defined in linux/pci.h */ 79 #undef pci_get_class 80 81 extern int linuxkpi_debug; 82 83 SYSCTL_DECL(_compat_linuxkpi); 84 85 static counter_u64_t lkpi_pci_nseg1_fail; 86 SYSCTL_COUNTER_U64(_compat_linuxkpi, OID_AUTO, lkpi_pci_nseg1_fail, CTLFLAG_RD, 87 &lkpi_pci_nseg1_fail, "Count of busdma mapping failures of single-segment"); 88 89 static device_probe_t linux_pci_probe; 90 static device_attach_t linux_pci_attach; 91 static device_detach_t linux_pci_detach; 92 static device_suspend_t linux_pci_suspend; 93 static device_resume_t linux_pci_resume; 94 static device_shutdown_t linux_pci_shutdown; 95 static pci_iov_init_t linux_pci_iov_init; 96 static pci_iov_uninit_t linux_pci_iov_uninit; 97 static pci_iov_add_vf_t linux_pci_iov_add_vf; 98 static int linux_backlight_get_status(device_t dev, struct backlight_props 
*props); 99 static int linux_backlight_update_status(device_t dev, struct backlight_props *props); 100 static int linux_backlight_get_info(device_t dev, struct backlight_info *info); 101 static void lkpi_pcim_iomap_table_release(struct device *, void *); 102 103 static device_method_t pci_methods[] = { 104 DEVMETHOD(device_probe, linux_pci_probe), 105 DEVMETHOD(device_attach, linux_pci_attach), 106 DEVMETHOD(device_detach, linux_pci_detach), 107 DEVMETHOD(device_suspend, linux_pci_suspend), 108 DEVMETHOD(device_resume, linux_pci_resume), 109 DEVMETHOD(device_shutdown, linux_pci_shutdown), 110 DEVMETHOD(pci_iov_init, linux_pci_iov_init), 111 DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit), 112 DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf), 113 114 /* Bus interface. */ 115 DEVMETHOD(bus_add_child, bus_generic_add_child), 116 117 /* backlight interface */ 118 DEVMETHOD(backlight_update_status, linux_backlight_update_status), 119 DEVMETHOD(backlight_get_status, linux_backlight_get_status), 120 DEVMETHOD(backlight_get_info, linux_backlight_get_info), 121 DEVMETHOD_END 122 }; 123 124 const char *pci_power_names[] = { 125 "UNKNOWN", "D0", "D1", "D2", "D3hot", "D3cold" 126 }; 127 128 /* We need some meta-struct to keep track of these for devres. */ 129 struct pci_devres { 130 bool enable_io; 131 /* PCIR_MAX_BAR_0 + 1 = 6 => BIT(0..5). */ 132 uint8_t region_mask; 133 struct resource *region_table[PCIR_MAX_BAR_0 + 1]; /* Not needed. */ 134 }; 135 struct pcim_iomap_devres { 136 void *mmio_table[PCIR_MAX_BAR_0 + 1]; 137 struct resource *res_table[PCIR_MAX_BAR_0 + 1]; 138 }; 139 140 struct linux_dma_priv { 141 uint64_t dma_mask; 142 bus_dma_tag_t dmat; 143 uint64_t dma_coherent_mask; 144 bus_dma_tag_t dmat_coherent; 145 struct mtx lock; 146 struct pctrie ptree; 147 }; 148 #define DMA_PRIV_LOCK(priv) mtx_lock(&(priv)->lock) 149 #define DMA_PRIV_UNLOCK(priv) mtx_unlock(&(priv)->lock) 150 151 static void 152 lkpi_set_pcim_iomap_devres(struct pcim_iomap_devres *dr, int bar, 153 void *res) 154 { 155 dr->mmio_table[bar] = (void *)rman_get_bushandle(res); 156 dr->res_table[bar] = res; 157 } 158 159 static bool 160 lkpi_pci_bar_id_valid(int bar) 161 { 162 if (bar < 0 || bar > PCIR_MAX_BAR_0) 163 return (false); 164 165 return (true); 166 } 167 168 static int 169 linux_pdev_dma_uninit(struct pci_dev *pdev) 170 { 171 struct linux_dma_priv *priv; 172 173 priv = pdev->dev.dma_priv; 174 if (priv->dmat) 175 bus_dma_tag_destroy(priv->dmat); 176 if (priv->dmat_coherent) 177 bus_dma_tag_destroy(priv->dmat_coherent); 178 mtx_destroy(&priv->lock); 179 pdev->dev.dma_priv = NULL; 180 free(priv, M_DEVBUF); 181 return (0); 182 } 183 184 static int 185 linux_pdev_dma_init(struct pci_dev *pdev) 186 { 187 struct linux_dma_priv *priv; 188 int error; 189 190 priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO); 191 192 mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF); 193 pctrie_init(&priv->ptree); 194 195 pdev->dev.dma_priv = priv; 196 197 /* Create a default DMA tags. */ 198 error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64)); 199 if (error != 0) 200 goto err; 201 /* Coherent is lower 32bit only by default in Linux. 
*/ 202 error = linux_dma_tag_init_coherent(&pdev->dev, DMA_BIT_MASK(32)); 203 if (error != 0) 204 goto err; 205 206 return (error); 207 208 err: 209 linux_pdev_dma_uninit(pdev); 210 return (error); 211 } 212 213 int 214 linux_dma_tag_init(struct device *dev, u64 dma_mask) 215 { 216 struct linux_dma_priv *priv; 217 int error; 218 219 priv = dev->dma_priv; 220 221 if (priv->dmat) { 222 if (priv->dma_mask == dma_mask) 223 return (0); 224 225 bus_dma_tag_destroy(priv->dmat); 226 } 227 228 priv->dma_mask = dma_mask; 229 230 error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev), 231 1, 0, /* alignment, boundary */ 232 dma_mask, /* lowaddr */ 233 BUS_SPACE_MAXADDR, /* highaddr */ 234 NULL, NULL, /* filtfunc, filtfuncarg */ 235 BUS_SPACE_MAXSIZE, /* maxsize */ 236 1, /* nsegments */ 237 BUS_SPACE_MAXSIZE, /* maxsegsz */ 238 0, /* flags */ 239 NULL, NULL, /* lockfunc, lockfuncarg */ 240 &priv->dmat); 241 return (-error); 242 } 243 244 int 245 linux_dma_tag_init_coherent(struct device *dev, u64 dma_mask) 246 { 247 struct linux_dma_priv *priv; 248 int error; 249 250 priv = dev->dma_priv; 251 252 if (priv->dmat_coherent) { 253 if (priv->dma_coherent_mask == dma_mask) 254 return (0); 255 256 bus_dma_tag_destroy(priv->dmat_coherent); 257 } 258 259 priv->dma_coherent_mask = dma_mask; 260 261 error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev), 262 1, 0, /* alignment, boundary */ 263 dma_mask, /* lowaddr */ 264 BUS_SPACE_MAXADDR, /* highaddr */ 265 NULL, NULL, /* filtfunc, filtfuncarg */ 266 BUS_SPACE_MAXSIZE, /* maxsize */ 267 1, /* nsegments */ 268 BUS_SPACE_MAXSIZE, /* maxsegsz */ 269 0, /* flags */ 270 NULL, NULL, /* lockfunc, lockfuncarg */ 271 &priv->dmat_coherent); 272 return (-error); 273 } 274 275 static struct pci_driver * 276 linux_pci_find(device_t dev, const struct pci_device_id **idp) 277 { 278 const struct pci_device_id *id; 279 struct pci_driver *pdrv; 280 uint16_t vendor; 281 uint16_t device; 282 uint16_t subvendor; 283 uint16_t subdevice; 284 285 vendor = pci_get_vendor(dev); 286 device = pci_get_device(dev); 287 subvendor = pci_get_subvendor(dev); 288 subdevice = pci_get_subdevice(dev); 289 290 spin_lock(&pci_lock); 291 list_for_each_entry(pdrv, &pci_drivers, node) { 292 for (id = pdrv->id_table; id->vendor != 0; id++) { 293 if (vendor == id->vendor && 294 (PCI_ANY_ID == id->device || device == id->device) && 295 (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) && 296 (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) { 297 *idp = id; 298 spin_unlock(&pci_lock); 299 return (pdrv); 300 } 301 } 302 } 303 spin_unlock(&pci_lock); 304 return (NULL); 305 } 306 307 struct pci_dev * 308 lkpi_pci_get_device(uint32_t vendor, uint32_t device, struct pci_dev *odev) 309 { 310 struct pci_dev *pdev, *found; 311 312 found = NULL; 313 spin_lock(&pci_lock); 314 list_for_each_entry(pdev, &pci_devices, links) { 315 /* Walk until we find odev. 
*/ 316 if (odev != NULL) { 317 if (pdev == odev) 318 odev = NULL; 319 continue; 320 } 321 322 if ((pdev->vendor == vendor || vendor == PCI_ANY_ID) && 323 (pdev->device == device || device == PCI_ANY_ID)) { 324 found = pdev; 325 break; 326 } 327 } 328 pci_dev_get(found); 329 spin_unlock(&pci_lock); 330 331 return (found); 332 } 333 334 static void 335 lkpi_pci_dev_release(struct device *dev) 336 { 337 338 lkpi_devres_release_free_list(dev); 339 spin_lock_destroy(&dev->devres_lock); 340 } 341 342 static int 343 lkpifill_pci_dev(device_t dev, struct pci_dev *pdev) 344 { 345 struct pci_devinfo *dinfo; 346 int error; 347 348 error = kobject_init_and_add(&pdev->dev.kobj, &linux_dev_ktype, 349 &linux_root_device.kobj, device_get_nameunit(dev)); 350 if (error != 0) { 351 printf("%s:%d: kobject_init_and_add returned %d\n", 352 __func__, __LINE__, error); 353 return (error); 354 } 355 356 pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev)); 357 pdev->vendor = pci_get_vendor(dev); 358 pdev->device = pci_get_device(dev); 359 pdev->subsystem_vendor = pci_get_subvendor(dev); 360 pdev->subsystem_device = pci_get_subdevice(dev); 361 pdev->class = pci_get_class(dev); 362 pdev->revision = pci_get_revid(dev); 363 pdev->path_name = kasprintf(GFP_KERNEL, "%04d:%02d:%02d.%d", 364 pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev), 365 pci_get_function(dev)); 366 367 pdev->bus = malloc(sizeof(*pdev->bus), M_DEVBUF, M_WAITOK | M_ZERO); 368 pdev->bus->number = pci_get_bus(dev); 369 pdev->bus->domain = pci_get_domain(dev); 370 371 /* Check if we have reached the root to satisfy pci_is_root_bus() */ 372 dinfo = device_get_ivars(dev); 373 if (dinfo->cfg.pcie.pcie_location != 0 && 374 dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT) { 375 pdev->bus->self = NULL; 376 } else { 377 /* 378 * This should be the upstream bridge; pci_upstream_bridge() 379 * handles that case on demand as otherwise we'll shadow the 380 * entire PCI hierarchy. 
381 */ 382 pdev->bus->self = pdev; 383 } 384 pdev->dev.bsddev = dev; 385 pdev->dev.parent = &linux_root_device; 386 pdev->dev.release = lkpi_pci_dev_release; 387 INIT_LIST_HEAD(&pdev->dev.irqents); 388 389 if (pci_msi_count(dev) > 0) 390 pdev->msi_desc = malloc(pci_msi_count(dev) * 391 sizeof(*pdev->msi_desc), M_DEVBUF, M_WAITOK | M_ZERO); 392 393 spin_lock_init(&pdev->dev.devres_lock); 394 INIT_LIST_HEAD(&pdev->dev.devres_head); 395 396 return (0); 397 } 398 399 static void 400 lkpinew_pci_dev_release(struct device *dev) 401 { 402 struct pci_dev *pdev; 403 int i; 404 405 pdev = to_pci_dev(dev); 406 if (pdev->root != NULL) 407 pci_dev_put(pdev->root); 408 if (pdev->bus->self != pdev && pdev->bus->self != NULL) 409 pci_dev_put(pdev->bus->self); 410 free(pdev->bus, M_DEVBUF); 411 if (pdev->msi_desc != NULL) { 412 for (i = pci_msi_count(pdev->dev.bsddev) - 1; i >= 0; i--) 413 free(pdev->msi_desc[i], M_DEVBUF); 414 free(pdev->msi_desc, M_DEVBUF); 415 } 416 kfree(pdev->path_name); 417 free(pdev, M_DEVBUF); 418 } 419 420 struct pci_dev * 421 lkpinew_pci_dev(device_t dev) 422 { 423 struct pci_dev *pdev; 424 int error; 425 426 pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK|M_ZERO); 427 error = lkpifill_pci_dev(dev, pdev); 428 if (error != 0) { 429 free(pdev, M_DEVBUF); 430 return (NULL); 431 } 432 pdev->dev.release = lkpinew_pci_dev_release; 433 434 return (pdev); 435 } 436 437 struct pci_dev * 438 lkpi_pci_get_class(unsigned int class, struct pci_dev *from) 439 { 440 device_t dev; 441 device_t devfrom = NULL; 442 struct pci_dev *pdev; 443 444 if (from != NULL) 445 devfrom = from->dev.bsddev; 446 447 dev = pci_find_class_from(class >> 16, (class >> 8) & 0xFF, devfrom); 448 if (dev == NULL) 449 return (NULL); 450 451 pdev = lkpinew_pci_dev(dev); 452 return (pdev); 453 } 454 455 struct pci_dev * 456 lkpi_pci_get_base_class(unsigned int baseclass, struct pci_dev *from) 457 { 458 device_t dev; 459 device_t devfrom = NULL; 460 struct pci_dev *pdev; 461 462 if (from != NULL) 463 devfrom = from->dev.bsddev; 464 465 dev = pci_find_base_class_from(baseclass, devfrom); 466 if (dev == NULL) 467 return (NULL); 468 469 pdev = lkpinew_pci_dev(dev); 470 return (pdev); 471 } 472 473 struct pci_dev * 474 lkpi_pci_get_domain_bus_and_slot(int domain, unsigned int bus, 475 unsigned int devfn) 476 { 477 device_t dev; 478 struct pci_dev *pdev; 479 480 dev = pci_find_dbsf(domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); 481 if (dev == NULL) 482 return (NULL); 483 484 pdev = lkpinew_pci_dev(dev); 485 return (pdev); 486 } 487 488 static int 489 linux_pci_probe(device_t dev) 490 { 491 const struct pci_device_id *id; 492 struct pci_driver *pdrv; 493 494 if ((pdrv = linux_pci_find(dev, &id)) == NULL) 495 return (ENXIO); 496 if (device_get_driver(dev) != &pdrv->bsddriver) 497 return (ENXIO); 498 device_set_desc(dev, pdrv->name); 499 500 /* Assume BSS initialized (should never return BUS_PROBE_SPECIFIC). 
*/ 501 if (pdrv->bsd_probe_return == 0) 502 return (BUS_PROBE_DEFAULT); 503 else 504 return (pdrv->bsd_probe_return); 505 } 506 507 static int 508 linux_pci_attach(device_t dev) 509 { 510 const struct pci_device_id *id; 511 struct pci_driver *pdrv; 512 struct pci_dev *pdev; 513 514 pdrv = linux_pci_find(dev, &id); 515 pdev = device_get_softc(dev); 516 517 MPASS(pdrv != NULL); 518 MPASS(pdev != NULL); 519 520 return (linux_pci_attach_device(dev, pdrv, id, pdev)); 521 } 522 523 static struct resource_list_entry * 524 linux_pci_reserve_bar(struct pci_dev *pdev, struct resource_list *rl, 525 int type, int rid) 526 { 527 device_t dev; 528 struct resource *res; 529 530 KASSERT(type == SYS_RES_IOPORT || type == SYS_RES_MEMORY, 531 ("trying to reserve non-BAR type %d", type)); 532 533 dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ? 534 device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev; 535 res = pci_reserve_map(device_get_parent(dev), dev, type, &rid, 0, ~0, 536 1, 1, 0); 537 if (res == NULL) 538 return (NULL); 539 return (resource_list_find(rl, type, rid)); 540 } 541 542 static struct resource_list_entry * 543 linux_pci_get_rle(struct pci_dev *pdev, int type, int rid, bool reserve_bar) 544 { 545 struct pci_devinfo *dinfo; 546 struct resource_list *rl; 547 struct resource_list_entry *rle; 548 549 dinfo = device_get_ivars(pdev->dev.bsddev); 550 rl = &dinfo->resources; 551 rle = resource_list_find(rl, type, rid); 552 /* Reserve resources for this BAR if needed. */ 553 if (rle == NULL && reserve_bar) 554 rle = linux_pci_reserve_bar(pdev, rl, type, rid); 555 return (rle); 556 } 557 558 int 559 linux_pci_attach_device(device_t dev, struct pci_driver *pdrv, 560 const struct pci_device_id *id, struct pci_dev *pdev) 561 { 562 struct resource_list_entry *rle; 563 device_t parent; 564 struct pci_dev *pbus, *ppbus; 565 uintptr_t rid; 566 int error; 567 bool isdrm; 568 569 linux_set_current(curthread); 570 571 parent = device_get_parent(dev); 572 isdrm = pdrv != NULL && pdrv->isdrm; 573 574 if (isdrm) { 575 struct pci_devinfo *dinfo; 576 577 dinfo = device_get_ivars(parent); 578 device_set_ivars(dev, dinfo); 579 } 580 581 error = lkpifill_pci_dev(dev, pdev); 582 if (error != 0) 583 return (error); 584 585 if (isdrm) 586 PCI_GET_ID(device_get_parent(parent), parent, PCI_ID_RID, &rid); 587 else 588 PCI_GET_ID(parent, dev, PCI_ID_RID, &rid); 589 pdev->devfn = rid; 590 pdev->pdrv = pdrv; 591 rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0, false); 592 if (rle != NULL) 593 pdev->dev.irq = rle->start; 594 else 595 pdev->dev.irq = LINUX_IRQ_INVALID; 596 pdev->irq = pdev->dev.irq; 597 error = linux_pdev_dma_init(pdev); 598 if (error) 599 goto out_dma_init; 600 601 TAILQ_INIT(&pdev->mmio); 602 spin_lock_init(&pdev->pcie_cap_lock); 603 604 spin_lock(&pci_lock); 605 list_add(&pdev->links, &pci_devices); 606 spin_unlock(&pci_lock); 607 608 /* 609 * Create the hierarchy now as we cannot on demand later. 610 * Take special care of DRM as there is a non-PCI device in the chain. 
 */
	pbus = pdev;
	if (isdrm) {
		pbus = lkpinew_pci_dev(parent);
		if (pbus == NULL) {
			error = ENXIO;
			goto out_dma_init;
		}
	}
	pcie_find_root_port(pbus);
	if (isdrm)
		pdev->root = pbus->root;
	ppbus = pci_upstream_bridge(pbus);
	while (ppbus != NULL && ppbus != pbus) {
		pbus = ppbus;
		ppbus = pci_upstream_bridge(pbus);
	}

	if (pdrv != NULL) {
		error = pdrv->probe(pdev, id);
		if (error)
			goto out_probe;
	}
	return (0);

	/* XXX the cleanup does not match the allocation up there. */
out_probe:
	free(pdev->bus, M_DEVBUF);
	spin_lock_destroy(&pdev->pcie_cap_lock);
	linux_pdev_dma_uninit(pdev);
out_dma_init:
	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);
	return (-error);
}

static int
linux_pci_detach(device_t dev)
{
	struct pci_dev *pdev;

	pdev = device_get_softc(dev);

	MPASS(pdev != NULL);

	device_set_desc(dev, NULL);

	return (linux_pci_detach_device(pdev));
}

int
linux_pci_detach_device(struct pci_dev *pdev)
{

	linux_set_current(curthread);

	if (pdev->pdrv != NULL)
		pdev->pdrv->remove(pdev);

	if (pdev->root != NULL)
		pci_dev_put(pdev->root);
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);

	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	spin_lock_destroy(&pdev->pcie_cap_lock);
	put_device(&pdev->dev);

	return (0);
}

static int
lkpi_pci_disable_dev(struct device *dev)
{

	(void) pci_disable_io(dev->bsddev, SYS_RES_MEMORY);
	(void) pci_disable_io(dev->bsddev, SYS_RES_IOPORT);
	return (0);
}

static struct pci_devres *
lkpi_pci_devres_get_alloc(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = lkpi_devres_find(&pdev->dev, lkpi_pci_devres_release, NULL, NULL);
	if (dr == NULL) {
		dr = lkpi_devres_alloc(lkpi_pci_devres_release, sizeof(*dr),
		    GFP_KERNEL | __GFP_ZERO);
		if (dr != NULL)
			lkpi_devres_add(&pdev->dev, dr);
	}

	return (dr);
}

static struct pci_devres *
lkpi_pci_devres_find(struct pci_dev *pdev)
{
	if (!pdev->managed)
		return (NULL);

	return (lkpi_pci_devres_get_alloc(pdev));
}

void
lkpi_pci_devres_release(struct device *dev, void *p)
{
	struct pci_devres *dr;
	struct pci_dev *pdev;
	int bar;

	pdev = to_pci_dev(dev);
	dr = p;

	if (pdev->msix_enabled)
		lkpi_pci_disable_msix(pdev);
	if (pdev->msi_enabled)
		lkpi_pci_disable_msi(pdev);

	if (dr->enable_io && lkpi_pci_disable_dev(dev) == 0)
		dr->enable_io = false;

	if (dr->region_mask == 0)
		return;
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {

		if ((dr->region_mask & (1 << bar)) == 0)
			continue;
		pci_release_region(pdev, bar);
	}
}

int
linuxkpi_pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int error;

	/* Here we cannot run through the pdev->managed check. */
	dr = lkpi_pci_devres_get_alloc(pdev);
	if (dr == NULL)
		return (-ENOMEM);

	/* If resources were enabled before do not do it again. */
	if (dr->enable_io)
		return (0);

	error = pci_enable_device(pdev);
	if (error == 0)
		dr->enable_io = true;

	/* This device is now managed.
*/ 768 pdev->managed = true; 769 770 return (error); 771 } 772 773 static struct pcim_iomap_devres * 774 lkpi_pcim_iomap_devres_find(struct pci_dev *pdev) 775 { 776 struct pcim_iomap_devres *dr; 777 778 dr = lkpi_devres_find(&pdev->dev, lkpi_pcim_iomap_table_release, 779 NULL, NULL); 780 if (dr == NULL) { 781 dr = lkpi_devres_alloc(lkpi_pcim_iomap_table_release, 782 sizeof(*dr), GFP_KERNEL | __GFP_ZERO); 783 if (dr != NULL) 784 lkpi_devres_add(&pdev->dev, dr); 785 } 786 787 if (dr == NULL) 788 device_printf(pdev->dev.bsddev, "%s: NULL\n", __func__); 789 790 return (dr); 791 } 792 793 void __iomem ** 794 linuxkpi_pcim_iomap_table(struct pci_dev *pdev) 795 { 796 struct pcim_iomap_devres *dr; 797 798 dr = lkpi_pcim_iomap_devres_find(pdev); 799 if (dr == NULL) 800 return (NULL); 801 802 /* 803 * If the driver has manually set a flag to be able to request the 804 * resource to use bus_read/write_<n>, return the shadow table. 805 */ 806 if (pdev->want_iomap_res) 807 return ((void **)dr->res_table); 808 809 /* This is the Linux default. */ 810 return (dr->mmio_table); 811 } 812 813 static struct resource * 814 _lkpi_pci_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen __unused) 815 { 816 struct pci_mmio_region *mmio, *p; 817 int type; 818 819 if (!lkpi_pci_bar_id_valid(bar)) 820 return (NULL); 821 822 type = pci_resource_type(pdev, bar); 823 if (type < 0) { 824 device_printf(pdev->dev.bsddev, "%s: bar %d type %d\n", 825 __func__, bar, type); 826 return (NULL); 827 } 828 829 /* 830 * Check for duplicate mappings. 831 * This can happen if a driver calls pci_request_region() first. 832 */ 833 TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) { 834 if (mmio->type == type && mmio->rid == PCIR_BAR(bar)) { 835 return (mmio->res); 836 } 837 } 838 839 mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO); 840 mmio->rid = PCIR_BAR(bar); 841 mmio->type = type; 842 mmio->res = bus_alloc_resource_any(pdev->dev.bsddev, mmio->type, 843 &mmio->rid, RF_ACTIVE|RF_SHAREABLE); 844 if (mmio->res == NULL) { 845 device_printf(pdev->dev.bsddev, "%s: failed to alloc " 846 "bar %d type %d rid %d\n", 847 __func__, bar, type, PCIR_BAR(bar)); 848 free(mmio, M_DEVBUF); 849 return (NULL); 850 } 851 TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next); 852 853 return (mmio->res); 854 } 855 856 void * 857 linuxkpi_pci_iomap_range(struct pci_dev *pdev, int bar, 858 unsigned long off, unsigned long maxlen) 859 { 860 struct resource *res; 861 862 if (!lkpi_pci_bar_id_valid(bar)) 863 return (NULL); 864 865 res = _lkpi_pci_iomap(pdev, bar, maxlen); 866 if (res == NULL) 867 return (NULL); 868 /* This is a FreeBSD extension so we can use bus_*(). 
*/ 869 if (pdev->want_iomap_res) 870 return (res); 871 MPASS(off < rman_get_size(res)); 872 return ((void *)(rman_get_bushandle(res) + off)); 873 } 874 875 void * 876 linuxkpi_pci_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen) 877 { 878 if (!lkpi_pci_bar_id_valid(bar)) 879 return (NULL); 880 881 return (linuxkpi_pci_iomap_range(pdev, bar, 0, maxlen)); 882 } 883 884 void * 885 linuxkpi_pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen) 886 { 887 struct pcim_iomap_devres *dr; 888 void *res; 889 890 if (!lkpi_pci_bar_id_valid(bar)) 891 return (NULL); 892 893 dr = lkpi_pcim_iomap_devres_find(pdev); 894 if (dr == NULL) 895 return (NULL); 896 897 if (dr->res_table[bar] != NULL) 898 return (dr->res_table[bar]); 899 900 res = linuxkpi_pci_iomap(pdev, bar, maxlen); 901 if (res == NULL) { 902 /* 903 * Do not free the devres in case there were 904 * other valid mappings before already. 905 */ 906 return (NULL); 907 } 908 lkpi_set_pcim_iomap_devres(dr, bar, res); 909 910 return (res); 911 } 912 913 void 914 linuxkpi_pci_iounmap(struct pci_dev *pdev, void *res) 915 { 916 struct pci_mmio_region *mmio, *p; 917 bus_space_handle_t bh = (bus_space_handle_t)res; 918 919 TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) { 920 if (pdev->want_iomap_res) { 921 if (res != mmio->res) 922 continue; 923 } else { 924 if (bh < rman_get_bushandle(mmio->res) || 925 bh >= rman_get_bushandle(mmio->res) + 926 rman_get_size(mmio->res)) 927 continue; 928 } 929 bus_release_resource(pdev->dev.bsddev, 930 mmio->type, mmio->rid, mmio->res); 931 TAILQ_REMOVE(&pdev->mmio, mmio, next); 932 free(mmio, M_DEVBUF); 933 return; 934 } 935 } 936 937 int 938 linuxkpi_pcim_iomap_regions(struct pci_dev *pdev, uint32_t mask, const char *name) 939 { 940 struct pcim_iomap_devres *dr; 941 void *res; 942 uint32_t mappings; 943 int bar; 944 945 dr = lkpi_pcim_iomap_devres_find(pdev); 946 if (dr == NULL) 947 return (-ENOMEM); 948 949 /* Now iomap all the requested (by "mask") ones. */ 950 for (bar = mappings = 0; mappings != mask; bar++) { 951 if ((mask & (1 << bar)) == 0) 952 continue; 953 954 /* Request double is not allowed. 
*/ 955 if (dr->mmio_table[bar] != NULL) { 956 device_printf(pdev->dev.bsddev, "%s: bar %d %p\n", 957 __func__, bar, dr->mmio_table[bar]); 958 goto err; 959 } 960 961 res = _lkpi_pci_iomap(pdev, bar, 0); 962 if (res == NULL) 963 goto err; 964 lkpi_set_pcim_iomap_devres(dr, bar, res); 965 966 mappings |= (1 << bar); 967 } 968 969 return (0); 970 err: 971 for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) { 972 if ((mappings & (1 << bar)) != 0) { 973 res = dr->mmio_table[bar]; 974 if (res == NULL) 975 continue; 976 pci_iounmap(pdev, res); 977 } 978 } 979 980 return (-EINVAL); 981 } 982 983 static void 984 lkpi_pcim_iomap_table_release(struct device *dev, void *p) 985 { 986 struct pcim_iomap_devres *dr; 987 struct pci_dev *pdev; 988 int bar; 989 990 dr = p; 991 pdev = to_pci_dev(dev); 992 for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) { 993 994 if (dr->mmio_table[bar] == NULL) 995 continue; 996 997 pci_iounmap(pdev, dr->mmio_table[bar]); 998 } 999 } 1000 1001 static int 1002 linux_pci_suspend(device_t dev) 1003 { 1004 const struct dev_pm_ops *pmops; 1005 struct pm_message pm = { }; 1006 struct pci_dev *pdev; 1007 int error; 1008 1009 error = 0; 1010 linux_set_current(curthread); 1011 pdev = device_get_softc(dev); 1012 pmops = pdev->pdrv->driver.pm; 1013 1014 if (pdev->pdrv->suspend != NULL) 1015 error = -pdev->pdrv->suspend(pdev, pm); 1016 else if (pmops != NULL && pmops->suspend != NULL) { 1017 error = -pmops->suspend(&pdev->dev); 1018 if (error == 0 && pmops->suspend_late != NULL) 1019 error = -pmops->suspend_late(&pdev->dev); 1020 if (error == 0 && pmops->suspend_noirq != NULL) 1021 error = -pmops->suspend_noirq(&pdev->dev); 1022 } 1023 return (error); 1024 } 1025 1026 static int 1027 linux_pci_resume(device_t dev) 1028 { 1029 const struct dev_pm_ops *pmops; 1030 struct pci_dev *pdev; 1031 int error; 1032 1033 error = 0; 1034 linux_set_current(curthread); 1035 pdev = device_get_softc(dev); 1036 pmops = pdev->pdrv->driver.pm; 1037 1038 if (pdev->pdrv->resume != NULL) 1039 error = -pdev->pdrv->resume(pdev); 1040 else if (pmops != NULL && pmops->resume != NULL) { 1041 if (pmops->resume_early != NULL) 1042 error = -pmops->resume_early(&pdev->dev); 1043 if (error == 0 && pmops->resume != NULL) 1044 error = -pmops->resume(&pdev->dev); 1045 } 1046 return (error); 1047 } 1048 1049 static int 1050 linux_pci_shutdown(device_t dev) 1051 { 1052 struct pci_dev *pdev; 1053 1054 linux_set_current(curthread); 1055 pdev = device_get_softc(dev); 1056 if (pdev->pdrv->shutdown != NULL) 1057 pdev->pdrv->shutdown(pdev); 1058 return (0); 1059 } 1060 1061 static int 1062 linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config) 1063 { 1064 struct pci_dev *pdev; 1065 int error; 1066 1067 linux_set_current(curthread); 1068 pdev = device_get_softc(dev); 1069 if (pdev->pdrv->bsd_iov_init != NULL) 1070 error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config); 1071 else 1072 error = EINVAL; 1073 return (error); 1074 } 1075 1076 static void 1077 linux_pci_iov_uninit(device_t dev) 1078 { 1079 struct pci_dev *pdev; 1080 1081 linux_set_current(curthread); 1082 pdev = device_get_softc(dev); 1083 if (pdev->pdrv->bsd_iov_uninit != NULL) 1084 pdev->pdrv->bsd_iov_uninit(dev); 1085 } 1086 1087 static int 1088 linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config) 1089 { 1090 struct pci_dev *pdev; 1091 int error; 1092 1093 linux_set_current(curthread); 1094 pdev = device_get_softc(dev); 1095 if (pdev->pdrv->bsd_iov_add_vf != NULL) 1096 error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config); 
1097 else 1098 error = EINVAL; 1099 return (error); 1100 } 1101 1102 static int 1103 _linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc) 1104 { 1105 int error; 1106 1107 linux_set_current(curthread); 1108 spin_lock(&pci_lock); 1109 list_add(&pdrv->node, &pci_drivers); 1110 spin_unlock(&pci_lock); 1111 if (pdrv->bsddriver.name == NULL) 1112 pdrv->bsddriver.name = pdrv->name; 1113 pdrv->bsddriver.methods = pci_methods; 1114 pdrv->bsddriver.size = sizeof(struct pci_dev); 1115 1116 bus_topo_lock(); 1117 error = devclass_add_driver(dc, &pdrv->bsddriver, 1118 BUS_PASS_DEFAULT, &pdrv->bsdclass); 1119 bus_topo_unlock(); 1120 return (-error); 1121 } 1122 1123 int 1124 linux_pci_register_driver(struct pci_driver *pdrv) 1125 { 1126 devclass_t dc; 1127 1128 pdrv->isdrm = strcmp(pdrv->name, "drmn") == 0; 1129 dc = pdrv->isdrm ? devclass_create("vgapci") : devclass_find("pci"); 1130 if (dc == NULL) 1131 return (-ENXIO); 1132 return (_linux_pci_register_driver(pdrv, dc)); 1133 } 1134 1135 static struct resource_list_entry * 1136 lkpi_pci_get_bar(struct pci_dev *pdev, int bar, bool reserve) 1137 { 1138 int type; 1139 1140 type = pci_resource_type(pdev, bar); 1141 if (type < 0) 1142 return (NULL); 1143 bar = PCIR_BAR(bar); 1144 return (linux_pci_get_rle(pdev, type, bar, reserve)); 1145 } 1146 1147 struct device * 1148 lkpi_pci_find_irq_dev(unsigned int irq) 1149 { 1150 struct pci_dev *pdev; 1151 struct device *found; 1152 1153 found = NULL; 1154 spin_lock(&pci_lock); 1155 list_for_each_entry(pdev, &pci_devices, links) { 1156 if (irq == pdev->dev.irq || 1157 (irq >= pdev->dev.irq_start && irq < pdev->dev.irq_end)) { 1158 found = &pdev->dev; 1159 break; 1160 } 1161 } 1162 spin_unlock(&pci_lock); 1163 return (found); 1164 } 1165 1166 unsigned long 1167 pci_resource_start(struct pci_dev *pdev, int bar) 1168 { 1169 struct resource_list_entry *rle; 1170 rman_res_t newstart; 1171 device_t dev; 1172 int error; 1173 1174 if ((rle = lkpi_pci_get_bar(pdev, bar, true)) == NULL) 1175 return (0); 1176 dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ? 1177 device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev; 1178 error = bus_translate_resource(dev, rle->type, rle->start, &newstart); 1179 if (error != 0) { 1180 device_printf(pdev->dev.bsddev, 1181 "translate of %#jx failed: %d\n", 1182 (uintmax_t)rle->start, error); 1183 return (0); 1184 } 1185 return (newstart); 1186 } 1187 1188 unsigned long 1189 pci_resource_len(struct pci_dev *pdev, int bar) 1190 { 1191 struct resource_list_entry *rle; 1192 1193 if ((rle = lkpi_pci_get_bar(pdev, bar, true)) == NULL) 1194 return (0); 1195 return (rle->count); 1196 } 1197 1198 static int 1199 lkpi_pci_request_region(struct pci_dev *pdev, int bar, const char *res_name, 1200 bool managed) 1201 { 1202 struct resource *res; 1203 struct pci_devres *dr; 1204 struct pci_mmio_region *mmio; 1205 int rid; 1206 int type; 1207 1208 if (!lkpi_pci_bar_id_valid(bar)) 1209 return (-EINVAL); 1210 1211 /* 1212 * If the bar is not valid, return success without adding the BAR; 1213 * otherwise linuxkpi_pcim_request_all_regions() will error. 1214 */ 1215 if (pci_resource_len(pdev, bar) == 0) 1216 return (0); 1217 /* Likewise if it is neither IO nor MEM, nothing to do for us. 
*/ 1218 type = pci_resource_type(pdev, bar); 1219 if (type < 0) 1220 return (0); 1221 1222 rid = PCIR_BAR(bar); 1223 res = bus_alloc_resource_any(pdev->dev.bsddev, type, &rid, 1224 RF_ACTIVE|RF_SHAREABLE); 1225 if (res == NULL) { 1226 device_printf(pdev->dev.bsddev, "%s: failed to alloc " 1227 "bar %d type %d rid %d\n", 1228 __func__, bar, type, PCIR_BAR(bar)); 1229 return (-ENODEV); 1230 } 1231 1232 /* 1233 * It seems there is an implicit devres tracking on these if the device 1234 * is managed (lkpi_pci_devres_find() case); otherwise the resources are 1235 * not automatically freed on FreeBSD/LinuxKPI though they should be/are 1236 * expected to be by Linux drivers. 1237 * Otherwise if we are called from a pcim-function with the managed 1238 * argument set, we need to track devres independent of pdev->managed. 1239 */ 1240 if (managed) 1241 dr = lkpi_pci_devres_get_alloc(pdev); 1242 else 1243 dr = lkpi_pci_devres_find(pdev); 1244 if (dr != NULL) { 1245 dr->region_mask |= (1 << bar); 1246 dr->region_table[bar] = res; 1247 } 1248 1249 /* Even if the device is not managed we need to track it for iomap. */ 1250 mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO); 1251 mmio->rid = PCIR_BAR(bar); 1252 mmio->type = type; 1253 mmio->res = res; 1254 TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next); 1255 1256 return (0); 1257 } 1258 1259 int 1260 linuxkpi_pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) 1261 { 1262 return (lkpi_pci_request_region(pdev, bar, res_name, false)); 1263 } 1264 1265 int 1266 linuxkpi_pci_request_regions(struct pci_dev *pdev, const char *res_name) 1267 { 1268 int error; 1269 int i; 1270 1271 for (i = 0; i <= PCIR_MAX_BAR_0; i++) { 1272 error = pci_request_region(pdev, i, res_name); 1273 if (error && error != -ENODEV) { 1274 pci_release_regions(pdev); 1275 return (error); 1276 } 1277 } 1278 return (0); 1279 } 1280 1281 int 1282 linuxkpi_pcim_request_all_regions(struct pci_dev *pdev, const char *res_name) 1283 { 1284 int bar, error; 1285 1286 for (bar = 0; bar <= PCIR_MAX_BAR_0; bar++) { 1287 error = lkpi_pci_request_region(pdev, bar, res_name, true); 1288 if (error != 0) { 1289 device_printf(pdev->dev.bsddev, "%s: bar %d res_name '%s': " 1290 "lkpi_pci_request_region returned %d\n", __func__, 1291 bar, res_name, error); 1292 pci_release_regions(pdev); 1293 return (error); 1294 } 1295 } 1296 return (0); 1297 } 1298 1299 void 1300 linuxkpi_pci_release_region(struct pci_dev *pdev, int bar) 1301 { 1302 struct resource_list_entry *rle; 1303 struct pci_devres *dr; 1304 struct pci_mmio_region *mmio, *p; 1305 1306 if ((rle = lkpi_pci_get_bar(pdev, bar, false)) == NULL) 1307 return; 1308 1309 /* 1310 * As we implicitly track the requests we also need to clear them on 1311 * release. Do clear before resource release. 
1312 */ 1313 dr = lkpi_pci_devres_find(pdev); 1314 if (dr != NULL) { 1315 KASSERT(dr->region_table[bar] == rle->res, ("%s: pdev %p bar %d" 1316 " region_table res %p != rel->res %p\n", __func__, pdev, 1317 bar, dr->region_table[bar], rle->res)); 1318 dr->region_table[bar] = NULL; 1319 dr->region_mask &= ~(1 << bar); 1320 } 1321 1322 TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) { 1323 if (rle->res != (void *)rman_get_bushandle(mmio->res)) 1324 continue; 1325 TAILQ_REMOVE(&pdev->mmio, mmio, next); 1326 free(mmio, M_DEVBUF); 1327 } 1328 1329 bus_release_resource(pdev->dev.bsddev, rle->type, rle->rid, rle->res); 1330 } 1331 1332 void 1333 linuxkpi_pci_release_regions(struct pci_dev *pdev) 1334 { 1335 int i; 1336 1337 for (i = 0; i <= PCIR_MAX_BAR_0; i++) 1338 pci_release_region(pdev, i); 1339 } 1340 1341 int 1342 linux_pci_register_drm_driver(struct pci_driver *pdrv) 1343 { 1344 devclass_t dc; 1345 1346 dc = devclass_create("vgapci"); 1347 if (dc == NULL) 1348 return (-ENXIO); 1349 pdrv->isdrm = true; 1350 pdrv->name = "drmn"; 1351 return (_linux_pci_register_driver(pdrv, dc)); 1352 } 1353 1354 void 1355 linux_pci_unregister_driver(struct pci_driver *pdrv) 1356 { 1357 devclass_t bus; 1358 1359 bus = devclass_find(pdrv->isdrm ? "vgapci" : "pci"); 1360 1361 spin_lock(&pci_lock); 1362 list_del(&pdrv->node); 1363 spin_unlock(&pci_lock); 1364 bus_topo_lock(); 1365 if (bus != NULL) 1366 devclass_delete_driver(bus, &pdrv->bsddriver); 1367 bus_topo_unlock(); 1368 } 1369 1370 void 1371 linux_pci_unregister_drm_driver(struct pci_driver *pdrv) 1372 { 1373 devclass_t bus; 1374 1375 bus = devclass_find("vgapci"); 1376 1377 spin_lock(&pci_lock); 1378 list_del(&pdrv->node); 1379 spin_unlock(&pci_lock); 1380 bus_topo_lock(); 1381 if (bus != NULL) 1382 devclass_delete_driver(bus, &pdrv->bsddriver); 1383 bus_topo_unlock(); 1384 } 1385 1386 int 1387 linuxkpi_pci_enable_msix(struct pci_dev *pdev, struct msix_entry *entries, 1388 int nreq) 1389 { 1390 struct resource_list_entry *rle; 1391 int error; 1392 int avail; 1393 int i; 1394 1395 avail = pci_msix_count(pdev->dev.bsddev); 1396 if (avail < nreq) { 1397 if (avail == 0) 1398 return -EINVAL; 1399 return avail; 1400 } 1401 avail = nreq; 1402 if ((error = -pci_alloc_msix(pdev->dev.bsddev, &avail)) != 0) 1403 return error; 1404 /* 1405 * Handle case where "pci_alloc_msix()" may allocate less 1406 * interrupts than available and return with no error: 1407 */ 1408 if (avail < nreq) { 1409 pci_release_msi(pdev->dev.bsddev); 1410 return avail; 1411 } 1412 rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false); 1413 pdev->dev.irq_start = rle->start; 1414 pdev->dev.irq_end = rle->start + avail; 1415 for (i = 0; i < nreq; i++) 1416 entries[i].vector = pdev->dev.irq_start + i; 1417 pdev->msix_enabled = true; 1418 return (0); 1419 } 1420 1421 int 1422 _lkpi_pci_enable_msi_range(struct pci_dev *pdev, int minvec, int maxvec) 1423 { 1424 struct resource_list_entry *rle; 1425 int error; 1426 int nvec; 1427 1428 if (maxvec < minvec) 1429 return (-EINVAL); 1430 1431 nvec = pci_msi_count(pdev->dev.bsddev); 1432 if (nvec < 1 || nvec < minvec) 1433 return (-ENOSPC); 1434 1435 nvec = min(nvec, maxvec); 1436 if ((error = -pci_alloc_msi(pdev->dev.bsddev, &nvec)) != 0) 1437 return error; 1438 1439 /* Native PCI might only ever ask for 32 vectors. 
 */
	if (nvec < minvec) {
		pci_release_msi(pdev->dev.bsddev);
		return (-ENOSPC);
	}

	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false);
	pdev->dev.irq_start = rle->start;
	pdev->dev.irq_end = rle->start + nvec;
	pdev->irq = rle->start;
	pdev->msi_enabled = true;
	return (0);
}

int
pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv,
    unsigned int flags)
{
	int error;

	if (flags & PCI_IRQ_MSIX) {
		struct msix_entry *entries;
		int i;

		entries = kcalloc(maxv, sizeof(*entries), GFP_KERNEL);
		if (entries == NULL) {
			error = -ENOMEM;
			goto out;
		}
		for (i = 0; i < maxv; ++i)
			entries[i].entry = i;
		error = pci_enable_msix(pdev, entries, maxv);
out:
		kfree(entries);
		if (error == 0 && pdev->msix_enabled)
			return (pdev->dev.irq_end - pdev->dev.irq_start);
	}
	if (flags & PCI_IRQ_MSI) {
		if (pci_msi_count(pdev->dev.bsddev) < minv)
			return (-ENOSPC);
		error = _lkpi_pci_enable_msi_range(pdev, minv, maxv);
		if (error == 0 && pdev->msi_enabled)
			return (pdev->dev.irq_end - pdev->dev.irq_start);
	}
	if (flags & PCI_IRQ_INTX) {
		if (pdev->irq)
			return (1);
	}

	return (-EINVAL);
}

struct msi_desc *
lkpi_pci_msi_desc_alloc(int irq)
{
	struct device *dev;
	struct pci_dev *pdev;
	struct msi_desc *desc;
	struct pci_devinfo *dinfo;
	struct pcicfg_msi *msi;
	int vec;

	dev = lkpi_pci_find_irq_dev(irq);
	if (dev == NULL)
		return (NULL);

	pdev = to_pci_dev(dev);

	if (pdev->msi_desc == NULL)
		return (NULL);

	if (irq < pdev->dev.irq_start || irq >= pdev->dev.irq_end)
		return (NULL);

	/* Index of this vector relative to the first allocated one. */
	vec = irq - pdev->dev.irq_start;

	if (pdev->msi_desc[vec] != NULL)
		return (pdev->msi_desc[vec]);

	dinfo = device_get_ivars(dev->bsddev);
	msi = &dinfo->cfg.msi;

	desc = malloc(sizeof(*desc), M_DEVBUF, M_WAITOK | M_ZERO);

	desc->pci.msi_attrib.is_64 =
	    (msi->msi_ctrl & PCIM_MSICTRL_64BIT) ?
true : false; 1525 desc->msg.data = msi->msi_data; 1526 1527 pdev->msi_desc[vec] = desc; 1528 1529 return (desc); 1530 } 1531 1532 bool 1533 pci_device_is_present(struct pci_dev *pdev) 1534 { 1535 device_t dev; 1536 1537 dev = pdev->dev.bsddev; 1538 1539 return (bus_child_present(dev)); 1540 } 1541 1542 CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t)); 1543 1544 struct linux_dma_obj { 1545 void *vaddr; 1546 uint64_t dma_addr; 1547 bus_dmamap_t dmamap; 1548 bus_dma_tag_t dmat; 1549 }; 1550 1551 static uma_zone_t linux_dma_trie_zone; 1552 static uma_zone_t linux_dma_obj_zone; 1553 1554 static void 1555 linux_dma_init(void *arg) 1556 { 1557 1558 linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie", 1559 pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL, 1560 UMA_ALIGN_PTR, 0); 1561 linux_dma_obj_zone = uma_zcreate("linux_dma_object", 1562 sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL, 1563 UMA_ALIGN_PTR, 0); 1564 lkpi_pci_nseg1_fail = counter_u64_alloc(M_WAITOK); 1565 } 1566 SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL); 1567 1568 static void 1569 linux_dma_uninit(void *arg) 1570 { 1571 1572 counter_u64_free(lkpi_pci_nseg1_fail); 1573 uma_zdestroy(linux_dma_obj_zone); 1574 uma_zdestroy(linux_dma_trie_zone); 1575 } 1576 SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL); 1577 1578 static void * 1579 linux_dma_trie_alloc(struct pctrie *ptree) 1580 { 1581 1582 return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT)); 1583 } 1584 1585 static void 1586 linux_dma_trie_free(struct pctrie *ptree, void *node) 1587 { 1588 1589 uma_zfree(linux_dma_trie_zone, node); 1590 } 1591 1592 PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc, 1593 linux_dma_trie_free); 1594 1595 #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) 1596 static dma_addr_t 1597 linux_dma_map_phys_common(struct device *dev, vm_paddr_t phys, size_t len, 1598 bus_dma_tag_t dmat) 1599 { 1600 struct linux_dma_priv *priv; 1601 struct linux_dma_obj *obj; 1602 int error, nseg; 1603 bus_dma_segment_t seg; 1604 1605 priv = dev->dma_priv; 1606 1607 /* 1608 * If the resultant mapping will be entirely 1:1 with the 1609 * physical address, short-circuit the remainder of the 1610 * bus_dma API. This avoids tracking collisions in the pctrie 1611 * with the additional benefit of reducing overhead. 
1612 */ 1613 if (bus_dma_id_mapped(dmat, phys, len)) 1614 return (phys); 1615 1616 obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT); 1617 if (obj == NULL) { 1618 return (0); 1619 } 1620 obj->dmat = dmat; 1621 1622 DMA_PRIV_LOCK(priv); 1623 if (bus_dmamap_create(obj->dmat, 0, &obj->dmamap) != 0) { 1624 DMA_PRIV_UNLOCK(priv); 1625 uma_zfree(linux_dma_obj_zone, obj); 1626 return (0); 1627 } 1628 1629 nseg = -1; 1630 error = _bus_dmamap_load_phys(obj->dmat, obj->dmamap, phys, len, 1631 BUS_DMA_NOWAIT, &seg, &nseg); 1632 if (error != 0) { 1633 bus_dmamap_destroy(obj->dmat, obj->dmamap); 1634 DMA_PRIV_UNLOCK(priv); 1635 uma_zfree(linux_dma_obj_zone, obj); 1636 counter_u64_add(lkpi_pci_nseg1_fail, 1); 1637 if (linuxkpi_debug) { 1638 device_printf(dev->bsddev, "%s: _bus_dmamap_load_phys " 1639 "error %d, phys %#018jx len %zu\n", __func__, 1640 error, (uintmax_t)phys, len); 1641 dump_stack(); 1642 } 1643 return (0); 1644 } 1645 1646 KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg)); 1647 obj->dma_addr = seg.ds_addr; 1648 1649 error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj); 1650 if (error != 0) { 1651 bus_dmamap_unload(obj->dmat, obj->dmamap); 1652 bus_dmamap_destroy(obj->dmat, obj->dmamap); 1653 DMA_PRIV_UNLOCK(priv); 1654 uma_zfree(linux_dma_obj_zone, obj); 1655 return (0); 1656 } 1657 DMA_PRIV_UNLOCK(priv); 1658 return (obj->dma_addr); 1659 } 1660 #else 1661 static dma_addr_t 1662 linux_dma_map_phys_common(struct device *dev __unused, vm_paddr_t phys, 1663 size_t len __unused, bus_dma_tag_t dmat __unused) 1664 { 1665 return (phys); 1666 } 1667 #endif 1668 1669 dma_addr_t 1670 lkpi_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len, 1671 enum dma_data_direction direction, unsigned long attrs) 1672 { 1673 struct linux_dma_priv *priv; 1674 dma_addr_t dma; 1675 1676 priv = dev->dma_priv; 1677 dma = linux_dma_map_phys_common(dev, phys, len, priv->dmat); 1678 if (dma_mapping_error(dev, dma)) 1679 return (dma); 1680 1681 if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 1682 dma_sync_single_for_device(dev, dma, len, direction); 1683 1684 return (dma); 1685 } 1686 1687 /* For backward compat only so we can MFC this. Remove before 15. */ 1688 dma_addr_t 1689 linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len) 1690 { 1691 return (lkpi_dma_map_phys(dev, phys, len, DMA_NONE, 0)); 1692 } 1693 1694 #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) 1695 void 1696 lkpi_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len, 1697 enum dma_data_direction direction, unsigned long attrs) 1698 { 1699 struct linux_dma_priv *priv; 1700 struct linux_dma_obj *obj; 1701 1702 priv = dev->dma_priv; 1703 1704 if (pctrie_is_empty(&priv->ptree)) 1705 return; 1706 1707 DMA_PRIV_LOCK(priv); 1708 obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr); 1709 if (obj == NULL) { 1710 DMA_PRIV_UNLOCK(priv); 1711 return; 1712 } 1713 LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr); 1714 1715 if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 1716 dma_sync_single_for_cpu(dev, dma_addr, len, direction); 1717 1718 bus_dmamap_unload(obj->dmat, obj->dmamap); 1719 bus_dmamap_destroy(obj->dmat, obj->dmamap); 1720 DMA_PRIV_UNLOCK(priv); 1721 1722 uma_zfree(linux_dma_obj_zone, obj); 1723 } 1724 #else 1725 void 1726 lkpi_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len, 1727 enum dma_data_direction direction, unsigned long attrs) 1728 { 1729 } 1730 #endif 1731 1732 /* For backward compat only so we can MFC this. Remove before 15. 
*/ 1733 void 1734 linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len) 1735 { 1736 lkpi_dma_unmap(dev, dma_addr, len, DMA_NONE, 0); 1737 } 1738 1739 void * 1740 linux_dma_alloc_coherent(struct device *dev, size_t size, 1741 dma_addr_t *dma_handle, gfp_t flag) 1742 { 1743 struct linux_dma_priv *priv; 1744 vm_paddr_t high; 1745 size_t align; 1746 void *mem; 1747 1748 if (dev == NULL || dev->dma_priv == NULL) { 1749 *dma_handle = 0; 1750 return (NULL); 1751 } 1752 priv = dev->dma_priv; 1753 if (priv->dma_coherent_mask) 1754 high = priv->dma_coherent_mask; 1755 else 1756 /* Coherent is lower 32bit only by default in Linux. */ 1757 high = BUS_SPACE_MAXADDR_32BIT; 1758 align = PAGE_SIZE << get_order(size); 1759 /* Always zero the allocation. */ 1760 flag |= M_ZERO; 1761 mem = kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high, 1762 align, 0, VM_MEMATTR_DEFAULT); 1763 if (mem != NULL) { 1764 *dma_handle = linux_dma_map_phys_common(dev, vtophys(mem), size, 1765 priv->dmat_coherent); 1766 if (*dma_handle == 0) { 1767 kmem_free(mem, size); 1768 mem = NULL; 1769 } 1770 } else { 1771 *dma_handle = 0; 1772 } 1773 return (mem); 1774 } 1775 1776 struct lkpi_devres_dmam_coherent { 1777 size_t size; 1778 dma_addr_t *handle; 1779 void *mem; 1780 }; 1781 1782 static void 1783 lkpi_dmam_free_coherent(struct device *dev, void *p) 1784 { 1785 struct lkpi_devres_dmam_coherent *dr; 1786 1787 dr = p; 1788 dma_free_coherent(dev, dr->size, dr->mem, *dr->handle); 1789 } 1790 1791 void * 1792 linuxkpi_dmam_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, 1793 gfp_t flag) 1794 { 1795 struct lkpi_devres_dmam_coherent *dr; 1796 1797 dr = lkpi_devres_alloc(lkpi_dmam_free_coherent, 1798 sizeof(*dr), GFP_KERNEL | __GFP_ZERO); 1799 1800 if (dr == NULL) 1801 return (NULL); 1802 1803 dr->size = size; 1804 dr->mem = linux_dma_alloc_coherent(dev, size, dma_handle, flag); 1805 dr->handle = dma_handle; 1806 if (dr->mem == NULL) { 1807 lkpi_devres_free(dr); 1808 return (NULL); 1809 } 1810 1811 lkpi_devres_add(dev, dr); 1812 return (dr->mem); 1813 } 1814 1815 void 1816 linuxkpi_dma_sync(struct device *dev, dma_addr_t dma_addr, size_t size, 1817 bus_dmasync_op_t op) 1818 { 1819 struct linux_dma_priv *priv; 1820 struct linux_dma_obj *obj; 1821 1822 priv = dev->dma_priv; 1823 1824 if (pctrie_is_empty(&priv->ptree)) 1825 return; 1826 1827 DMA_PRIV_LOCK(priv); 1828 obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr); 1829 if (obj == NULL) { 1830 DMA_PRIV_UNLOCK(priv); 1831 return; 1832 } 1833 1834 bus_dmamap_sync(obj->dmat, obj->dmamap, op); 1835 DMA_PRIV_UNLOCK(priv); 1836 } 1837 1838 int 1839 linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents, 1840 enum dma_data_direction direction, unsigned long attrs) 1841 { 1842 struct linux_dma_priv *priv; 1843 struct scatterlist *sg; 1844 int i, nseg; 1845 bus_dma_segment_t seg; 1846 1847 priv = dev->dma_priv; 1848 1849 DMA_PRIV_LOCK(priv); 1850 1851 /* create common DMA map in the first S/G entry */ 1852 if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) { 1853 DMA_PRIV_UNLOCK(priv); 1854 return (0); 1855 } 1856 1857 /* load all S/G list entries */ 1858 for_each_sg(sgl, sg, nents, i) { 1859 nseg = -1; 1860 if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map, 1861 sg_phys(sg), sg->length, BUS_DMA_NOWAIT, 1862 &seg, &nseg) != 0) { 1863 bus_dmamap_unload(priv->dmat, sgl->dma_map); 1864 bus_dmamap_destroy(priv->dmat, sgl->dma_map); 1865 DMA_PRIV_UNLOCK(priv); 1866 return (0); 1867 } 1868 KASSERT(nseg == 0, 1869 ("More than 
one segment (nseg=%d)", nseg + 1)); 1870 1871 sg_dma_address(sg) = seg.ds_addr; 1872 } 1873 1874 if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) != 0) 1875 goto skip_sync; 1876 1877 switch (direction) { 1878 case DMA_BIDIRECTIONAL: 1879 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE); 1880 break; 1881 case DMA_TO_DEVICE: 1882 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD); 1883 break; 1884 case DMA_FROM_DEVICE: 1885 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE); 1886 break; 1887 default: 1888 break; 1889 } 1890 skip_sync: 1891 1892 DMA_PRIV_UNLOCK(priv); 1893 1894 return (nents); 1895 } 1896 1897 void 1898 linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl, 1899 int nents __unused, enum dma_data_direction direction, 1900 unsigned long attrs) 1901 { 1902 struct linux_dma_priv *priv; 1903 1904 priv = dev->dma_priv; 1905 1906 DMA_PRIV_LOCK(priv); 1907 1908 if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) != 0) 1909 goto skip_sync; 1910 1911 switch (direction) { 1912 case DMA_BIDIRECTIONAL: 1913 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD); 1914 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD); 1915 break; 1916 case DMA_TO_DEVICE: 1917 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTWRITE); 1918 break; 1919 case DMA_FROM_DEVICE: 1920 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD); 1921 break; 1922 default: 1923 break; 1924 } 1925 skip_sync: 1926 1927 bus_dmamap_unload(priv->dmat, sgl->dma_map); 1928 bus_dmamap_destroy(priv->dmat, sgl->dma_map); 1929 DMA_PRIV_UNLOCK(priv); 1930 } 1931 1932 struct dma_pool { 1933 struct device *pool_device; 1934 uma_zone_t pool_zone; 1935 struct mtx pool_lock; 1936 bus_dma_tag_t pool_dmat; 1937 size_t pool_entry_size; 1938 struct pctrie pool_ptree; 1939 }; 1940 1941 #define DMA_POOL_LOCK(pool) mtx_lock(&(pool)->pool_lock) 1942 #define DMA_POOL_UNLOCK(pool) mtx_unlock(&(pool)->pool_lock) 1943 1944 static inline int 1945 dma_pool_obj_ctor(void *mem, int size, void *arg, int flags) 1946 { 1947 struct linux_dma_obj *obj = mem; 1948 struct dma_pool *pool = arg; 1949 int error, nseg; 1950 bus_dma_segment_t seg; 1951 1952 nseg = -1; 1953 DMA_POOL_LOCK(pool); 1954 error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap, 1955 vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT, 1956 &seg, &nseg); 1957 DMA_POOL_UNLOCK(pool); 1958 if (error != 0) { 1959 return (error); 1960 } 1961 KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg)); 1962 obj->dma_addr = seg.ds_addr; 1963 1964 return (0); 1965 } 1966 1967 static void 1968 dma_pool_obj_dtor(void *mem, int size, void *arg) 1969 { 1970 struct linux_dma_obj *obj = mem; 1971 struct dma_pool *pool = arg; 1972 1973 DMA_POOL_LOCK(pool); 1974 bus_dmamap_unload(pool->pool_dmat, obj->dmamap); 1975 DMA_POOL_UNLOCK(pool); 1976 } 1977 1978 static int 1979 dma_pool_obj_import(void *arg, void **store, int count, int domain __unused, 1980 int flags) 1981 { 1982 struct dma_pool *pool = arg; 1983 struct linux_dma_obj *obj; 1984 int error, i; 1985 1986 for (i = 0; i < count; i++) { 1987 obj = uma_zalloc(linux_dma_obj_zone, flags); 1988 if (obj == NULL) 1989 break; 1990 1991 error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr, 1992 BUS_DMA_NOWAIT, &obj->dmamap); 1993 if (error!= 0) { 1994 uma_zfree(linux_dma_obj_zone, obj); 1995 break; 1996 } 1997 1998 store[i] = obj; 1999 } 2000 2001 return (i); 2002 } 2003 2004 static void 2005 dma_pool_obj_release(void *arg, void **store, int count) 2006 { 2007 struct dma_pool 
*pool = arg; 2008 struct linux_dma_obj *obj; 2009 int i; 2010 2011 for (i = 0; i < count; i++) { 2012 obj = store[i]; 2013 bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap); 2014 uma_zfree(linux_dma_obj_zone, obj); 2015 } 2016 } 2017 2018 struct dma_pool * 2019 linux_dma_pool_create(char *name, struct device *dev, size_t size, 2020 size_t align, size_t boundary) 2021 { 2022 struct linux_dma_priv *priv; 2023 struct dma_pool *pool; 2024 2025 priv = dev->dma_priv; 2026 2027 pool = kzalloc(sizeof(*pool), GFP_KERNEL); 2028 pool->pool_device = dev; 2029 pool->pool_entry_size = size; 2030 2031 if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev), 2032 align, boundary, /* alignment, boundary */ 2033 priv->dma_mask, /* lowaddr */ 2034 BUS_SPACE_MAXADDR, /* highaddr */ 2035 NULL, NULL, /* filtfunc, filtfuncarg */ 2036 size, /* maxsize */ 2037 1, /* nsegments */ 2038 size, /* maxsegsz */ 2039 0, /* flags */ 2040 NULL, NULL, /* lockfunc, lockfuncarg */ 2041 &pool->pool_dmat)) { 2042 kfree(pool); 2043 return (NULL); 2044 } 2045 2046 pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor, 2047 dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import, 2048 dma_pool_obj_release, pool, 0); 2049 2050 mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF); 2051 pctrie_init(&pool->pool_ptree); 2052 2053 return (pool); 2054 } 2055 2056 void 2057 linux_dma_pool_destroy(struct dma_pool *pool) 2058 { 2059 2060 uma_zdestroy(pool->pool_zone); 2061 bus_dma_tag_destroy(pool->pool_dmat); 2062 mtx_destroy(&pool->pool_lock); 2063 kfree(pool); 2064 } 2065 2066 void 2067 lkpi_dmam_pool_destroy(struct device *dev, void *p) 2068 { 2069 struct dma_pool *pool; 2070 2071 pool = *(struct dma_pool **)p; 2072 LINUX_DMA_PCTRIE_RECLAIM(&pool->pool_ptree); 2073 linux_dma_pool_destroy(pool); 2074 } 2075 2076 void * 2077 linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, 2078 dma_addr_t *handle) 2079 { 2080 struct linux_dma_obj *obj; 2081 2082 obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags & GFP_NATIVE_MASK); 2083 if (obj == NULL) 2084 return (NULL); 2085 2086 DMA_POOL_LOCK(pool); 2087 if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) { 2088 DMA_POOL_UNLOCK(pool); 2089 uma_zfree_arg(pool->pool_zone, obj, pool); 2090 return (NULL); 2091 } 2092 DMA_POOL_UNLOCK(pool); 2093 2094 *handle = obj->dma_addr; 2095 return (obj->vaddr); 2096 } 2097 2098 void 2099 linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr) 2100 { 2101 struct linux_dma_obj *obj; 2102 2103 DMA_POOL_LOCK(pool); 2104 obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr); 2105 if (obj == NULL) { 2106 DMA_POOL_UNLOCK(pool); 2107 return; 2108 } 2109 LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr); 2110 DMA_POOL_UNLOCK(pool); 2111 2112 uma_zfree_arg(pool->pool_zone, obj, pool); 2113 } 2114 2115 static int 2116 linux_backlight_get_status(device_t dev, struct backlight_props *props) 2117 { 2118 struct pci_dev *pdev; 2119 2120 linux_set_current(curthread); 2121 pdev = device_get_softc(dev); 2122 2123 props->brightness = pdev->dev.bd->props.brightness; 2124 props->brightness = props->brightness * 100 / pdev->dev.bd->props.max_brightness; 2125 props->nlevels = 0; 2126 2127 return (0); 2128 } 2129 2130 static int 2131 linux_backlight_get_info(device_t dev, struct backlight_info *info) 2132 { 2133 struct pci_dev *pdev; 2134 2135 linux_set_current(curthread); 2136 pdev = device_get_softc(dev); 2137 2138 info->type = BACKLIGHT_TYPE_PANEL; 2139 strlcpy(info->name, pdev->dev.bd->name, BACKLIGHTMAXNAMELENGTH); 2140 
return (0); 2141 } 2142 2143 static int 2144 linux_backlight_update_status(device_t dev, struct backlight_props *props) 2145 { 2146 struct pci_dev *pdev; 2147 2148 linux_set_current(curthread); 2149 pdev = device_get_softc(dev); 2150 2151 pdev->dev.bd->props.brightness = pdev->dev.bd->props.max_brightness * 2152 props->brightness / 100; 2153 pdev->dev.bd->props.power = props->brightness == 0 ? 2154 4/* FB_BLANK_POWERDOWN */ : 0/* FB_BLANK_UNBLANK */; 2155 return (pdev->dev.bd->ops->update_status(pdev->dev.bd)); 2156 } 2157 2158 struct backlight_device * 2159 linux_backlight_device_register(const char *name, struct device *dev, 2160 void *data, const struct backlight_ops *ops, struct backlight_properties *props) 2161 { 2162 2163 dev->bd = malloc(sizeof(*dev->bd), M_DEVBUF, M_WAITOK | M_ZERO); 2164 dev->bd->ops = ops; 2165 dev->bd->props.type = props->type; 2166 dev->bd->props.max_brightness = props->max_brightness; 2167 dev->bd->props.brightness = props->brightness; 2168 dev->bd->props.power = props->power; 2169 dev->bd->data = data; 2170 dev->bd->dev = dev; 2171 dev->bd->name = strdup(name, M_DEVBUF); 2172 2173 dev->backlight_dev = backlight_register(name, dev->bsddev); 2174 2175 return (dev->bd); 2176 } 2177 2178 void 2179 linux_backlight_device_unregister(struct backlight_device *bd) 2180 { 2181 2182 backlight_destroy(bd->dev->backlight_dev); 2183 free(bd->name, M_DEVBUF); 2184 free(bd, M_DEVBUF); 2185 } 2186
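/*
 * Example usage (an illustrative sketch only, not compiled as part of this
 * file): a minimal LinuxKPI PCI driver bound through the pci_methods glue
 * above.  pci_register_driver() maps to linux_pci_register_driver(); the
 * driver's probe callback is then invoked from linux_pci_attach_device()
 * and its remove callback from linux_pci_detach_device().  The exdrv_*
 * names and the vendor/device IDs below are placeholders, not real
 * hardware.
 *
 *	static const struct pci_device_id exdrv_id_table[] = {
 *		{ .vendor = 0x1234, .device = 0x5678 },
 *		{ 0, }
 *	};
 *
 *	static int
 *	exdrv_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 *	{
 *		void __iomem *regs;
 *		int error;
 *
 *		// Managed enable; undone by lkpi_pci_devres_release().
 *		error = pcim_enable_device(pdev);
 *		if (error != 0)
 *			return (error);
 *		pci_set_master(pdev);
 *
 *		// Managed BAR 0 mapping; released on detach via
 *		// lkpi_pcim_iomap_table_release().
 *		regs = pcim_iomap(pdev, 0, 0);
 *		if (regs == NULL)
 *			return (-ENOMEM);
 *		return (0);
 *	}
 *
 *	static void
 *	exdrv_remove(struct pci_dev *pdev)
 *	{
 *		// Managed resources are released by devres on detach.
 *	}
 *
 *	static struct pci_driver exdrv_driver = {
 *		.name = "exdrv",
 *		.id_table = exdrv_id_table,
 *		.probe = exdrv_probe,
 *		.remove = exdrv_remove,
 *	};
 *
 * Such a driver would call pci_register_driver(&exdrv_driver) from its
 * module init path and pci_unregister_driver(&exdrv_driver) on unload.
 */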