1 /*- 2 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd. 3 * All rights reserved. 4 * Copyright (c) 2020-2025 The FreeBSD Foundation 5 * 6 * Portions of this software were developed by Björn Zeeb 7 * under sponsorship from the FreeBSD Foundation. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice unmodified, this list of conditions, and the following 14 * disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 20 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 21 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 22 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 23 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 24 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 28 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 29 */ 30 31 #include <sys/param.h> 32 #include <sys/systm.h> 33 #include <sys/bus.h> 34 #include <sys/malloc.h> 35 #include <sys/kernel.h> 36 #include <sys/sysctl.h> 37 #include <sys/lock.h> 38 #include <sys/mutex.h> 39 #include <sys/fcntl.h> 40 #include <sys/file.h> 41 #include <sys/filio.h> 42 #include <sys/pciio.h> 43 #include <sys/pctrie.h> 44 #include <sys/rman.h> 45 #include <sys/rwlock.h> 46 #include <sys/stdarg.h> 47 48 #include <vm/vm.h> 49 #include <vm/pmap.h> 50 51 #include <machine/bus.h> 52 #include <machine/resource.h> 53 54 #include <dev/pci/pcivar.h> 55 #include <dev/pci/pci_private.h> 56 #include <dev/pci/pci_iov.h> 57 #include <dev/backlight/backlight.h> 58 59 #include <linux/kernel.h> 60 #include <linux/kobject.h> 61 #include <linux/device.h> 62 #include <linux/slab.h> 63 #include <linux/module.h> 64 #include <linux/cdev.h> 65 #include <linux/file.h> 66 #include <linux/sysfs.h> 67 #include <linux/mm.h> 68 #include <linux/io.h> 69 #include <linux/vmalloc.h> 70 #define WANT_NATIVE_PCI_GET_SLOT 71 #include <linux/pci.h> 72 #include <linux/compat.h> 73 74 #include <linux/backlight.h> 75 76 #include "backlight_if.h" 77 #include "pcib_if.h" 78 79 /* Undef the linux function macro defined in linux/pci.h */ 80 #undef pci_get_class 81 82 extern int linuxkpi_debug; 83 84 SYSCTL_DECL(_compat_linuxkpi); 85 86 static counter_u64_t lkpi_pci_nseg1_fail; 87 SYSCTL_COUNTER_U64(_compat_linuxkpi, OID_AUTO, lkpi_pci_nseg1_fail, CTLFLAG_RD, 88 &lkpi_pci_nseg1_fail, "Count of busdma mapping failures of single-segment"); 89 90 static device_probe_t linux_pci_probe; 91 static device_attach_t linux_pci_attach; 92 static device_detach_t linux_pci_detach; 93 static device_suspend_t linux_pci_suspend; 94 static device_resume_t linux_pci_resume; 95 static device_shutdown_t linux_pci_shutdown; 96 static pci_iov_init_t linux_pci_iov_init; 97 static pci_iov_uninit_t linux_pci_iov_uninit; 98 static pci_iov_add_vf_t linux_pci_iov_add_vf; 99 static int linux_backlight_get_status(device_t 
    dev, struct backlight_props *props);
static int linux_backlight_update_status(device_t dev,
    struct backlight_props *props);
static int linux_backlight_get_info(device_t dev, struct backlight_info *info);
static void lkpi_pcim_iomap_table_release(struct device *, void *);

static device_method_t pci_methods[] = {
	DEVMETHOD(device_probe, linux_pci_probe),
	DEVMETHOD(device_attach, linux_pci_attach),
	DEVMETHOD(device_detach, linux_pci_detach),
	DEVMETHOD(device_suspend, linux_pci_suspend),
	DEVMETHOD(device_resume, linux_pci_resume),
	DEVMETHOD(device_shutdown, linux_pci_shutdown),
	DEVMETHOD(pci_iov_init, linux_pci_iov_init),
	DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf),

	/* Bus interface. */
	DEVMETHOD(bus_add_child, bus_generic_add_child),

	/* Backlight interface. */
	DEVMETHOD(backlight_update_status, linux_backlight_update_status),
	DEVMETHOD(backlight_get_status, linux_backlight_get_status),
	DEVMETHOD(backlight_get_info, linux_backlight_get_info),
	DEVMETHOD_END
};

const char *pci_power_names[] = {
	"UNKNOWN", "D0", "D1", "D2", "D3hot", "D3cold"
};

/* We need some meta-struct to keep track of these for devres. */
struct pci_devres {
	bool enable_io;
	/* PCIR_MAX_BAR_0 + 1 = 6 => BIT(0..5). */
	uint8_t region_mask;
	struct resource *region_table[PCIR_MAX_BAR_0 + 1]; /* Not needed. */
};
struct pcim_iomap_devres {
	void *mmio_table[PCIR_MAX_BAR_0 + 1];
	struct resource *res_table[PCIR_MAX_BAR_0 + 1];
};

struct linux_dma_priv {
	uint64_t dma_mask;
	bus_dma_tag_t dmat;
	uint64_t dma_coherent_mask;
	bus_dma_tag_t dmat_coherent;
	struct mtx lock;
	struct pctrie ptree;
};
#define	DMA_PRIV_LOCK(priv) mtx_lock(&(priv)->lock)
#define	DMA_PRIV_UNLOCK(priv) mtx_unlock(&(priv)->lock)

static void
lkpi_set_pcim_iomap_devres(struct pcim_iomap_devres *dr, int bar,
    void *res)
{
	dr->mmio_table[bar] = (void *)rman_get_bushandle(res);
	dr->res_table[bar] = res;
}

static bool
lkpi_pci_bar_id_valid(int bar)
{
	if (bar < 0 || bar > PCIR_MAX_BAR_0)
		return (false);

	return (true);
}

static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;

	priv = pdev->dev.dma_priv;
	if (priv->dmat)
		bus_dma_tag_destroy(priv->dmat);
	if (priv->dmat_coherent)
		bus_dma_tag_destroy(priv->dmat_coherent);
	mtx_destroy(&priv->lock);
	pdev->dev.dma_priv = NULL;
	free(priv, M_DEVBUF);
	return (0);
}

static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;
	int error;

	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);

	mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);
	pctrie_init(&priv->ptree);

	pdev->dev.dma_priv = priv;

	/* Create the default DMA tags. */
	error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
	if (error != 0)
		goto err;
	/* Coherent is lower 32bit only by default in Linux.
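	 * Drivers that need a wider coherent mask typically raise it later
	 * (e.g. via dma_set_coherent_mask()); linux_dma_tag_init_coherent()
	 * then destroys and recreates the coherent tag with the new mask.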
*/ 203 error = linux_dma_tag_init_coherent(&pdev->dev, DMA_BIT_MASK(32)); 204 if (error != 0) 205 goto err; 206 207 return (error); 208 209 err: 210 linux_pdev_dma_uninit(pdev); 211 return (error); 212 } 213 214 int 215 linux_dma_tag_init(struct device *dev, u64 dma_mask) 216 { 217 struct linux_dma_priv *priv; 218 int error; 219 220 priv = dev->dma_priv; 221 222 if (priv->dmat) { 223 if (priv->dma_mask == dma_mask) 224 return (0); 225 226 bus_dma_tag_destroy(priv->dmat); 227 } 228 229 priv->dma_mask = dma_mask; 230 231 error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev), 232 1, 0, /* alignment, boundary */ 233 dma_mask, /* lowaddr */ 234 BUS_SPACE_MAXADDR, /* highaddr */ 235 NULL, NULL, /* filtfunc, filtfuncarg */ 236 BUS_SPACE_MAXSIZE, /* maxsize */ 237 1, /* nsegments */ 238 BUS_SPACE_MAXSIZE, /* maxsegsz */ 239 0, /* flags */ 240 NULL, NULL, /* lockfunc, lockfuncarg */ 241 &priv->dmat); 242 return (-error); 243 } 244 245 int 246 linux_dma_tag_init_coherent(struct device *dev, u64 dma_mask) 247 { 248 struct linux_dma_priv *priv; 249 int error; 250 251 priv = dev->dma_priv; 252 253 if (priv->dmat_coherent) { 254 if (priv->dma_coherent_mask == dma_mask) 255 return (0); 256 257 bus_dma_tag_destroy(priv->dmat_coherent); 258 } 259 260 priv->dma_coherent_mask = dma_mask; 261 262 error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev), 263 1, 0, /* alignment, boundary */ 264 dma_mask, /* lowaddr */ 265 BUS_SPACE_MAXADDR, /* highaddr */ 266 NULL, NULL, /* filtfunc, filtfuncarg */ 267 BUS_SPACE_MAXSIZE, /* maxsize */ 268 1, /* nsegments */ 269 BUS_SPACE_MAXSIZE, /* maxsegsz */ 270 0, /* flags */ 271 NULL, NULL, /* lockfunc, lockfuncarg */ 272 &priv->dmat_coherent); 273 return (-error); 274 } 275 276 static struct pci_driver * 277 linux_pci_find(device_t dev, const struct pci_device_id **idp) 278 { 279 const struct pci_device_id *id; 280 struct pci_driver *pdrv; 281 uint16_t vendor; 282 uint16_t device; 283 uint16_t subvendor; 284 uint16_t subdevice; 285 286 vendor = pci_get_vendor(dev); 287 device = pci_get_device(dev); 288 subvendor = pci_get_subvendor(dev); 289 subdevice = pci_get_subdevice(dev); 290 291 spin_lock(&pci_lock); 292 list_for_each_entry(pdrv, &pci_drivers, node) { 293 for (id = pdrv->id_table; id->vendor != 0; id++) { 294 if (vendor == id->vendor && 295 (PCI_ANY_ID == id->device || device == id->device) && 296 (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) && 297 (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) { 298 *idp = id; 299 spin_unlock(&pci_lock); 300 return (pdrv); 301 } 302 } 303 } 304 spin_unlock(&pci_lock); 305 return (NULL); 306 } 307 308 struct pci_dev * 309 lkpi_pci_get_device(uint32_t vendor, uint32_t device, struct pci_dev *odev) 310 { 311 struct pci_dev *pdev, *found; 312 313 found = NULL; 314 spin_lock(&pci_lock); 315 list_for_each_entry(pdev, &pci_devices, links) { 316 /* Walk until we find odev. 
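		 * Entries up to and including odev are skipped, so the search
		 * resumes with the next device, matching Linux's
		 * pci_get_device() iteration semantics.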
*/ 317 if (odev != NULL) { 318 if (pdev == odev) 319 odev = NULL; 320 continue; 321 } 322 323 if ((pdev->vendor == vendor || vendor == PCI_ANY_ID) && 324 (pdev->device == device || device == PCI_ANY_ID)) { 325 found = pdev; 326 break; 327 } 328 } 329 pci_dev_get(found); 330 spin_unlock(&pci_lock); 331 332 return (found); 333 } 334 335 static void 336 lkpi_pci_dev_release(struct device *dev) 337 { 338 339 lkpi_devres_release_free_list(dev); 340 spin_lock_destroy(&dev->devres_lock); 341 } 342 343 static int 344 lkpifill_pci_dev(device_t dev, struct pci_dev *pdev) 345 { 346 struct pci_devinfo *dinfo; 347 int error; 348 349 error = kobject_init_and_add(&pdev->dev.kobj, &linux_dev_ktype, 350 &linux_root_device.kobj, device_get_nameunit(dev)); 351 if (error != 0) { 352 printf("%s:%d: kobject_init_and_add returned %d\n", 353 __func__, __LINE__, error); 354 return (error); 355 } 356 357 pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev)); 358 pdev->vendor = pci_get_vendor(dev); 359 pdev->device = pci_get_device(dev); 360 pdev->subsystem_vendor = pci_get_subvendor(dev); 361 pdev->subsystem_device = pci_get_subdevice(dev); 362 pdev->class = pci_get_class(dev); 363 pdev->revision = pci_get_revid(dev); 364 pdev->path_name = kasprintf(GFP_KERNEL, "%04d:%02d:%02d.%d", 365 pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev), 366 pci_get_function(dev)); 367 368 pdev->bus = malloc(sizeof(*pdev->bus), M_DEVBUF, M_WAITOK | M_ZERO); 369 pdev->bus->number = pci_get_bus(dev); 370 pdev->bus->domain = pci_get_domain(dev); 371 372 /* Check if we have reached the root to satisfy pci_is_root_bus() */ 373 dinfo = device_get_ivars(dev); 374 if (dinfo->cfg.pcie.pcie_location != 0 && 375 dinfo->cfg.pcie.pcie_type == PCIEM_TYPE_ROOT_PORT) { 376 pdev->bus->self = NULL; 377 } else { 378 /* 379 * This should be the upstream bridge; pci_upstream_bridge() 380 * handles that case on demand as otherwise we'll shadow the 381 * entire PCI hierarchy. 
382 */ 383 pdev->bus->self = pdev; 384 } 385 pdev->dev.bsddev = dev; 386 pdev->dev.parent = &linux_root_device; 387 pdev->dev.release = lkpi_pci_dev_release; 388 INIT_LIST_HEAD(&pdev->dev.irqents); 389 390 if (pci_msi_count(dev) > 0) 391 pdev->msi_desc = malloc(pci_msi_count(dev) * 392 sizeof(*pdev->msi_desc), M_DEVBUF, M_WAITOK | M_ZERO); 393 394 spin_lock_init(&pdev->dev.devres_lock); 395 INIT_LIST_HEAD(&pdev->dev.devres_head); 396 397 return (0); 398 } 399 400 static void 401 lkpinew_pci_dev_release(struct device *dev) 402 { 403 struct pci_dev *pdev; 404 int i; 405 406 pdev = to_pci_dev(dev); 407 if (pdev->root != NULL) 408 pci_dev_put(pdev->root); 409 if (pdev->bus->self != pdev && pdev->bus->self != NULL) 410 pci_dev_put(pdev->bus->self); 411 free(pdev->bus, M_DEVBUF); 412 if (pdev->msi_desc != NULL) { 413 for (i = pci_msi_count(pdev->dev.bsddev) - 1; i >= 0; i--) 414 free(pdev->msi_desc[i], M_DEVBUF); 415 free(pdev->msi_desc, M_DEVBUF); 416 } 417 kfree(pdev->path_name); 418 free(pdev, M_DEVBUF); 419 } 420 421 struct pci_dev * 422 lkpinew_pci_dev(device_t dev) 423 { 424 struct pci_dev *pdev; 425 int error; 426 427 pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK|M_ZERO); 428 error = lkpifill_pci_dev(dev, pdev); 429 if (error != 0) { 430 free(pdev, M_DEVBUF); 431 return (NULL); 432 } 433 pdev->dev.release = lkpinew_pci_dev_release; 434 435 return (pdev); 436 } 437 438 struct pci_dev * 439 lkpi_pci_get_class(unsigned int class, struct pci_dev *from) 440 { 441 device_t dev; 442 device_t devfrom = NULL; 443 struct pci_dev *pdev; 444 445 if (from != NULL) 446 devfrom = from->dev.bsddev; 447 448 dev = pci_find_class_from(class >> 16, (class >> 8) & 0xFF, devfrom); 449 if (dev == NULL) 450 return (NULL); 451 452 pdev = lkpinew_pci_dev(dev); 453 return (pdev); 454 } 455 456 struct pci_dev * 457 lkpi_pci_get_base_class(unsigned int baseclass, struct pci_dev *from) 458 { 459 device_t dev; 460 device_t devfrom = NULL; 461 struct pci_dev *pdev; 462 463 if (from != NULL) 464 devfrom = from->dev.bsddev; 465 466 dev = pci_find_base_class_from(baseclass, devfrom); 467 if (dev == NULL) 468 return (NULL); 469 470 pdev = lkpinew_pci_dev(dev); 471 return (pdev); 472 } 473 474 struct pci_dev * 475 lkpi_pci_get_domain_bus_and_slot(int domain, unsigned int bus, 476 unsigned int devfn) 477 { 478 device_t dev; 479 struct pci_dev *pdev; 480 481 dev = pci_find_dbsf(domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn)); 482 if (dev == NULL) 483 return (NULL); 484 485 pdev = lkpinew_pci_dev(dev); 486 return (pdev); 487 } 488 489 struct pci_dev * 490 lkpi_pci_get_slot(struct pci_bus *pbus, unsigned int devfn) 491 { 492 device_t dev; 493 struct pci_dev *pdev; 494 495 dev = pci_find_bsf(pbus->number, PCI_SLOT(devfn), PCI_FUNC(devfn)); 496 if (dev == NULL) 497 return (NULL); 498 499 pdev = lkpinew_pci_dev(dev); 500 return (pdev); 501 } 502 503 static int 504 linux_pci_probe(device_t dev) 505 { 506 const struct pci_device_id *id; 507 struct pci_driver *pdrv; 508 509 if ((pdrv = linux_pci_find(dev, &id)) == NULL) 510 return (ENXIO); 511 if (device_get_driver(dev) != &pdrv->bsddriver) 512 return (ENXIO); 513 device_set_desc(dev, pdrv->name); 514 515 /* Assume BSS initialized (should never return BUS_PROBE_SPECIFIC). 
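	 * A zeroed bsd_probe_return therefore maps to BUS_PROBE_DEFAULT;
	 * drivers wanting a different probe priority set it explicitly.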
*/ 516 if (pdrv->bsd_probe_return == 0) 517 return (BUS_PROBE_DEFAULT); 518 else 519 return (pdrv->bsd_probe_return); 520 } 521 522 static int 523 linux_pci_attach(device_t dev) 524 { 525 const struct pci_device_id *id; 526 struct pci_driver *pdrv; 527 struct pci_dev *pdev; 528 529 pdrv = linux_pci_find(dev, &id); 530 pdev = device_get_softc(dev); 531 532 MPASS(pdrv != NULL); 533 MPASS(pdev != NULL); 534 535 return (linux_pci_attach_device(dev, pdrv, id, pdev)); 536 } 537 538 static struct resource_list_entry * 539 linux_pci_reserve_bar(struct pci_dev *pdev, struct resource_list *rl, 540 int type, int rid) 541 { 542 device_t dev; 543 struct resource *res; 544 545 KASSERT(type == SYS_RES_IOPORT || type == SYS_RES_MEMORY, 546 ("trying to reserve non-BAR type %d", type)); 547 548 dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ? 549 device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev; 550 res = pci_reserve_map(device_get_parent(dev), dev, type, &rid, 0, ~0, 551 1, 1, 0); 552 if (res == NULL) 553 return (NULL); 554 return (resource_list_find(rl, type, rid)); 555 } 556 557 static struct resource_list_entry * 558 linux_pci_get_rle(struct pci_dev *pdev, int type, int rid, bool reserve_bar) 559 { 560 struct pci_devinfo *dinfo; 561 struct resource_list *rl; 562 struct resource_list_entry *rle; 563 564 dinfo = device_get_ivars(pdev->dev.bsddev); 565 rl = &dinfo->resources; 566 rle = resource_list_find(rl, type, rid); 567 /* Reserve resources for this BAR if needed. */ 568 if (rle == NULL && reserve_bar) 569 rle = linux_pci_reserve_bar(pdev, rl, type, rid); 570 return (rle); 571 } 572 573 int 574 linux_pci_attach_device(device_t dev, struct pci_driver *pdrv, 575 const struct pci_device_id *id, struct pci_dev *pdev) 576 { 577 struct resource_list_entry *rle; 578 device_t parent; 579 struct pci_dev *pbus, *ppbus; 580 uintptr_t rid; 581 int error; 582 bool isdrm; 583 584 linux_set_current(curthread); 585 586 parent = device_get_parent(dev); 587 isdrm = pdrv != NULL && pdrv->isdrm; 588 589 if (isdrm) { 590 struct pci_devinfo *dinfo; 591 592 dinfo = device_get_ivars(parent); 593 device_set_ivars(dev, dinfo); 594 } 595 596 error = lkpifill_pci_dev(dev, pdev); 597 if (error != 0) 598 return (error); 599 600 if (isdrm) 601 PCI_GET_ID(device_get_parent(parent), parent, PCI_ID_RID, &rid); 602 else 603 PCI_GET_ID(parent, dev, PCI_ID_RID, &rid); 604 pdev->devfn = rid; 605 pdev->pdrv = pdrv; 606 rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0, false); 607 if (rle != NULL) 608 pdev->dev.irq = rle->start; 609 else 610 pdev->dev.irq = LINUX_IRQ_INVALID; 611 pdev->irq = pdev->dev.irq; 612 error = linux_pdev_dma_init(pdev); 613 if (error) 614 goto out_dma_init; 615 616 TAILQ_INIT(&pdev->mmio); 617 spin_lock_init(&pdev->pcie_cap_lock); 618 619 spin_lock(&pci_lock); 620 list_add(&pdev->links, &pci_devices); 621 spin_unlock(&pci_lock); 622 623 /* 624 * Create the hierarchy now as we cannot on demand later. 625 * Take special care of DRM as there is a non-PCI device in the chain. 
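	 * (For DRM the softc device is the drmn child, which is not itself a
	 * PCI device; the bridge chain is therefore built from its parent.)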
	 */
	pbus = pdev;
	if (isdrm) {
		pbus = lkpinew_pci_dev(parent);
		if (pbus == NULL) {
			error = ENXIO;
			goto out_dma_init;
		}
	}
	pcie_find_root_port(pbus);
	if (isdrm)
		pdev->root = pbus->root;
	ppbus = pci_upstream_bridge(pbus);
	while (ppbus != NULL && ppbus != pbus) {
		pbus = ppbus;
		ppbus = pci_upstream_bridge(pbus);
	}

	if (pdrv != NULL) {
		error = pdrv->probe(pdev, id);
		if (error)
			goto out_probe;
	}
	return (0);

	/* XXX the cleanup does not match the allocation up there. */
out_probe:
	free(pdev->bus, M_DEVBUF);
	spin_lock_destroy(&pdev->pcie_cap_lock);
	linux_pdev_dma_uninit(pdev);
out_dma_init:
	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);
	return (-error);
}

static int
linux_pci_detach(device_t dev)
{
	struct pci_dev *pdev;

	pdev = device_get_softc(dev);

	MPASS(pdev != NULL);

	device_set_desc(dev, NULL);

	return (linux_pci_detach_device(pdev));
}

int
linux_pci_detach_device(struct pci_dev *pdev)
{

	linux_set_current(curthread);

	if (pdev->pdrv != NULL)
		pdev->pdrv->remove(pdev);

	if (pdev->root != NULL)
		pci_dev_put(pdev->root);
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);

	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	spin_lock_destroy(&pdev->pcie_cap_lock);
	put_device(&pdev->dev);

	return (0);
}

static int
lkpi_pci_disable_dev(struct device *dev)
{

	(void) pci_disable_io(dev->bsddev, SYS_RES_MEMORY);
	(void) pci_disable_io(dev->bsddev, SYS_RES_IOPORT);
	return (0);
}

static struct pci_devres *
lkpi_pci_devres_get_alloc(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = lkpi_devres_find(&pdev->dev, lkpi_pci_devres_release, NULL, NULL);
	if (dr == NULL) {
		dr = lkpi_devres_alloc(lkpi_pci_devres_release, sizeof(*dr),
		    GFP_KERNEL | __GFP_ZERO);
		if (dr != NULL)
			lkpi_devres_add(&pdev->dev, dr);
	}

	return (dr);
}

static struct pci_devres *
lkpi_pci_devres_find(struct pci_dev *pdev)
{
	if (!pdev->managed)
		return (NULL);

	return (lkpi_pci_devres_get_alloc(pdev));
}

void
lkpi_pci_devres_release(struct device *dev, void *p)
{
	struct pci_devres *dr;
	struct pci_dev *pdev;
	int bar;

	pdev = to_pci_dev(dev);
	dr = p;

	if (pdev->msix_enabled)
		lkpi_pci_disable_msix(pdev);
	if (pdev->msi_enabled)
		lkpi_pci_disable_msi(pdev);

	if (dr->enable_io && lkpi_pci_disable_dev(dev) == 0)
		dr->enable_io = false;

	if (dr->region_mask == 0)
		return;
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {

		if ((dr->region_mask & (1 << bar)) == 0)
			continue;
		pci_release_region(pdev, bar);
	}
}

int
linuxkpi_pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int error;

	/* Here we cannot run through the pdev->managed check. */
	dr = lkpi_pci_devres_get_alloc(pdev);
	if (dr == NULL)
		return (-ENOMEM);

	/* If resources were enabled before do not do it again. */
	if (dr->enable_io)
		return (0);

	error = pci_enable_device(pdev);
	if (error == 0)
		dr->enable_io = true;

	/* This device is now managed.
*/ 783 pdev->managed = true; 784 785 return (error); 786 } 787 788 static struct pcim_iomap_devres * 789 lkpi_pcim_iomap_devres_find(struct pci_dev *pdev) 790 { 791 struct pcim_iomap_devres *dr; 792 793 dr = lkpi_devres_find(&pdev->dev, lkpi_pcim_iomap_table_release, 794 NULL, NULL); 795 if (dr == NULL) { 796 dr = lkpi_devres_alloc(lkpi_pcim_iomap_table_release, 797 sizeof(*dr), GFP_KERNEL | __GFP_ZERO); 798 if (dr != NULL) 799 lkpi_devres_add(&pdev->dev, dr); 800 } 801 802 if (dr == NULL) 803 device_printf(pdev->dev.bsddev, "%s: NULL\n", __func__); 804 805 return (dr); 806 } 807 808 void __iomem ** 809 linuxkpi_pcim_iomap_table(struct pci_dev *pdev) 810 { 811 struct pcim_iomap_devres *dr; 812 813 dr = lkpi_pcim_iomap_devres_find(pdev); 814 if (dr == NULL) 815 return (NULL); 816 817 /* 818 * If the driver has manually set a flag to be able to request the 819 * resource to use bus_read/write_<n>, return the shadow table. 820 */ 821 if (pdev->want_iomap_res) 822 return ((void **)dr->res_table); 823 824 /* This is the Linux default. */ 825 return (dr->mmio_table); 826 } 827 828 static struct resource * 829 _lkpi_pci_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen __unused) 830 { 831 struct pci_mmio_region *mmio, *p; 832 int type; 833 834 if (!lkpi_pci_bar_id_valid(bar)) 835 return (NULL); 836 837 type = pci_resource_type(pdev, bar); 838 if (type < 0) { 839 device_printf(pdev->dev.bsddev, "%s: bar %d type %d\n", 840 __func__, bar, type); 841 return (NULL); 842 } 843 844 /* 845 * Check for duplicate mappings. 846 * This can happen if a driver calls pci_request_region() first. 847 */ 848 TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) { 849 if (mmio->type == type && mmio->rid == PCIR_BAR(bar)) { 850 return (mmio->res); 851 } 852 } 853 854 mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO); 855 mmio->rid = PCIR_BAR(bar); 856 mmio->type = type; 857 mmio->res = bus_alloc_resource_any(pdev->dev.bsddev, mmio->type, 858 &mmio->rid, RF_ACTIVE|RF_SHAREABLE); 859 if (mmio->res == NULL) { 860 device_printf(pdev->dev.bsddev, "%s: failed to alloc " 861 "bar %d type %d rid %d\n", 862 __func__, bar, type, PCIR_BAR(bar)); 863 free(mmio, M_DEVBUF); 864 return (NULL); 865 } 866 TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next); 867 868 return (mmio->res); 869 } 870 871 void * 872 linuxkpi_pci_iomap_range(struct pci_dev *pdev, int bar, 873 unsigned long off, unsigned long maxlen) 874 { 875 struct resource *res; 876 877 if (!lkpi_pci_bar_id_valid(bar)) 878 return (NULL); 879 880 res = _lkpi_pci_iomap(pdev, bar, maxlen); 881 if (res == NULL) 882 return (NULL); 883 /* This is a FreeBSD extension so we can use bus_*(). 
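	 * An illustrative, hypothetical sequence for a driver opting in
	 * (BAR index, register offset and value are made up):
	 *	pdev->want_iomap_res = true;
	 *	struct resource *r = pci_iomap(pdev, 0, 0);
	 *	bus_write_4(r, 0x10, 0x1);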
	 */
	if (pdev->want_iomap_res)
		return (res);
	MPASS(off < rman_get_size(res));
	return ((void *)(rman_get_bushandle(res) + off));
}

void *
linuxkpi_pci_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
	if (!lkpi_pci_bar_id_valid(bar))
		return (NULL);

	return (linuxkpi_pci_iomap_range(pdev, bar, 0, maxlen));
}

void *
linuxkpi_pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
	struct pcim_iomap_devres *dr;
	void *res;

	if (!lkpi_pci_bar_id_valid(bar))
		return (NULL);

	dr = lkpi_pcim_iomap_devres_find(pdev);
	if (dr == NULL)
		return (NULL);

	if (dr->res_table[bar] != NULL)
		return (dr->res_table[bar]);

	res = linuxkpi_pci_iomap(pdev, bar, maxlen);
	if (res == NULL) {
		/*
		 * Do not free the devres in case there were
		 * other valid mappings before already.
		 */
		return (NULL);
	}
	lkpi_set_pcim_iomap_devres(dr, bar, res);

	return (res);
}

void
linuxkpi_pci_iounmap(struct pci_dev *pdev, void *res)
{
	struct pci_mmio_region *mmio, *p;
	bus_space_handle_t bh = (bus_space_handle_t)res;

	TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
		if (pdev->want_iomap_res) {
			if (res != mmio->res)
				continue;
		} else {
			if (bh < rman_get_bushandle(mmio->res) ||
			    bh >= rman_get_bushandle(mmio->res) +
			    rman_get_size(mmio->res))
				continue;
		}
		bus_release_resource(pdev->dev.bsddev,
		    mmio->type, mmio->rid, mmio->res);
		TAILQ_REMOVE(&pdev->mmio, mmio, next);
		free(mmio, M_DEVBUF);
		return;
	}
}

int
linuxkpi_pcim_iomap_regions(struct pci_dev *pdev, uint32_t mask, const char *name)
{
	struct pcim_iomap_devres *dr;
	void *res;
	uint32_t mappings;
	int bar;

	dr = lkpi_pcim_iomap_devres_find(pdev);
	if (dr == NULL)
		return (-ENOMEM);

	/* Now iomap all the requested (by "mask") ones. */
	for (bar = mappings = 0; mappings != mask; bar++) {
		if ((mask & (1 << bar)) == 0)
			continue;

		/* Requesting the same BAR twice is not allowed.
*/ 970 if (dr->mmio_table[bar] != NULL) { 971 device_printf(pdev->dev.bsddev, "%s: bar %d %p\n", 972 __func__, bar, dr->mmio_table[bar]); 973 goto err; 974 } 975 976 res = _lkpi_pci_iomap(pdev, bar, 0); 977 if (res == NULL) 978 goto err; 979 lkpi_set_pcim_iomap_devres(dr, bar, res); 980 981 mappings |= (1 << bar); 982 } 983 984 return (0); 985 err: 986 for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) { 987 if ((mappings & (1 << bar)) != 0) { 988 res = dr->mmio_table[bar]; 989 if (res == NULL) 990 continue; 991 pci_iounmap(pdev, res); 992 } 993 } 994 995 return (-EINVAL); 996 } 997 998 static void 999 lkpi_pcim_iomap_table_release(struct device *dev, void *p) 1000 { 1001 struct pcim_iomap_devres *dr; 1002 struct pci_dev *pdev; 1003 int bar; 1004 1005 dr = p; 1006 pdev = to_pci_dev(dev); 1007 for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) { 1008 1009 if (dr->mmio_table[bar] == NULL) 1010 continue; 1011 1012 pci_iounmap(pdev, dr->mmio_table[bar]); 1013 } 1014 } 1015 1016 static int 1017 linux_pci_suspend(device_t dev) 1018 { 1019 const struct dev_pm_ops *pmops; 1020 struct pm_message pm = { }; 1021 struct pci_dev *pdev; 1022 int error; 1023 1024 error = 0; 1025 linux_set_current(curthread); 1026 pdev = device_get_softc(dev); 1027 pmops = pdev->pdrv->driver.pm; 1028 1029 if (pdev->pdrv->suspend != NULL) 1030 error = -pdev->pdrv->suspend(pdev, pm); 1031 else if (pmops != NULL && pmops->suspend != NULL) { 1032 error = -pmops->suspend(&pdev->dev); 1033 if (error == 0 && pmops->suspend_late != NULL) 1034 error = -pmops->suspend_late(&pdev->dev); 1035 if (error == 0 && pmops->suspend_noirq != NULL) 1036 error = -pmops->suspend_noirq(&pdev->dev); 1037 } 1038 return (error); 1039 } 1040 1041 static int 1042 linux_pci_resume(device_t dev) 1043 { 1044 const struct dev_pm_ops *pmops; 1045 struct pci_dev *pdev; 1046 int error; 1047 1048 error = 0; 1049 linux_set_current(curthread); 1050 pdev = device_get_softc(dev); 1051 pmops = pdev->pdrv->driver.pm; 1052 1053 if (pdev->pdrv->resume != NULL) 1054 error = -pdev->pdrv->resume(pdev); 1055 else if (pmops != NULL && pmops->resume != NULL) { 1056 if (pmops->resume_early != NULL) 1057 error = -pmops->resume_early(&pdev->dev); 1058 if (error == 0 && pmops->resume != NULL) 1059 error = -pmops->resume(&pdev->dev); 1060 } 1061 return (error); 1062 } 1063 1064 static int 1065 linux_pci_shutdown(device_t dev) 1066 { 1067 struct pci_dev *pdev; 1068 1069 linux_set_current(curthread); 1070 pdev = device_get_softc(dev); 1071 if (pdev->pdrv->shutdown != NULL) 1072 pdev->pdrv->shutdown(pdev); 1073 return (0); 1074 } 1075 1076 static int 1077 linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config) 1078 { 1079 struct pci_dev *pdev; 1080 int error; 1081 1082 linux_set_current(curthread); 1083 pdev = device_get_softc(dev); 1084 if (pdev->pdrv->bsd_iov_init != NULL) 1085 error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config); 1086 else 1087 error = EINVAL; 1088 return (error); 1089 } 1090 1091 static void 1092 linux_pci_iov_uninit(device_t dev) 1093 { 1094 struct pci_dev *pdev; 1095 1096 linux_set_current(curthread); 1097 pdev = device_get_softc(dev); 1098 if (pdev->pdrv->bsd_iov_uninit != NULL) 1099 pdev->pdrv->bsd_iov_uninit(dev); 1100 } 1101 1102 static int 1103 linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config) 1104 { 1105 struct pci_dev *pdev; 1106 int error; 1107 1108 linux_set_current(curthread); 1109 pdev = device_get_softc(dev); 1110 if (pdev->pdrv->bsd_iov_add_vf != NULL) 1111 error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, 
vf_config); 1112 else 1113 error = EINVAL; 1114 return (error); 1115 } 1116 1117 static int 1118 _linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc) 1119 { 1120 int error; 1121 1122 linux_set_current(curthread); 1123 spin_lock(&pci_lock); 1124 list_add(&pdrv->node, &pci_drivers); 1125 spin_unlock(&pci_lock); 1126 if (pdrv->bsddriver.name == NULL) 1127 pdrv->bsddriver.name = pdrv->name; 1128 pdrv->bsddriver.methods = pci_methods; 1129 pdrv->bsddriver.size = sizeof(struct pci_dev); 1130 1131 bus_topo_lock(); 1132 error = devclass_add_driver(dc, &pdrv->bsddriver, 1133 BUS_PASS_DEFAULT, &pdrv->bsdclass); 1134 bus_topo_unlock(); 1135 return (-error); 1136 } 1137 1138 int 1139 linux_pci_register_driver(struct pci_driver *pdrv) 1140 { 1141 devclass_t dc; 1142 1143 pdrv->isdrm = strcmp(pdrv->name, "drmn") == 0; 1144 dc = pdrv->isdrm ? devclass_create("vgapci") : devclass_find("pci"); 1145 if (dc == NULL) 1146 return (-ENXIO); 1147 return (_linux_pci_register_driver(pdrv, dc)); 1148 } 1149 1150 static struct resource_list_entry * 1151 lkpi_pci_get_bar(struct pci_dev *pdev, int bar, bool reserve) 1152 { 1153 int type; 1154 1155 type = pci_resource_type(pdev, bar); 1156 if (type < 0) 1157 return (NULL); 1158 bar = PCIR_BAR(bar); 1159 return (linux_pci_get_rle(pdev, type, bar, reserve)); 1160 } 1161 1162 struct device * 1163 lkpi_pci_find_irq_dev(unsigned int irq) 1164 { 1165 struct pci_dev *pdev; 1166 struct device *found; 1167 1168 found = NULL; 1169 spin_lock(&pci_lock); 1170 list_for_each_entry(pdev, &pci_devices, links) { 1171 if (irq == pdev->dev.irq || 1172 (irq >= pdev->dev.irq_start && irq < pdev->dev.irq_end)) { 1173 found = &pdev->dev; 1174 break; 1175 } 1176 } 1177 spin_unlock(&pci_lock); 1178 return (found); 1179 } 1180 1181 unsigned long 1182 pci_resource_start(struct pci_dev *pdev, int bar) 1183 { 1184 struct resource_list_entry *rle; 1185 rman_res_t newstart; 1186 device_t dev; 1187 int error; 1188 1189 if ((rle = lkpi_pci_get_bar(pdev, bar, true)) == NULL) 1190 return (0); 1191 dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ? 1192 device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev; 1193 error = bus_translate_resource(dev, rle->type, rle->start, &newstart); 1194 if (error != 0) { 1195 device_printf(pdev->dev.bsddev, 1196 "translate of %#jx failed: %d\n", 1197 (uintmax_t)rle->start, error); 1198 return (0); 1199 } 1200 return (newstart); 1201 } 1202 1203 unsigned long 1204 pci_resource_len(struct pci_dev *pdev, int bar) 1205 { 1206 struct resource_list_entry *rle; 1207 1208 if ((rle = lkpi_pci_get_bar(pdev, bar, true)) == NULL) 1209 return (0); 1210 return (rle->count); 1211 } 1212 1213 static int 1214 lkpi_pci_request_region(struct pci_dev *pdev, int bar, const char *res_name, 1215 bool managed) 1216 { 1217 struct resource *res; 1218 struct pci_devres *dr; 1219 struct pci_mmio_region *mmio; 1220 int rid; 1221 int type; 1222 1223 if (!lkpi_pci_bar_id_valid(bar)) 1224 return (-EINVAL); 1225 1226 /* 1227 * If the bar is not valid, return success without adding the BAR; 1228 * otherwise linuxkpi_pcim_request_all_regions() will error. 1229 */ 1230 if (pci_resource_len(pdev, bar) == 0) 1231 return (0); 1232 /* Likewise if it is neither IO nor MEM, nothing to do for us. 
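	 * (pci_resource_type() returns a negative value for a BAR the device
	 * does not implement.)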
*/ 1233 type = pci_resource_type(pdev, bar); 1234 if (type < 0) 1235 return (0); 1236 1237 rid = PCIR_BAR(bar); 1238 res = bus_alloc_resource_any(pdev->dev.bsddev, type, &rid, 1239 RF_ACTIVE|RF_SHAREABLE); 1240 if (res == NULL) { 1241 device_printf(pdev->dev.bsddev, "%s: failed to alloc " 1242 "bar %d type %d rid %d\n", 1243 __func__, bar, type, PCIR_BAR(bar)); 1244 return (-ENODEV); 1245 } 1246 1247 /* 1248 * It seems there is an implicit devres tracking on these if the device 1249 * is managed (lkpi_pci_devres_find() case); otherwise the resources are 1250 * not automatically freed on FreeBSD/LinuxKPI though they should be/are 1251 * expected to be by Linux drivers. 1252 * Otherwise if we are called from a pcim-function with the managed 1253 * argument set, we need to track devres independent of pdev->managed. 1254 */ 1255 if (managed) 1256 dr = lkpi_pci_devres_get_alloc(pdev); 1257 else 1258 dr = lkpi_pci_devres_find(pdev); 1259 if (dr != NULL) { 1260 dr->region_mask |= (1 << bar); 1261 dr->region_table[bar] = res; 1262 } 1263 1264 /* Even if the device is not managed we need to track it for iomap. */ 1265 mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO); 1266 mmio->rid = PCIR_BAR(bar); 1267 mmio->type = type; 1268 mmio->res = res; 1269 TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next); 1270 1271 return (0); 1272 } 1273 1274 int 1275 linuxkpi_pci_request_region(struct pci_dev *pdev, int bar, const char *res_name) 1276 { 1277 return (lkpi_pci_request_region(pdev, bar, res_name, false)); 1278 } 1279 1280 int 1281 linuxkpi_pci_request_regions(struct pci_dev *pdev, const char *res_name) 1282 { 1283 int error; 1284 int i; 1285 1286 for (i = 0; i <= PCIR_MAX_BAR_0; i++) { 1287 error = pci_request_region(pdev, i, res_name); 1288 if (error && error != -ENODEV) { 1289 pci_release_regions(pdev); 1290 return (error); 1291 } 1292 } 1293 return (0); 1294 } 1295 1296 int 1297 linuxkpi_pcim_request_all_regions(struct pci_dev *pdev, const char *res_name) 1298 { 1299 int bar, error; 1300 1301 for (bar = 0; bar <= PCIR_MAX_BAR_0; bar++) { 1302 error = lkpi_pci_request_region(pdev, bar, res_name, true); 1303 if (error != 0) { 1304 device_printf(pdev->dev.bsddev, "%s: bar %d res_name '%s': " 1305 "lkpi_pci_request_region returned %d\n", __func__, 1306 bar, res_name, error); 1307 pci_release_regions(pdev); 1308 return (error); 1309 } 1310 } 1311 return (0); 1312 } 1313 1314 void 1315 linuxkpi_pci_release_region(struct pci_dev *pdev, int bar) 1316 { 1317 struct resource_list_entry *rle; 1318 struct pci_devres *dr; 1319 struct pci_mmio_region *mmio, *p; 1320 1321 if ((rle = lkpi_pci_get_bar(pdev, bar, false)) == NULL) 1322 return; 1323 1324 /* 1325 * As we implicitly track the requests we also need to clear them on 1326 * release. Do clear before resource release. 
1327 */ 1328 dr = lkpi_pci_devres_find(pdev); 1329 if (dr != NULL) { 1330 KASSERT(dr->region_table[bar] == rle->res, ("%s: pdev %p bar %d" 1331 " region_table res %p != rel->res %p\n", __func__, pdev, 1332 bar, dr->region_table[bar], rle->res)); 1333 dr->region_table[bar] = NULL; 1334 dr->region_mask &= ~(1 << bar); 1335 } 1336 1337 TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) { 1338 if (rle->res != (void *)rman_get_bushandle(mmio->res)) 1339 continue; 1340 TAILQ_REMOVE(&pdev->mmio, mmio, next); 1341 free(mmio, M_DEVBUF); 1342 } 1343 1344 bus_release_resource(pdev->dev.bsddev, rle->type, rle->rid, rle->res); 1345 } 1346 1347 void 1348 linuxkpi_pci_release_regions(struct pci_dev *pdev) 1349 { 1350 int i; 1351 1352 for (i = 0; i <= PCIR_MAX_BAR_0; i++) 1353 pci_release_region(pdev, i); 1354 } 1355 1356 int 1357 linux_pci_register_drm_driver(struct pci_driver *pdrv) 1358 { 1359 devclass_t dc; 1360 1361 dc = devclass_create("vgapci"); 1362 if (dc == NULL) 1363 return (-ENXIO); 1364 pdrv->isdrm = true; 1365 pdrv->name = "drmn"; 1366 return (_linux_pci_register_driver(pdrv, dc)); 1367 } 1368 1369 void 1370 linux_pci_unregister_driver(struct pci_driver *pdrv) 1371 { 1372 devclass_t bus; 1373 1374 bus = devclass_find(pdrv->isdrm ? "vgapci" : "pci"); 1375 1376 spin_lock(&pci_lock); 1377 list_del(&pdrv->node); 1378 spin_unlock(&pci_lock); 1379 bus_topo_lock(); 1380 if (bus != NULL) 1381 devclass_delete_driver(bus, &pdrv->bsddriver); 1382 bus_topo_unlock(); 1383 } 1384 1385 void 1386 linux_pci_unregister_drm_driver(struct pci_driver *pdrv) 1387 { 1388 devclass_t bus; 1389 1390 bus = devclass_find("vgapci"); 1391 1392 spin_lock(&pci_lock); 1393 list_del(&pdrv->node); 1394 spin_unlock(&pci_lock); 1395 bus_topo_lock(); 1396 if (bus != NULL) 1397 devclass_delete_driver(bus, &pdrv->bsddriver); 1398 bus_topo_unlock(); 1399 } 1400 1401 int 1402 linuxkpi_pci_enable_msix(struct pci_dev *pdev, struct msix_entry *entries, 1403 int nreq) 1404 { 1405 struct resource_list_entry *rle; 1406 int error; 1407 int avail; 1408 int i; 1409 1410 avail = pci_msix_count(pdev->dev.bsddev); 1411 if (avail < nreq) { 1412 if (avail == 0) 1413 return -EINVAL; 1414 return avail; 1415 } 1416 avail = nreq; 1417 if ((error = -pci_alloc_msix(pdev->dev.bsddev, &avail)) != 0) 1418 return error; 1419 /* 1420 * Handle case where "pci_alloc_msix()" may allocate less 1421 * interrupts than available and return with no error: 1422 */ 1423 if (avail < nreq) { 1424 pci_release_msi(pdev->dev.bsddev); 1425 return avail; 1426 } 1427 rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false); 1428 pdev->dev.irq_start = rle->start; 1429 pdev->dev.irq_end = rle->start + avail; 1430 for (i = 0; i < nreq; i++) 1431 entries[i].vector = pdev->dev.irq_start + i; 1432 pdev->msix_enabled = true; 1433 return (0); 1434 } 1435 1436 int 1437 _lkpi_pci_enable_msi_range(struct pci_dev *pdev, int minvec, int maxvec) 1438 { 1439 struct resource_list_entry *rle; 1440 int error; 1441 int nvec; 1442 1443 if (maxvec < minvec) 1444 return (-EINVAL); 1445 1446 nvec = pci_msi_count(pdev->dev.bsddev); 1447 if (nvec < 1 || nvec < minvec) 1448 return (-ENOSPC); 1449 1450 nvec = min(nvec, maxvec); 1451 if ((error = -pci_alloc_msi(pdev->dev.bsddev, &nvec)) != 0) 1452 return error; 1453 1454 /* Native PCI might only ever ask for 32 vectors. 
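	 * pci_alloc_msi() may therefore hand back fewer vectors than
	 * requested; release them and fail if we are still below minvec.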
	 */
	if (nvec < minvec) {
		pci_release_msi(pdev->dev.bsddev);
		return (-ENOSPC);
	}

	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false);
	pdev->dev.irq_start = rle->start;
	pdev->dev.irq_end = rle->start + nvec;
	pdev->irq = rle->start;
	pdev->msi_enabled = true;
	return (0);
}

int
pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv,
    unsigned int flags)
{
	int error;

	if (flags & PCI_IRQ_MSIX) {
		struct msix_entry *entries;
		int i;

		entries = kcalloc(maxv, sizeof(*entries), GFP_KERNEL);
		if (entries == NULL) {
			error = -ENOMEM;
			goto out;
		}
		for (i = 0; i < maxv; ++i)
			entries[i].entry = i;
		error = pci_enable_msix(pdev, entries, maxv);
out:
		kfree(entries);
		if (error == 0 && pdev->msix_enabled)
			return (pdev->dev.irq_end - pdev->dev.irq_start);
	}
	if (flags & PCI_IRQ_MSI) {
		if (pci_msi_count(pdev->dev.bsddev) < minv)
			return (-ENOSPC);
		error = _lkpi_pci_enable_msi_range(pdev, minv, maxv);
		if (error == 0 && pdev->msi_enabled)
			return (pdev->dev.irq_end - pdev->dev.irq_start);
	}
	if (flags & PCI_IRQ_INTX) {
		if (pdev->irq)
			return (1);
	}

	return (-EINVAL);
}

struct msi_desc *
lkpi_pci_msi_desc_alloc(int irq)
{
	struct device *dev;
	struct pci_dev *pdev;
	struct msi_desc *desc;
	struct pci_devinfo *dinfo;
	struct pcicfg_msi *msi;
	int vec;

	dev = lkpi_pci_find_irq_dev(irq);
	if (dev == NULL)
		return (NULL);

	pdev = to_pci_dev(dev);

	if (pdev->msi_desc == NULL)
		return (NULL);

	if (irq < pdev->dev.irq_start || irq >= pdev->dev.irq_end)
		return (NULL);

	vec = irq - pdev->dev.irq_start;

	if (pdev->msi_desc[vec] != NULL)
		return (pdev->msi_desc[vec]);

	dinfo = device_get_ivars(dev->bsddev);
	msi = &dinfo->cfg.msi;

	desc = malloc(sizeof(*desc), M_DEVBUF, M_WAITOK | M_ZERO);

	desc->pci.msi_attrib.is_64 =
	    (msi->msi_ctrl & PCIM_MSICTRL_64BIT) ?
true : false; 1540 desc->msg.data = msi->msi_data; 1541 1542 pdev->msi_desc[vec] = desc; 1543 1544 return (desc); 1545 } 1546 1547 bool 1548 pci_device_is_present(struct pci_dev *pdev) 1549 { 1550 device_t dev; 1551 1552 dev = pdev->dev.bsddev; 1553 1554 return (bus_child_present(dev)); 1555 } 1556 1557 CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t)); 1558 1559 struct linux_dma_obj { 1560 void *vaddr; 1561 uint64_t dma_addr; 1562 bus_dmamap_t dmamap; 1563 bus_dma_tag_t dmat; 1564 }; 1565 1566 static uma_zone_t linux_dma_trie_zone; 1567 static uma_zone_t linux_dma_obj_zone; 1568 1569 static void 1570 linux_dma_init(void *arg) 1571 { 1572 1573 linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie", 1574 pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL, 1575 UMA_ALIGN_PTR, 0); 1576 linux_dma_obj_zone = uma_zcreate("linux_dma_object", 1577 sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL, 1578 UMA_ALIGN_PTR, 0); 1579 lkpi_pci_nseg1_fail = counter_u64_alloc(M_WAITOK); 1580 } 1581 SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL); 1582 1583 static void 1584 linux_dma_uninit(void *arg) 1585 { 1586 1587 counter_u64_free(lkpi_pci_nseg1_fail); 1588 uma_zdestroy(linux_dma_obj_zone); 1589 uma_zdestroy(linux_dma_trie_zone); 1590 } 1591 SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL); 1592 1593 static void * 1594 linux_dma_trie_alloc(struct pctrie *ptree) 1595 { 1596 1597 return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT)); 1598 } 1599 1600 static void 1601 linux_dma_trie_free(struct pctrie *ptree, void *node) 1602 { 1603 1604 uma_zfree(linux_dma_trie_zone, node); 1605 } 1606 1607 PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc, 1608 linux_dma_trie_free); 1609 1610 #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) 1611 static dma_addr_t 1612 linux_dma_map_phys_common(struct device *dev, vm_paddr_t phys, size_t len, 1613 bus_dma_tag_t dmat) 1614 { 1615 struct linux_dma_priv *priv; 1616 struct linux_dma_obj *obj; 1617 int error, nseg; 1618 bus_dma_segment_t seg; 1619 1620 priv = dev->dma_priv; 1621 1622 /* 1623 * If the resultant mapping will be entirely 1:1 with the 1624 * physical address, short-circuit the remainder of the 1625 * bus_dma API. This avoids tracking collisions in the pctrie 1626 * with the additional benefit of reducing overhead. 
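	 * Every failure path below returns 0; callers such as
	 * lkpi_dma_map_phys() detect that via dma_mapping_error().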
1627 */ 1628 if (bus_dma_id_mapped(dmat, phys, len)) 1629 return (phys); 1630 1631 obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT); 1632 if (obj == NULL) { 1633 return (0); 1634 } 1635 obj->dmat = dmat; 1636 1637 DMA_PRIV_LOCK(priv); 1638 if (bus_dmamap_create(obj->dmat, 0, &obj->dmamap) != 0) { 1639 DMA_PRIV_UNLOCK(priv); 1640 uma_zfree(linux_dma_obj_zone, obj); 1641 return (0); 1642 } 1643 1644 nseg = -1; 1645 error = _bus_dmamap_load_phys(obj->dmat, obj->dmamap, phys, len, 1646 BUS_DMA_NOWAIT, &seg, &nseg); 1647 if (error != 0) { 1648 bus_dmamap_destroy(obj->dmat, obj->dmamap); 1649 DMA_PRIV_UNLOCK(priv); 1650 uma_zfree(linux_dma_obj_zone, obj); 1651 counter_u64_add(lkpi_pci_nseg1_fail, 1); 1652 if (linuxkpi_debug) { 1653 device_printf(dev->bsddev, "%s: _bus_dmamap_load_phys " 1654 "error %d, phys %#018jx len %zu\n", __func__, 1655 error, (uintmax_t)phys, len); 1656 dump_stack(); 1657 } 1658 return (0); 1659 } 1660 1661 KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg)); 1662 obj->dma_addr = seg.ds_addr; 1663 1664 error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj); 1665 if (error != 0) { 1666 bus_dmamap_unload(obj->dmat, obj->dmamap); 1667 bus_dmamap_destroy(obj->dmat, obj->dmamap); 1668 DMA_PRIV_UNLOCK(priv); 1669 uma_zfree(linux_dma_obj_zone, obj); 1670 return (0); 1671 } 1672 DMA_PRIV_UNLOCK(priv); 1673 return (obj->dma_addr); 1674 } 1675 #else 1676 static dma_addr_t 1677 linux_dma_map_phys_common(struct device *dev __unused, vm_paddr_t phys, 1678 size_t len __unused, bus_dma_tag_t dmat __unused) 1679 { 1680 return (phys); 1681 } 1682 #endif 1683 1684 dma_addr_t 1685 lkpi_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len, 1686 enum dma_data_direction direction, unsigned long attrs) 1687 { 1688 struct linux_dma_priv *priv; 1689 dma_addr_t dma; 1690 1691 priv = dev->dma_priv; 1692 dma = linux_dma_map_phys_common(dev, phys, len, priv->dmat); 1693 if (dma_mapping_error(dev, dma)) 1694 return (dma); 1695 1696 if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 1697 dma_sync_single_for_device(dev, dma, len, direction); 1698 1699 return (dma); 1700 } 1701 1702 /* For backward compat only so we can MFC this. Remove before 15. */ 1703 dma_addr_t 1704 linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len) 1705 { 1706 return (lkpi_dma_map_phys(dev, phys, len, DMA_NONE, 0)); 1707 } 1708 1709 #if defined(__i386__) || defined(__amd64__) || defined(__aarch64__) 1710 void 1711 lkpi_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len, 1712 enum dma_data_direction direction, unsigned long attrs) 1713 { 1714 struct linux_dma_priv *priv; 1715 struct linux_dma_obj *obj; 1716 1717 priv = dev->dma_priv; 1718 1719 if (pctrie_is_empty(&priv->ptree)) 1720 return; 1721 1722 DMA_PRIV_LOCK(priv); 1723 obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr); 1724 if (obj == NULL) { 1725 DMA_PRIV_UNLOCK(priv); 1726 return; 1727 } 1728 LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr); 1729 1730 if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0) 1731 dma_sync_single_for_cpu(dev, dma_addr, len, direction); 1732 1733 bus_dmamap_unload(obj->dmat, obj->dmamap); 1734 bus_dmamap_destroy(obj->dmat, obj->dmamap); 1735 DMA_PRIV_UNLOCK(priv); 1736 1737 uma_zfree(linux_dma_obj_zone, obj); 1738 } 1739 #else 1740 void 1741 lkpi_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len, 1742 enum dma_data_direction direction, unsigned long attrs) 1743 { 1744 } 1745 #endif 1746 1747 /* For backward compat only so we can MFC this. Remove before 15. 
*/ 1748 void 1749 linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len) 1750 { 1751 lkpi_dma_unmap(dev, dma_addr, len, DMA_NONE, 0); 1752 } 1753 1754 void * 1755 linux_dma_alloc_coherent(struct device *dev, size_t size, 1756 dma_addr_t *dma_handle, gfp_t flag) 1757 { 1758 struct linux_dma_priv *priv; 1759 vm_paddr_t high; 1760 size_t align; 1761 void *mem; 1762 1763 if (dev == NULL || dev->dma_priv == NULL) { 1764 *dma_handle = 0; 1765 return (NULL); 1766 } 1767 priv = dev->dma_priv; 1768 if (priv->dma_coherent_mask) 1769 high = priv->dma_coherent_mask; 1770 else 1771 /* Coherent is lower 32bit only by default in Linux. */ 1772 high = BUS_SPACE_MAXADDR_32BIT; 1773 align = PAGE_SIZE << get_order(size); 1774 /* Always zero the allocation. */ 1775 flag |= M_ZERO; 1776 mem = kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high, 1777 align, 0, VM_MEMATTR_DEFAULT); 1778 if (mem != NULL) { 1779 *dma_handle = linux_dma_map_phys_common(dev, vtophys(mem), size, 1780 priv->dmat_coherent); 1781 if (*dma_handle == 0) { 1782 kmem_free(mem, size); 1783 mem = NULL; 1784 } 1785 } else { 1786 *dma_handle = 0; 1787 } 1788 return (mem); 1789 } 1790 1791 struct lkpi_devres_dmam_coherent { 1792 size_t size; 1793 dma_addr_t *handle; 1794 void *mem; 1795 }; 1796 1797 static void 1798 lkpi_dmam_free_coherent(struct device *dev, void *p) 1799 { 1800 struct lkpi_devres_dmam_coherent *dr; 1801 1802 dr = p; 1803 dma_free_coherent(dev, dr->size, dr->mem, *dr->handle); 1804 } 1805 1806 void * 1807 linuxkpi_dmam_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, 1808 gfp_t flag) 1809 { 1810 struct lkpi_devres_dmam_coherent *dr; 1811 1812 dr = lkpi_devres_alloc(lkpi_dmam_free_coherent, 1813 sizeof(*dr), GFP_KERNEL | __GFP_ZERO); 1814 1815 if (dr == NULL) 1816 return (NULL); 1817 1818 dr->size = size; 1819 dr->mem = linux_dma_alloc_coherent(dev, size, dma_handle, flag); 1820 dr->handle = dma_handle; 1821 if (dr->mem == NULL) { 1822 lkpi_devres_free(dr); 1823 return (NULL); 1824 } 1825 1826 lkpi_devres_add(dev, dr); 1827 return (dr->mem); 1828 } 1829 1830 void 1831 linuxkpi_dma_sync(struct device *dev, dma_addr_t dma_addr, size_t size, 1832 bus_dmasync_op_t op) 1833 { 1834 struct linux_dma_priv *priv; 1835 struct linux_dma_obj *obj; 1836 1837 priv = dev->dma_priv; 1838 1839 if (pctrie_is_empty(&priv->ptree)) 1840 return; 1841 1842 DMA_PRIV_LOCK(priv); 1843 obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr); 1844 if (obj == NULL) { 1845 DMA_PRIV_UNLOCK(priv); 1846 return; 1847 } 1848 1849 bus_dmamap_sync(obj->dmat, obj->dmamap, op); 1850 DMA_PRIV_UNLOCK(priv); 1851 } 1852 1853 int 1854 linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents, 1855 enum dma_data_direction direction, unsigned long attrs) 1856 { 1857 struct linux_dma_priv *priv; 1858 struct scatterlist *sg; 1859 int i, nseg; 1860 bus_dma_segment_t seg; 1861 1862 priv = dev->dma_priv; 1863 1864 DMA_PRIV_LOCK(priv); 1865 1866 /* create common DMA map in the first S/G entry */ 1867 if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) { 1868 DMA_PRIV_UNLOCK(priv); 1869 return (0); 1870 } 1871 1872 /* load all S/G list entries */ 1873 for_each_sg(sgl, sg, nents, i) { 1874 nseg = -1; 1875 if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map, 1876 sg_phys(sg), sg->length, BUS_DMA_NOWAIT, 1877 &seg, &nseg) != 0) { 1878 bus_dmamap_unload(priv->dmat, sgl->dma_map); 1879 bus_dmamap_destroy(priv->dmat, sgl->dma_map); 1880 DMA_PRIV_UNLOCK(priv); 1881 return (0); 1882 } 1883 KASSERT(nseg == 0, 1884 ("More than 
one segment (nseg=%d)", nseg + 1)); 1885 1886 sg_dma_address(sg) = seg.ds_addr; 1887 } 1888 1889 if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) != 0) 1890 goto skip_sync; 1891 1892 switch (direction) { 1893 case DMA_BIDIRECTIONAL: 1894 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE); 1895 break; 1896 case DMA_TO_DEVICE: 1897 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD); 1898 break; 1899 case DMA_FROM_DEVICE: 1900 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE); 1901 break; 1902 default: 1903 break; 1904 } 1905 skip_sync: 1906 1907 DMA_PRIV_UNLOCK(priv); 1908 1909 return (nents); 1910 } 1911 1912 void 1913 linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl, 1914 int nents __unused, enum dma_data_direction direction, 1915 unsigned long attrs) 1916 { 1917 struct linux_dma_priv *priv; 1918 1919 priv = dev->dma_priv; 1920 1921 DMA_PRIV_LOCK(priv); 1922 1923 if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) != 0) 1924 goto skip_sync; 1925 1926 switch (direction) { 1927 case DMA_BIDIRECTIONAL: 1928 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD); 1929 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD); 1930 break; 1931 case DMA_TO_DEVICE: 1932 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTWRITE); 1933 break; 1934 case DMA_FROM_DEVICE: 1935 bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD); 1936 break; 1937 default: 1938 break; 1939 } 1940 skip_sync: 1941 1942 bus_dmamap_unload(priv->dmat, sgl->dma_map); 1943 bus_dmamap_destroy(priv->dmat, sgl->dma_map); 1944 DMA_PRIV_UNLOCK(priv); 1945 } 1946 1947 struct dma_pool { 1948 struct device *pool_device; 1949 uma_zone_t pool_zone; 1950 struct mtx pool_lock; 1951 bus_dma_tag_t pool_dmat; 1952 size_t pool_entry_size; 1953 struct pctrie pool_ptree; 1954 }; 1955 1956 #define DMA_POOL_LOCK(pool) mtx_lock(&(pool)->pool_lock) 1957 #define DMA_POOL_UNLOCK(pool) mtx_unlock(&(pool)->pool_lock) 1958 1959 static inline int 1960 dma_pool_obj_ctor(void *mem, int size, void *arg, int flags) 1961 { 1962 struct linux_dma_obj *obj = mem; 1963 struct dma_pool *pool = arg; 1964 int error, nseg; 1965 bus_dma_segment_t seg; 1966 1967 nseg = -1; 1968 DMA_POOL_LOCK(pool); 1969 error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap, 1970 vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT, 1971 &seg, &nseg); 1972 DMA_POOL_UNLOCK(pool); 1973 if (error != 0) { 1974 return (error); 1975 } 1976 KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg)); 1977 obj->dma_addr = seg.ds_addr; 1978 1979 return (0); 1980 } 1981 1982 static void 1983 dma_pool_obj_dtor(void *mem, int size, void *arg) 1984 { 1985 struct linux_dma_obj *obj = mem; 1986 struct dma_pool *pool = arg; 1987 1988 DMA_POOL_LOCK(pool); 1989 bus_dmamap_unload(pool->pool_dmat, obj->dmamap); 1990 DMA_POOL_UNLOCK(pool); 1991 } 1992 1993 static int 1994 dma_pool_obj_import(void *arg, void **store, int count, int domain __unused, 1995 int flags) 1996 { 1997 struct dma_pool *pool = arg; 1998 struct linux_dma_obj *obj; 1999 int error, i; 2000 2001 for (i = 0; i < count; i++) { 2002 obj = uma_zalloc(linux_dma_obj_zone, flags); 2003 if (obj == NULL) 2004 break; 2005 2006 error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr, 2007 BUS_DMA_NOWAIT, &obj->dmamap); 2008 if (error!= 0) { 2009 uma_zfree(linux_dma_obj_zone, obj); 2010 break; 2011 } 2012 2013 store[i] = obj; 2014 } 2015 2016 return (i); 2017 } 2018 2019 static void 2020 dma_pool_obj_release(void *arg, void **store, int count) 2021 { 2022 struct dma_pool 
*pool = arg; 2023 struct linux_dma_obj *obj; 2024 int i; 2025 2026 for (i = 0; i < count; i++) { 2027 obj = store[i]; 2028 bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap); 2029 uma_zfree(linux_dma_obj_zone, obj); 2030 } 2031 } 2032 2033 struct dma_pool * 2034 linux_dma_pool_create(char *name, struct device *dev, size_t size, 2035 size_t align, size_t boundary) 2036 { 2037 struct linux_dma_priv *priv; 2038 struct dma_pool *pool; 2039 2040 priv = dev->dma_priv; 2041 2042 pool = kzalloc(sizeof(*pool), GFP_KERNEL); 2043 pool->pool_device = dev; 2044 pool->pool_entry_size = size; 2045 2046 if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev), 2047 align, boundary, /* alignment, boundary */ 2048 priv->dma_mask, /* lowaddr */ 2049 BUS_SPACE_MAXADDR, /* highaddr */ 2050 NULL, NULL, /* filtfunc, filtfuncarg */ 2051 size, /* maxsize */ 2052 1, /* nsegments */ 2053 size, /* maxsegsz */ 2054 0, /* flags */ 2055 NULL, NULL, /* lockfunc, lockfuncarg */ 2056 &pool->pool_dmat)) { 2057 kfree(pool); 2058 return (NULL); 2059 } 2060 2061 pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor, 2062 dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import, 2063 dma_pool_obj_release, pool, 0); 2064 2065 mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF); 2066 pctrie_init(&pool->pool_ptree); 2067 2068 return (pool); 2069 } 2070 2071 void 2072 linux_dma_pool_destroy(struct dma_pool *pool) 2073 { 2074 2075 uma_zdestroy(pool->pool_zone); 2076 bus_dma_tag_destroy(pool->pool_dmat); 2077 mtx_destroy(&pool->pool_lock); 2078 kfree(pool); 2079 } 2080 2081 void 2082 lkpi_dmam_pool_destroy(struct device *dev, void *p) 2083 { 2084 struct dma_pool *pool; 2085 2086 pool = *(struct dma_pool **)p; 2087 LINUX_DMA_PCTRIE_RECLAIM(&pool->pool_ptree); 2088 linux_dma_pool_destroy(pool); 2089 } 2090 2091 void * 2092 linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags, 2093 dma_addr_t *handle) 2094 { 2095 struct linux_dma_obj *obj; 2096 2097 obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags & GFP_NATIVE_MASK); 2098 if (obj == NULL) 2099 return (NULL); 2100 2101 DMA_POOL_LOCK(pool); 2102 if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) { 2103 DMA_POOL_UNLOCK(pool); 2104 uma_zfree_arg(pool->pool_zone, obj, pool); 2105 return (NULL); 2106 } 2107 DMA_POOL_UNLOCK(pool); 2108 2109 *handle = obj->dma_addr; 2110 return (obj->vaddr); 2111 } 2112 2113 void 2114 linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr) 2115 { 2116 struct linux_dma_obj *obj; 2117 2118 DMA_POOL_LOCK(pool); 2119 obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr); 2120 if (obj == NULL) { 2121 DMA_POOL_UNLOCK(pool); 2122 return; 2123 } 2124 LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr); 2125 DMA_POOL_UNLOCK(pool); 2126 2127 uma_zfree_arg(pool->pool_zone, obj, pool); 2128 } 2129 2130 static int 2131 linux_backlight_get_status(device_t dev, struct backlight_props *props) 2132 { 2133 struct pci_dev *pdev; 2134 2135 linux_set_current(curthread); 2136 pdev = device_get_softc(dev); 2137 2138 props->brightness = pdev->dev.bd->props.brightness; 2139 props->brightness = props->brightness * 100 / pdev->dev.bd->props.max_brightness; 2140 props->nlevels = 0; 2141 2142 return (0); 2143 } 2144 2145 static int 2146 linux_backlight_get_info(device_t dev, struct backlight_info *info) 2147 { 2148 struct pci_dev *pdev; 2149 2150 linux_set_current(curthread); 2151 pdev = device_get_softc(dev); 2152 2153 info->type = BACKLIGHT_TYPE_PANEL; 2154 strlcpy(info->name, pdev->dev.bd->name, BACKLIGHTMAXNAMELENGTH); 2155 
return (0); 2156 } 2157 2158 static int 2159 linux_backlight_update_status(device_t dev, struct backlight_props *props) 2160 { 2161 struct pci_dev *pdev; 2162 2163 linux_set_current(curthread); 2164 pdev = device_get_softc(dev); 2165 2166 pdev->dev.bd->props.brightness = pdev->dev.bd->props.max_brightness * 2167 props->brightness / 100; 2168 pdev->dev.bd->props.power = props->brightness == 0 ? 2169 4/* FB_BLANK_POWERDOWN */ : 0/* FB_BLANK_UNBLANK */; 2170 return (pdev->dev.bd->ops->update_status(pdev->dev.bd)); 2171 } 2172 2173 struct backlight_device * 2174 linux_backlight_device_register(const char *name, struct device *dev, 2175 void *data, const struct backlight_ops *ops, struct backlight_properties *props) 2176 { 2177 2178 dev->bd = malloc(sizeof(*dev->bd), M_DEVBUF, M_WAITOK | M_ZERO); 2179 dev->bd->ops = ops; 2180 dev->bd->props.type = props->type; 2181 dev->bd->props.max_brightness = props->max_brightness; 2182 dev->bd->props.brightness = props->brightness; 2183 dev->bd->props.power = props->power; 2184 dev->bd->data = data; 2185 dev->bd->dev = dev; 2186 dev->bd->name = strdup(name, M_DEVBUF); 2187 2188 dev->backlight_dev = backlight_register(name, dev->bsddev); 2189 2190 return (dev->bd); 2191 } 2192 2193 void 2194 linux_backlight_device_unregister(struct backlight_device *bd) 2195 { 2196 2197 backlight_destroy(bd->dev->backlight_dev); 2198 free(bd->name, M_DEVBUF); 2199 free(bd, M_DEVBUF); 2200 } 2201