/*-
 * Copyright (c) 2015-2016 Mellanox Technologies, Ltd.
 * All rights reserved.
 * Copyright (c) 2020-2025 The FreeBSD Foundation
 *
 * Portions of this software were developed by Björn Zeeb
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filio.h>
#include <sys/pciio.h>
#include <sys/pctrie.h>
#include <sys/rman.h>
#include <sys/rwlock.h>
#include <sys/stdarg.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <machine/bus.h>
#include <machine/resource.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pci_private.h>
#include <dev/pci/pci_iov.h>
#include <dev/backlight/backlight.h>

#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/cdev.h>
#include <linux/file.h>
#include <linux/sysfs.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include <linux/pci.h>
#include <linux/compat.h>

#include <linux/backlight.h>

#include "backlight_if.h"
#include "pcib_if.h"

/* Undef the linux function macro defined in linux/pci.h */
#undef pci_get_class

extern int linuxkpi_debug;

SYSCTL_DECL(_compat_linuxkpi);

static counter_u64_t lkpi_pci_nseg1_fail;
SYSCTL_COUNTER_U64(_compat_linuxkpi, OID_AUTO, lkpi_pci_nseg1_fail, CTLFLAG_RD,
    &lkpi_pci_nseg1_fail, "Count of busdma mapping failures of single-segment");

static device_probe_t linux_pci_probe;
static device_attach_t linux_pci_attach;
static device_detach_t linux_pci_detach;
static device_suspend_t linux_pci_suspend;
static device_resume_t linux_pci_resume;
static device_shutdown_t linux_pci_shutdown;
static pci_iov_init_t linux_pci_iov_init;
static pci_iov_uninit_t linux_pci_iov_uninit;
static pci_iov_add_vf_t linux_pci_iov_add_vf;
static int linux_backlight_get_status(device_t dev, struct backlight_props
    *props);
static int linux_backlight_update_status(device_t dev, struct backlight_props *props);
static int linux_backlight_get_info(device_t dev, struct backlight_info *info);
static void lkpi_pcim_iomap_table_release(struct device *, void *);

static device_method_t pci_methods[] = {
	DEVMETHOD(device_probe, linux_pci_probe),
	DEVMETHOD(device_attach, linux_pci_attach),
	DEVMETHOD(device_detach, linux_pci_detach),
	DEVMETHOD(device_suspend, linux_pci_suspend),
	DEVMETHOD(device_resume, linux_pci_resume),
	DEVMETHOD(device_shutdown, linux_pci_shutdown),
	DEVMETHOD(pci_iov_init, linux_pci_iov_init),
	DEVMETHOD(pci_iov_uninit, linux_pci_iov_uninit),
	DEVMETHOD(pci_iov_add_vf, linux_pci_iov_add_vf),

	/* Bus interface. */
	DEVMETHOD(bus_add_child, bus_generic_add_child),

	/* backlight interface */
	DEVMETHOD(backlight_update_status, linux_backlight_update_status),
	DEVMETHOD(backlight_get_status, linux_backlight_get_status),
	DEVMETHOD(backlight_get_info, linux_backlight_get_info),
	DEVMETHOD_END
};

const char *pci_power_names[] = {
	"UNKNOWN", "D0", "D1", "D2", "D3hot", "D3cold"
};

/* We need some meta-struct to keep track of these for devres. */
struct pci_devres {
	bool		enable_io;
	/* PCIR_MAX_BAR_0 + 1 = 6 => BIT(0..5). */
	uint8_t		region_mask;
	struct resource	*region_table[PCIR_MAX_BAR_0 + 1]; /* Not needed. */
};
struct pcim_iomap_devres {
	void		*mmio_table[PCIR_MAX_BAR_0 + 1];
	struct resource	*res_table[PCIR_MAX_BAR_0 + 1];
};

struct linux_dma_priv {
	uint64_t	dma_mask;
	bus_dma_tag_t	dmat;
	uint64_t	dma_coherent_mask;
	bus_dma_tag_t	dmat_coherent;
	struct mtx	lock;
	struct pctrie	ptree;
};
#define	DMA_PRIV_LOCK(priv)	mtx_lock(&(priv)->lock)
#define	DMA_PRIV_UNLOCK(priv)	mtx_unlock(&(priv)->lock)

static void
lkpi_set_pcim_iomap_devres(struct pcim_iomap_devres *dr, int bar,
    void *res)
{
	dr->mmio_table[bar] = (void *)rman_get_bushandle(res);
	dr->res_table[bar] = res;
}

static bool
lkpi_pci_bar_id_valid(int bar)
{
	if (bar < 0 || bar > PCIR_MAX_BAR_0)
		return (false);

	return (true);
}

static int
linux_pdev_dma_uninit(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;

	priv = pdev->dev.dma_priv;
	if (priv->dmat)
		bus_dma_tag_destroy(priv->dmat);
	if (priv->dmat_coherent)
		bus_dma_tag_destroy(priv->dmat_coherent);
	mtx_destroy(&priv->lock);
	pdev->dev.dma_priv = NULL;
	free(priv, M_DEVBUF);
	return (0);
}

static int
linux_pdev_dma_init(struct pci_dev *pdev)
{
	struct linux_dma_priv *priv;
	int error;

	priv = malloc(sizeof(*priv), M_DEVBUF, M_WAITOK | M_ZERO);

	mtx_init(&priv->lock, "lkpi-priv-dma", NULL, MTX_DEF);
	pctrie_init(&priv->ptree);

	pdev->dev.dma_priv = priv;

	/* Create default DMA tags. */
	error = linux_dma_tag_init(&pdev->dev, DMA_BIT_MASK(64));
	if (error != 0)
		goto err;
	/* Coherent is lower 32bit only by default in Linux. */
	error = linux_dma_tag_init_coherent(&pdev->dev, DMA_BIT_MASK(32));
	if (error != 0)
		goto err;

	return (error);

err:
	linux_pdev_dma_uninit(pdev);
	return (error);
}

int
linux_dma_tag_init(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat) {
		if (priv->dma_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat);
	}

	priv->dma_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat);
	return (-error);
}

int
linux_dma_tag_init_coherent(struct device *dev, u64 dma_mask)
{
	struct linux_dma_priv *priv;
	int error;

	priv = dev->dma_priv;

	if (priv->dmat_coherent) {
		if (priv->dma_coherent_mask == dma_mask)
			return (0);

		bus_dma_tag_destroy(priv->dmat_coherent);
	}

	priv->dma_coherent_mask = dma_mask;

	error = bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    1, 0,			/* alignment, boundary */
	    dma_mask,			/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    1,				/* nsegments */
	    BUS_SPACE_MAXSIZE,		/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &priv->dmat_coherent);
	return (-error);
}

static struct pci_driver *
linux_pci_find(device_t dev, const struct pci_device_id **idp)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	uint16_t vendor;
	uint16_t device;
	uint16_t subvendor;
	uint16_t subdevice;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);
	subvendor = pci_get_subvendor(dev);
	subdevice = pci_get_subdevice(dev);

	spin_lock(&pci_lock);
	list_for_each_entry(pdrv, &pci_drivers, node) {
		for (id = pdrv->id_table; id->vendor != 0; id++) {
			if (vendor == id->vendor &&
			    (PCI_ANY_ID == id->device || device == id->device) &&
			    (PCI_ANY_ID == id->subvendor || subvendor == id->subvendor) &&
			    (PCI_ANY_ID == id->subdevice || subdevice == id->subdevice)) {
				*idp = id;
				spin_unlock(&pci_lock);
				return (pdrv);
			}
		}
	}
	spin_unlock(&pci_lock);
	return (NULL);
}

struct pci_dev *
lkpi_pci_get_device(uint32_t vendor, uint32_t device, struct pci_dev *odev)
{
	struct pci_dev *pdev, *found;

	found = NULL;
	spin_lock(&pci_lock);
	list_for_each_entry(pdev, &pci_devices, links) {
		/* Walk until we find odev. */
		if (odev != NULL) {
			if (pdev == odev)
				odev = NULL;
			continue;
		}

		if ((pdev->vendor == vendor || vendor == PCI_ANY_ID) &&
		    (pdev->device == device || device == PCI_ANY_ID)) {
			found = pdev;
			break;
		}
	}
	pci_dev_get(found);
	spin_unlock(&pci_lock);

	return (found);
}

static void
lkpi_pci_dev_release(struct device *dev)
{

	lkpi_devres_release_free_list(dev);
	spin_lock_destroy(&dev->devres_lock);
}

static int
lkpifill_pci_dev(device_t dev, struct pci_dev *pdev)
{
	int error;

	error = kobject_init_and_add(&pdev->dev.kobj, &linux_dev_ktype,
	    &linux_root_device.kobj, device_get_nameunit(dev));
	if (error != 0) {
		printf("%s:%d: kobject_init_and_add returned %d\n",
		    __func__, __LINE__, error);
		return (error);
	}

	pdev->devfn = PCI_DEVFN(pci_get_slot(dev), pci_get_function(dev));
	pdev->vendor = pci_get_vendor(dev);
	pdev->device = pci_get_device(dev);
	pdev->subsystem_vendor = pci_get_subvendor(dev);
	pdev->subsystem_device = pci_get_subdevice(dev);
	pdev->class = pci_get_class(dev);
	pdev->revision = pci_get_revid(dev);
	pdev->path_name = kasprintf(GFP_KERNEL, "%04d:%02d:%02d.%d",
	    pci_get_domain(dev), pci_get_bus(dev), pci_get_slot(dev),
	    pci_get_function(dev));
	pdev->bus = malloc(sizeof(*pdev->bus), M_DEVBUF, M_WAITOK | M_ZERO);
	/*
	 * This should be the upstream bridge; pci_upstream_bridge()
	 * handles that case on demand as otherwise we'll shadow the
	 * entire PCI hierarchy.
	 */
	pdev->bus->self = pdev;
	pdev->bus->number = pci_get_bus(dev);
	pdev->bus->domain = pci_get_domain(dev);
	pdev->dev.bsddev = dev;
	pdev->dev.parent = &linux_root_device;
	pdev->dev.release = lkpi_pci_dev_release;
	INIT_LIST_HEAD(&pdev->dev.irqents);

	if (pci_msi_count(dev) > 0)
		pdev->msi_desc = malloc(pci_msi_count(dev) *
		    sizeof(*pdev->msi_desc), M_DEVBUF, M_WAITOK | M_ZERO);

	spin_lock_init(&pdev->dev.devres_lock);
	INIT_LIST_HEAD(&pdev->dev.devres_head);

	return (0);
}

static void
lkpinew_pci_dev_release(struct device *dev)
{
	struct pci_dev *pdev;
	int i;

	pdev = to_pci_dev(dev);
	if (pdev->root != NULL)
		pci_dev_put(pdev->root);
	if (pdev->bus->self != pdev)
		pci_dev_put(pdev->bus->self);
	free(pdev->bus, M_DEVBUF);
	if (pdev->msi_desc != NULL) {
		for (i = pci_msi_count(pdev->dev.bsddev) - 1; i >= 0; i--)
			free(pdev->msi_desc[i], M_DEVBUF);
		free(pdev->msi_desc, M_DEVBUF);
	}
	kfree(pdev->path_name);
	free(pdev, M_DEVBUF);
}

struct pci_dev *
lkpinew_pci_dev(device_t dev)
{
	struct pci_dev *pdev;
	int error;

	pdev = malloc(sizeof(*pdev), M_DEVBUF, M_WAITOK|M_ZERO);
	error = lkpifill_pci_dev(dev, pdev);
	if (error != 0) {
		free(pdev, M_DEVBUF);
		return (NULL);
	}
	pdev->dev.release = lkpinew_pci_dev_release;

	return (pdev);
}

struct pci_dev *
lkpi_pci_get_class(unsigned int class, struct pci_dev *from)
{
	device_t dev;
	device_t devfrom = NULL;
	struct pci_dev *pdev;

	if (from != NULL)
		devfrom = from->dev.bsddev;

	dev = pci_find_class_from(class >> 16, (class >> 8) & 0xFF, devfrom);
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

struct pci_dev *
lkpi_pci_get_base_class(unsigned int baseclass, struct pci_dev *from)
{
	device_t dev;
	device_t devfrom = NULL;
	struct pci_dev *pdev;

	if (from != NULL)
		devfrom = from->dev.bsddev;

	dev = pci_find_base_class_from(baseclass, devfrom);
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

struct pci_dev *
lkpi_pci_get_domain_bus_and_slot(int domain, unsigned int bus,
    unsigned int devfn)
{
	device_t dev;
	struct pci_dev *pdev;

	dev = pci_find_dbsf(domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
	if (dev == NULL)
		return (NULL);

	pdev = lkpinew_pci_dev(dev);
	return (pdev);
}

static int
linux_pci_probe(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;

	if ((pdrv = linux_pci_find(dev, &id)) == NULL)
		return (ENXIO);
	if (device_get_driver(dev) != &pdrv->bsddriver)
		return (ENXIO);
	device_set_desc(dev, pdrv->name);

	/* Assume BSS initialized (should never return BUS_PROBE_SPECIFIC). */
	if (pdrv->bsd_probe_return == 0)
		return (BUS_PROBE_DEFAULT);
	else
		return (pdrv->bsd_probe_return);
}

static int
linux_pci_attach(device_t dev)
{
	const struct pci_device_id *id;
	struct pci_driver *pdrv;
	struct pci_dev *pdev;

	pdrv = linux_pci_find(dev, &id);
	pdev = device_get_softc(dev);

	MPASS(pdrv != NULL);
	MPASS(pdev != NULL);

	return (linux_pci_attach_device(dev, pdrv, id, pdev));
}

static struct resource_list_entry *
linux_pci_reserve_bar(struct pci_dev *pdev, struct resource_list *rl,
    int type, int rid)
{
	device_t dev;
	struct resource *res;

	KASSERT(type == SYS_RES_IOPORT || type == SYS_RES_MEMORY,
	    ("trying to reserve non-BAR type %d", type));

	dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	res = pci_reserve_map(device_get_parent(dev), dev, type, &rid, 0, ~0,
	    1, 1, 0);
	if (res == NULL)
		return (NULL);
	return (resource_list_find(rl, type, rid));
}

static struct resource_list_entry *
linux_pci_get_rle(struct pci_dev *pdev, int type, int rid, bool reserve_bar)
{
	struct pci_devinfo *dinfo;
	struct resource_list *rl;
	struct resource_list_entry *rle;

	dinfo = device_get_ivars(pdev->dev.bsddev);
	rl = &dinfo->resources;
	rle = resource_list_find(rl, type, rid);
	/* Reserve resources for this BAR if needed. */
	if (rle == NULL && reserve_bar)
		rle = linux_pci_reserve_bar(pdev, rl, type, rid);
	return (rle);
}

int
linux_pci_attach_device(device_t dev, struct pci_driver *pdrv,
    const struct pci_device_id *id, struct pci_dev *pdev)
{
	struct resource_list_entry *rle;
	device_t parent;
	uintptr_t rid;
	int error;
	bool isdrm;

	linux_set_current(curthread);

	parent = device_get_parent(dev);
	isdrm = pdrv != NULL && pdrv->isdrm;

	if (isdrm) {
		struct pci_devinfo *dinfo;

		dinfo = device_get_ivars(parent);
		device_set_ivars(dev, dinfo);
	}

	error = lkpifill_pci_dev(dev, pdev);
	if (error != 0)
		return (error);

	if (isdrm)
		PCI_GET_ID(device_get_parent(parent), parent, PCI_ID_RID, &rid);
	else
		PCI_GET_ID(parent, dev, PCI_ID_RID, &rid);
	pdev->devfn = rid;
	pdev->pdrv = pdrv;
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 0, false);
	if (rle != NULL)
		pdev->dev.irq = rle->start;
	else
		pdev->dev.irq = LINUX_IRQ_INVALID;
	pdev->irq = pdev->dev.irq;
	error = linux_pdev_dma_init(pdev);
	if (error)
		goto out_dma_init;

	TAILQ_INIT(&pdev->mmio);
	spin_lock_init(&pdev->pcie_cap_lock);

	spin_lock(&pci_lock);
	list_add(&pdev->links, &pci_devices);
	spin_unlock(&pci_lock);

	if (pdrv != NULL) {
		error = pdrv->probe(pdev, id);
		if (error)
			goto out_probe;
	}
	return (0);

out_probe:
	free(pdev->bus, M_DEVBUF);
	spin_lock_destroy(&pdev->pcie_cap_lock);
	linux_pdev_dma_uninit(pdev);
out_dma_init:
	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	put_device(&pdev->dev);
	return (-error);
}

static int
linux_pci_detach(device_t dev)
{
	struct pci_dev *pdev;

	pdev = device_get_softc(dev);

	MPASS(pdev != NULL);

	device_set_desc(dev, NULL);

	return (linux_pci_detach_device(pdev));
}

int
linux_pci_detach_device(struct pci_dev *pdev)
{

	linux_set_current(curthread);

	if (pdev->pdrv != NULL)
		pdev->pdrv->remove(pdev);

	if (pdev->root != NULL)
		pci_dev_put(pdev->root);
	free(pdev->bus, M_DEVBUF);
	linux_pdev_dma_uninit(pdev);

	spin_lock(&pci_lock);
	list_del(&pdev->links);
	spin_unlock(&pci_lock);
	spin_lock_destroy(&pdev->pcie_cap_lock);
	put_device(&pdev->dev);

	return (0);
}

static int
lkpi_pci_disable_dev(struct device *dev)
{

	(void) pci_disable_io(dev->bsddev, SYS_RES_MEMORY);
	(void) pci_disable_io(dev->bsddev, SYS_RES_IOPORT);
	return (0);
}

static struct pci_devres *
lkpi_pci_devres_get_alloc(struct pci_dev *pdev)
{
	struct pci_devres *dr;

	dr = lkpi_devres_find(&pdev->dev, lkpi_pci_devres_release, NULL, NULL);
	if (dr == NULL) {
		dr = lkpi_devres_alloc(lkpi_pci_devres_release, sizeof(*dr),
		    GFP_KERNEL | __GFP_ZERO);
		if (dr != NULL)
			lkpi_devres_add(&pdev->dev, dr);
	}

	return (dr);
}

static struct pci_devres *
lkpi_pci_devres_find(struct pci_dev *pdev)
{
	if (!pdev->managed)
		return (NULL);

	return (lkpi_pci_devres_get_alloc(pdev));
}

void
lkpi_pci_devres_release(struct device *dev, void *p)
{
	struct pci_devres *dr;
	struct pci_dev *pdev;
	int bar;

	pdev = to_pci_dev(dev);
	dr = p;

	if (pdev->msix_enabled)
		lkpi_pci_disable_msix(pdev);
	if (pdev->msi_enabled)
		lkpi_pci_disable_msi(pdev);

	if (dr->enable_io && lkpi_pci_disable_dev(dev) == 0)
		dr->enable_io = false;

	if (dr->region_mask == 0)
		return;
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {

		if ((dr->region_mask & (1 << bar)) == 0)
			continue;
		pci_release_region(pdev, bar);
	}
}

int
linuxkpi_pcim_enable_device(struct pci_dev *pdev)
{
	struct pci_devres *dr;
	int error;

	/* Here we cannot run through the pdev->managed check. */
	dr = lkpi_pci_devres_get_alloc(pdev);
	if (dr == NULL)
		return (-ENOMEM);

	/* If resources were enabled before do not do it again. */
	if (dr->enable_io)
		return (0);

	error = pci_enable_device(pdev);
	if (error == 0)
		dr->enable_io = true;

	/* This device is now managed. */
	pdev->managed = true;

	return (error);
}

static struct pcim_iomap_devres *
lkpi_pcim_iomap_devres_find(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr;

	dr = lkpi_devres_find(&pdev->dev, lkpi_pcim_iomap_table_release,
	    NULL, NULL);
	if (dr == NULL) {
		dr = lkpi_devres_alloc(lkpi_pcim_iomap_table_release,
		    sizeof(*dr), GFP_KERNEL | __GFP_ZERO);
		if (dr != NULL)
			lkpi_devres_add(&pdev->dev, dr);
	}

	if (dr == NULL)
		device_printf(pdev->dev.bsddev, "%s: NULL\n", __func__);

	return (dr);
}

void __iomem **
linuxkpi_pcim_iomap_table(struct pci_dev *pdev)
{
	struct pcim_iomap_devres *dr;

	dr = lkpi_pcim_iomap_devres_find(pdev);
	if (dr == NULL)
		return (NULL);

	/*
	 * If the driver has manually set a flag to be able to request the
	 * resource to use bus_read/write_<n>, return the shadow table.
	 */
	if (pdev->want_iomap_res)
		return ((void **)dr->res_table);

	/* This is the Linux default. */
	return (dr->mmio_table);
}

static struct resource *
_lkpi_pci_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen __unused)
{
	struct pci_mmio_region *mmio, *p;
	int type;

	if (!lkpi_pci_bar_id_valid(bar))
		return (NULL);

	type = pci_resource_type(pdev, bar);
	if (type < 0) {
		device_printf(pdev->dev.bsddev, "%s: bar %d type %d\n",
		    __func__, bar, type);
		return (NULL);
	}

	/*
	 * Check for duplicate mappings.
	 * This can happen if a driver calls pci_request_region() first.
	 */
	TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
		if (mmio->type == type && mmio->rid == PCIR_BAR(bar)) {
			return (mmio->res);
		}
	}

	mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);
	mmio->rid = PCIR_BAR(bar);
	mmio->type = type;
	mmio->res = bus_alloc_resource_any(pdev->dev.bsddev, mmio->type,
	    &mmio->rid, RF_ACTIVE|RF_SHAREABLE);
	if (mmio->res == NULL) {
		device_printf(pdev->dev.bsddev, "%s: failed to alloc "
		    "bar %d type %d rid %d\n",
		    __func__, bar, type, PCIR_BAR(bar));
		free(mmio, M_DEVBUF);
		return (NULL);
	}
	TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);

	return (mmio->res);
}

void *
linuxkpi_pci_iomap_range(struct pci_dev *pdev, int bar,
    unsigned long off, unsigned long maxlen)
{
	struct resource *res;

	if (!lkpi_pci_bar_id_valid(bar))
		return (NULL);

	res = _lkpi_pci_iomap(pdev, bar, maxlen);
	if (res == NULL)
		return (NULL);
	/* This is a FreeBSD extension so we can use bus_*(). */
	if (pdev->want_iomap_res)
		return (res);
	MPASS(off < rman_get_size(res));
	return ((void *)(rman_get_bushandle(res) + off));
}

void *
linuxkpi_pci_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
	if (!lkpi_pci_bar_id_valid(bar))
		return (NULL);

	return (linuxkpi_pci_iomap_range(pdev, bar, 0, maxlen));
}

void *
linuxkpi_pcim_iomap(struct pci_dev *pdev, int bar, unsigned long maxlen)
{
	struct pcim_iomap_devres *dr;
	void *res;

	if (!lkpi_pci_bar_id_valid(bar))
		return (NULL);

	dr = lkpi_pcim_iomap_devres_find(pdev);
	if (dr == NULL)
		return (NULL);

	if (dr->res_table[bar] != NULL)
		return (dr->res_table[bar]);

	res = linuxkpi_pci_iomap(pdev, bar, maxlen);
	if (res == NULL) {
		/*
		 * Do not free the devres in case there were
		 * other valid mappings before already.
		 */
		return (NULL);
	}
	lkpi_set_pcim_iomap_devres(dr, bar, res);

	return (res);
}

void
linuxkpi_pci_iounmap(struct pci_dev *pdev, void *res)
{
	struct pci_mmio_region *mmio, *p;
	bus_space_handle_t bh = (bus_space_handle_t)res;

	TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
		if (pdev->want_iomap_res) {
			if (res != mmio->res)
				continue;
		} else {
			if (bh < rman_get_bushandle(mmio->res) ||
			    bh >= rman_get_bushandle(mmio->res) +
			    rman_get_size(mmio->res))
				continue;
		}
		bus_release_resource(pdev->dev.bsddev,
		    mmio->type, mmio->rid, mmio->res);
		TAILQ_REMOVE(&pdev->mmio, mmio, next);
		free(mmio, M_DEVBUF);
		return;
	}
}

int
linuxkpi_pcim_iomap_regions(struct pci_dev *pdev, uint32_t mask, const char *name)
{
	struct pcim_iomap_devres *dr;
	void *res;
	uint32_t mappings;
	int bar;

	dr = lkpi_pcim_iomap_devres_find(pdev);
	if (dr == NULL)
		return (-ENOMEM);

	/* Now iomap all the requested (by "mask") ones. */
	for (bar = mappings = 0; mappings != mask; bar++) {
		if ((mask & (1 << bar)) == 0)
			continue;

		/* Requesting the same BAR twice is not allowed. */
		if (dr->mmio_table[bar] != NULL) {
			device_printf(pdev->dev.bsddev, "%s: bar %d %p\n",
			    __func__, bar, dr->mmio_table[bar]);
			goto err;
		}

		res = _lkpi_pci_iomap(pdev, bar, 0);
		if (res == NULL)
			goto err;
		lkpi_set_pcim_iomap_devres(dr, bar, res);

		mappings |= (1 << bar);
	}

	return (0);
err:
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
		if ((mappings & (1 << bar)) != 0) {
			res = dr->mmio_table[bar];
			if (res == NULL)
				continue;
			pci_iounmap(pdev, res);
		}
	}

	return (-EINVAL);
}

static void
lkpi_pcim_iomap_table_release(struct device *dev, void *p)
{
	struct pcim_iomap_devres *dr;
	struct pci_dev *pdev;
	int bar;

	dr = p;
	pdev = to_pci_dev(dev);
	for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {

		if (dr->mmio_table[bar] == NULL)
			continue;

		pci_iounmap(pdev, dr->mmio_table[bar]);
	}
}

static int
linux_pci_suspend(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pm_message pm = { };
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->suspend != NULL)
		error = -pdev->pdrv->suspend(pdev, pm);
	else if (pmops != NULL && pmops->suspend != NULL) {
		error = -pmops->suspend(&pdev->dev);
		if (error == 0 && pmops->suspend_late != NULL)
			error = -pmops->suspend_late(&pdev->dev);
		if (error == 0 && pmops->suspend_noirq != NULL)
			error = -pmops->suspend_noirq(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_resume(device_t dev)
{
	const struct dev_pm_ops *pmops;
	struct pci_dev *pdev;
	int error;

	error = 0;
	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	pmops = pdev->pdrv->driver.pm;

	if (pdev->pdrv->resume != NULL)
		error = -pdev->pdrv->resume(pdev);
	else if (pmops != NULL && pmops->resume != NULL) {
		if (pmops->resume_early != NULL)
			error = -pmops->resume_early(&pdev->dev);
		if (error == 0 && pmops->resume != NULL)
			error = -pmops->resume(&pdev->dev);
	}
	return (error);
}

static int
linux_pci_shutdown(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->shutdown != NULL)
		pdev->pdrv->shutdown(pdev);
	return (0);
}

static int
linux_pci_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *pf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_init != NULL)
		error = pdev->pdrv->bsd_iov_init(dev, num_vfs, pf_config);
	else
		error = EINVAL;
	return (error);
}

static void
linux_pci_iov_uninit(device_t dev)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_uninit != NULL)
		pdev->pdrv->bsd_iov_uninit(dev);
}

static int
linux_pci_iov_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *vf_config)
{
	struct pci_dev *pdev;
	int error;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);
	if (pdev->pdrv->bsd_iov_add_vf != NULL)
		error = pdev->pdrv->bsd_iov_add_vf(dev, vfnum, vf_config);
	else
		error = EINVAL;
	return (error);
}

static int
_linux_pci_register_driver(struct pci_driver *pdrv, devclass_t dc)
{
	int error;

	linux_set_current(curthread);
	spin_lock(&pci_lock);
	list_add(&pdrv->node, &pci_drivers);
	spin_unlock(&pci_lock);
	if (pdrv->bsddriver.name == NULL)
		pdrv->bsddriver.name = pdrv->name;
	pdrv->bsddriver.methods = pci_methods;
	pdrv->bsddriver.size = sizeof(struct pci_dev);

	bus_topo_lock();
	error = devclass_add_driver(dc, &pdrv->bsddriver,
	    BUS_PASS_DEFAULT, &pdrv->bsdclass);
	bus_topo_unlock();
	return (-error);
}

int
linux_pci_register_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	pdrv->isdrm = strcmp(pdrv->name, "drmn") == 0;
	dc = pdrv->isdrm ? devclass_create("vgapci") : devclass_find("pci");
	if (dc == NULL)
		return (-ENXIO);
	return (_linux_pci_register_driver(pdrv, dc));
}

static struct resource_list_entry *
lkpi_pci_get_bar(struct pci_dev *pdev, int bar, bool reserve)
{
	int type;

	type = pci_resource_type(pdev, bar);
	if (type < 0)
		return (NULL);
	bar = PCIR_BAR(bar);
	return (linux_pci_get_rle(pdev, type, bar, reserve));
}

struct device *
lkpi_pci_find_irq_dev(unsigned int irq)
{
	struct pci_dev *pdev;
	struct device *found;

	found = NULL;
	spin_lock(&pci_lock);
	list_for_each_entry(pdev, &pci_devices, links) {
		if (irq == pdev->dev.irq ||
		    (irq >= pdev->dev.irq_start && irq < pdev->dev.irq_end)) {
			found = &pdev->dev;
			break;
		}
	}
	spin_unlock(&pci_lock);
	return (found);
}

unsigned long
pci_resource_start(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;
	rman_res_t newstart;
	device_t dev;
	int error;

	if ((rle = lkpi_pci_get_bar(pdev, bar, true)) == NULL)
		return (0);
	dev = pdev->pdrv != NULL && pdev->pdrv->isdrm ?
	    device_get_parent(pdev->dev.bsddev) : pdev->dev.bsddev;
	error = bus_translate_resource(dev, rle->type, rle->start, &newstart);
	if (error != 0) {
		device_printf(pdev->dev.bsddev,
		    "translate of %#jx failed: %d\n",
		    (uintmax_t)rle->start, error);
		return (0);
	}
	return (newstart);
}

unsigned long
pci_resource_len(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;

	if ((rle = lkpi_pci_get_bar(pdev, bar, true)) == NULL)
		return (0);
	return (rle->count);
}

static int
lkpi_pci_request_region(struct pci_dev *pdev, int bar, const char *res_name,
    bool managed)
{
	struct resource *res;
	struct pci_devres *dr;
	struct pci_mmio_region *mmio;
	int rid;
	int type;

	if (!lkpi_pci_bar_id_valid(bar))
		return (-EINVAL);

	/*
	 * If the bar is not valid, return success without adding the BAR;
	 * otherwise linuxkpi_pcim_request_all_regions() will error.
	 */
	if (pci_resource_len(pdev, bar) == 0)
		return (0);
	/* Likewise if it is neither IO nor MEM, nothing to do for us. */
	type = pci_resource_type(pdev, bar);
	if (type < 0)
		return (0);

	rid = PCIR_BAR(bar);
	res = bus_alloc_resource_any(pdev->dev.bsddev, type, &rid,
	    RF_ACTIVE|RF_SHAREABLE);
	if (res == NULL) {
		device_printf(pdev->dev.bsddev, "%s: failed to alloc "
		    "bar %d type %d rid %d\n",
		    __func__, bar, type, PCIR_BAR(bar));
		return (-ENODEV);
	}

	/*
	 * It seems there is an implicit devres tracking on these if the device
	 * is managed (lkpi_pci_devres_find() case); otherwise the resources are
	 * not automatically freed on FreeBSD/LinuxKPI though they should be/are
	 * expected to be by Linux drivers.
	 * Otherwise if we are called from a pcim-function with the managed
	 * argument set, we need to track devres independent of pdev->managed.
	 */
	if (managed)
		dr = lkpi_pci_devres_get_alloc(pdev);
	else
		dr = lkpi_pci_devres_find(pdev);
	if (dr != NULL) {
		dr->region_mask |= (1 << bar);
		dr->region_table[bar] = res;
	}

	/* Even if the device is not managed we need to track it for iomap. */
	mmio = malloc(sizeof(*mmio), M_DEVBUF, M_WAITOK | M_ZERO);
	mmio->rid = PCIR_BAR(bar);
	mmio->type = type;
	mmio->res = res;
	TAILQ_INSERT_TAIL(&pdev->mmio, mmio, next);

	return (0);
}

int
linuxkpi_pci_request_region(struct pci_dev *pdev, int bar, const char *res_name)
{
	return (lkpi_pci_request_region(pdev, bar, res_name, false));
}

int
linuxkpi_pci_request_regions(struct pci_dev *pdev, const char *res_name)
{
	int error;
	int i;

	for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
		error = pci_request_region(pdev, i, res_name);
		if (error && error != -ENODEV) {
			pci_release_regions(pdev);
			return (error);
		}
	}
	return (0);
}

int
linuxkpi_pcim_request_all_regions(struct pci_dev *pdev, const char *res_name)
{
	int bar, error;

	for (bar = 0; bar <= PCIR_MAX_BAR_0; bar++) {
		error = lkpi_pci_request_region(pdev, bar, res_name, true);
		if (error != 0) {
			device_printf(pdev->dev.bsddev, "%s: bar %d res_name '%s': "
			    "lkpi_pci_request_region returned %d\n", __func__,
			    bar, res_name, error);
			pci_release_regions(pdev);
			return (error);
		}
	}
	return (0);
}

void
linuxkpi_pci_release_region(struct pci_dev *pdev, int bar)
{
	struct resource_list_entry *rle;
	struct pci_devres *dr;
	struct pci_mmio_region *mmio, *p;

	if ((rle = lkpi_pci_get_bar(pdev, bar, false)) == NULL)
		return;

	/*
	 * As we implicitly track the requests we also need to clear them on
	 * release.  Do clear before resource release.
	 */
	dr = lkpi_pci_devres_find(pdev);
	if (dr != NULL) {
		KASSERT(dr->region_table[bar] == rle->res, ("%s: pdev %p bar %d"
		    " region_table res %p != rle->res %p\n", __func__, pdev,
		    bar, dr->region_table[bar], rle->res));
		dr->region_table[bar] = NULL;
		dr->region_mask &= ~(1 << bar);
	}

	TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
		if (rle->res != (void *)rman_get_bushandle(mmio->res))
			continue;
		TAILQ_REMOVE(&pdev->mmio, mmio, next);
		free(mmio, M_DEVBUF);
	}

	bus_release_resource(pdev->dev.bsddev, rle->type, rle->rid, rle->res);
}

void
linuxkpi_pci_release_regions(struct pci_dev *pdev)
{
	int i;

	for (i = 0; i <= PCIR_MAX_BAR_0; i++)
		pci_release_region(pdev, i);
}

int
linux_pci_register_drm_driver(struct pci_driver *pdrv)
{
	devclass_t dc;

	dc = devclass_create("vgapci");
	if (dc == NULL)
		return (-ENXIO);
	pdrv->isdrm = true;
	pdrv->name = "drmn";
	return (_linux_pci_register_driver(pdrv, dc));
}

void
linux_pci_unregister_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find(pdrv->isdrm ? "vgapci" : "pci");

	spin_lock(&pci_lock);
	list_del(&pdrv->node);
	spin_unlock(&pci_lock);
	bus_topo_lock();
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	bus_topo_unlock();
}

void
linux_pci_unregister_drm_driver(struct pci_driver *pdrv)
{
	devclass_t bus;

	bus = devclass_find("vgapci");

	spin_lock(&pci_lock);
	list_del(&pdrv->node);
	spin_unlock(&pci_lock);
	bus_topo_lock();
	if (bus != NULL)
		devclass_delete_driver(bus, &pdrv->bsddriver);
	bus_topo_unlock();
}

int
linuxkpi_pci_enable_msix(struct pci_dev *pdev, struct msix_entry *entries,
    int nreq)
{
	struct resource_list_entry *rle;
	int error;
	int avail;
	int i;

	avail = pci_msix_count(pdev->dev.bsddev);
	if (avail < nreq) {
		if (avail == 0)
			return -EINVAL;
		return avail;
	}
	avail = nreq;
	if ((error = -pci_alloc_msix(pdev->dev.bsddev, &avail)) != 0)
		return error;
	/*
	 * Handle case where "pci_alloc_msix()" may allocate fewer
	 * interrupts than requested and return with no error:
	 */
	if (avail < nreq) {
		pci_release_msi(pdev->dev.bsddev);
		return avail;
	}
	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false);
	pdev->dev.irq_start = rle->start;
	pdev->dev.irq_end = rle->start + avail;
	for (i = 0; i < nreq; i++)
		entries[i].vector = pdev->dev.irq_start + i;
	pdev->msix_enabled = true;
	return (0);
}

int
_lkpi_pci_enable_msi_range(struct pci_dev *pdev, int minvec, int maxvec)
{
	struct resource_list_entry *rle;
	int error;
	int nvec;

	if (maxvec < minvec)
		return (-EINVAL);

	nvec = pci_msi_count(pdev->dev.bsddev);
	if (nvec < 1 || nvec < minvec)
		return (-ENOSPC);

	nvec = min(nvec, maxvec);
	if ((error = -pci_alloc_msi(pdev->dev.bsddev, &nvec)) != 0)
		return error;

	/* Native PCI might only ever ask for 32 vectors. */
	if (nvec < minvec) {
		pci_release_msi(pdev->dev.bsddev);
		return (-ENOSPC);
	}

	rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false);
	pdev->dev.irq_start = rle->start;
	pdev->dev.irq_end = rle->start + nvec;
	pdev->irq = rle->start;
	pdev->msi_enabled = true;
	return (0);
}

int
pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv,
    unsigned int flags)
{
	int error;

	if (flags & PCI_IRQ_MSIX) {
		struct msix_entry *entries;
		int i;

		entries = kcalloc(maxv, sizeof(*entries), GFP_KERNEL);
		if (entries == NULL) {
			error = -ENOMEM;
			goto out;
		}
		for (i = 0; i < maxv; ++i)
			entries[i].entry = i;
		error = pci_enable_msix(pdev, entries, maxv);
out:
		kfree(entries);
		if (error == 0 && pdev->msix_enabled)
			return (pdev->dev.irq_end - pdev->dev.irq_start);
	}
	if (flags & PCI_IRQ_MSI) {
		if (pci_msi_count(pdev->dev.bsddev) < minv)
			return (-ENOSPC);
		error = _lkpi_pci_enable_msi_range(pdev, minv, maxv);
		if (error == 0 && pdev->msi_enabled)
			return (pdev->dev.irq_end - pdev->dev.irq_start);
	}
	if (flags & PCI_IRQ_INTX) {
		if (pdev->irq)
			return (1);
	}

	return (-EINVAL);
}

struct msi_desc *
lkpi_pci_msi_desc_alloc(int irq)
{
	struct device *dev;
	struct pci_dev *pdev;
	struct msi_desc *desc;
	struct pci_devinfo *dinfo;
	struct pcicfg_msi *msi;
	int vec;

	dev = lkpi_pci_find_irq_dev(irq);
	if (dev == NULL)
		return (NULL);

	pdev = to_pci_dev(dev);

	if (pdev->msi_desc == NULL)
		return (NULL);

	if (irq < pdev->dev.irq_start || irq >= pdev->dev.irq_end)
		return (NULL);

	vec = pdev->dev.irq_start - irq;

	if (pdev->msi_desc[vec] != NULL)
		return (pdev->msi_desc[vec]);

	dinfo = device_get_ivars(dev->bsddev);
	msi = &dinfo->cfg.msi;

	desc = malloc(sizeof(*desc), M_DEVBUF, M_WAITOK | M_ZERO);

	desc->pci.msi_attrib.is_64 =
	    (msi->msi_ctrl & PCIM_MSICTRL_64BIT) ?
	    true : false;
	desc->msg.data = msi->msi_data;

	pdev->msi_desc[vec] = desc;

	return (desc);
}

bool
pci_device_is_present(struct pci_dev *pdev)
{
	device_t dev;

	dev = pdev->dev.bsddev;

	return (bus_child_present(dev));
}

CTASSERT(sizeof(dma_addr_t) <= sizeof(uint64_t));

struct linux_dma_obj {
	void		*vaddr;
	uint64_t	dma_addr;
	bus_dmamap_t	dmamap;
	bus_dma_tag_t	dmat;
};

static uma_zone_t linux_dma_trie_zone;
static uma_zone_t linux_dma_obj_zone;

static void
linux_dma_init(void *arg)
{

	linux_dma_trie_zone = uma_zcreate("linux_dma_pctrie",
	    pctrie_node_size(), NULL, NULL, pctrie_zone_init, NULL,
	    UMA_ALIGN_PTR, 0);
	linux_dma_obj_zone = uma_zcreate("linux_dma_object",
	    sizeof(struct linux_dma_obj), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, 0);
	lkpi_pci_nseg1_fail = counter_u64_alloc(M_WAITOK);
}
SYSINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_init, NULL);

static void
linux_dma_uninit(void *arg)
{

	counter_u64_free(lkpi_pci_nseg1_fail);
	uma_zdestroy(linux_dma_obj_zone);
	uma_zdestroy(linux_dma_trie_zone);
}
SYSUNINIT(linux_dma, SI_SUB_DRIVERS, SI_ORDER_THIRD, linux_dma_uninit, NULL);

static void *
linux_dma_trie_alloc(struct pctrie *ptree)
{

	return (uma_zalloc(linux_dma_trie_zone, M_NOWAIT));
}

static void
linux_dma_trie_free(struct pctrie *ptree, void *node)
{

	uma_zfree(linux_dma_trie_zone, node);
}

PCTRIE_DEFINE(LINUX_DMA, linux_dma_obj, dma_addr, linux_dma_trie_alloc,
    linux_dma_trie_free);

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
static dma_addr_t
linux_dma_map_phys_common(struct device *dev, vm_paddr_t phys, size_t len,
    bus_dma_tag_t dmat)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;
	int error, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	/*
	 * If the resultant mapping will be entirely 1:1 with the
	 * physical address, short-circuit the remainder of the
	 * bus_dma API.  This avoids tracking collisions in the pctrie
	 * with the additional benefit of reducing overhead.
	 */
	if (bus_dma_id_mapped(dmat, phys, len))
		return (phys);

	obj = uma_zalloc(linux_dma_obj_zone, M_NOWAIT);
	if (obj == NULL) {
		return (0);
	}
	obj->dmat = dmat;

	DMA_PRIV_LOCK(priv);
	if (bus_dmamap_create(obj->dmat, 0, &obj->dmamap) != 0) {
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}

	nseg = -1;
	error = _bus_dmamap_load_phys(obj->dmat, obj->dmamap, phys, len,
	    BUS_DMA_NOWAIT, &seg, &nseg);
	if (error != 0) {
		bus_dmamap_destroy(obj->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		counter_u64_add(lkpi_pci_nseg1_fail, 1);
		if (linuxkpi_debug) {
			device_printf(dev->bsddev, "%s: _bus_dmamap_load_phys "
			    "error %d, phys %#018jx len %zu\n", __func__,
			    error, (uintmax_t)phys, len);
			dump_stack();
		}
		return (0);
	}

	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	error = LINUX_DMA_PCTRIE_INSERT(&priv->ptree, obj);
	if (error != 0) {
		bus_dmamap_unload(obj->dmat, obj->dmamap);
		bus_dmamap_destroy(obj->dmat, obj->dmamap);
		DMA_PRIV_UNLOCK(priv);
		uma_zfree(linux_dma_obj_zone, obj);
		return (0);
	}
	DMA_PRIV_UNLOCK(priv);
	return (obj->dma_addr);
}
#else
static dma_addr_t
linux_dma_map_phys_common(struct device *dev __unused, vm_paddr_t phys,
    size_t len __unused, bus_dma_tag_t dmat __unused)
{
	return (phys);
}
#endif

dma_addr_t
lkpi_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len,
    enum dma_data_direction direction, unsigned long attrs)
{
	struct linux_dma_priv *priv;
	dma_addr_t dma;

	priv = dev->dma_priv;
	dma = linux_dma_map_phys_common(dev, phys, len, priv->dmat);
	if (dma_mapping_error(dev, dma))
		return (dma);

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		dma_sync_single_for_device(dev, dma, len, direction);

	return (dma);
}

/* For backward compat only so we can MFC this.  Remove before 15. */
dma_addr_t
linux_dma_map_phys(struct device *dev, vm_paddr_t phys, size_t len)
{
	return (lkpi_dma_map_phys(dev, phys, len, DMA_NONE, 0));
}

#if defined(__i386__) || defined(__amd64__) || defined(__aarch64__)
void
lkpi_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len,
    enum dma_data_direction direction, unsigned long attrs)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&priv->ptree, dma_addr);

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		dma_sync_single_for_cpu(dev, dma_addr, len, direction);

	bus_dmamap_unload(obj->dmat, obj->dmamap);
	bus_dmamap_destroy(obj->dmat, obj->dmamap);
	DMA_PRIV_UNLOCK(priv);

	uma_zfree(linux_dma_obj_zone, obj);
}
#else
void
lkpi_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len,
    enum dma_data_direction direction, unsigned long attrs)
{
}
#endif

/* For backward compat only so we can MFC this.  Remove before 15. */
void
linux_dma_unmap(struct device *dev, dma_addr_t dma_addr, size_t len)
{
	lkpi_dma_unmap(dev, dma_addr, len, DMA_NONE, 0);
}

void *
linux_dma_alloc_coherent(struct device *dev, size_t size,
    dma_addr_t *dma_handle, gfp_t flag)
{
	struct linux_dma_priv *priv;
	vm_paddr_t high;
	size_t align;
	void *mem;

	if (dev == NULL || dev->dma_priv == NULL) {
		*dma_handle = 0;
		return (NULL);
	}
	priv = dev->dma_priv;
	if (priv->dma_coherent_mask)
		high = priv->dma_coherent_mask;
	else
		/* Coherent is lower 32bit only by default in Linux. */
		high = BUS_SPACE_MAXADDR_32BIT;
	align = PAGE_SIZE << get_order(size);
	/* Always zero the allocation. */
	flag |= M_ZERO;
	mem = kmem_alloc_contig(size, flag & GFP_NATIVE_MASK, 0, high,
	    align, 0, VM_MEMATTR_DEFAULT);
	if (mem != NULL) {
		*dma_handle = linux_dma_map_phys_common(dev, vtophys(mem), size,
		    priv->dmat_coherent);
		if (*dma_handle == 0) {
			kmem_free(mem, size);
			mem = NULL;
		}
	} else {
		*dma_handle = 0;
	}
	return (mem);
}

struct lkpi_devres_dmam_coherent {
	size_t size;
	dma_addr_t *handle;
	void *mem;
};

static void
lkpi_dmam_free_coherent(struct device *dev, void *p)
{
	struct lkpi_devres_dmam_coherent *dr;

	dr = p;
	dma_free_coherent(dev, dr->size, dr->mem, *dr->handle);
}

void *
linuxkpi_dmam_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
    gfp_t flag)
{
	struct lkpi_devres_dmam_coherent *dr;

	dr = lkpi_devres_alloc(lkpi_dmam_free_coherent,
	    sizeof(*dr), GFP_KERNEL | __GFP_ZERO);

	if (dr == NULL)
		return (NULL);

	dr->size = size;
	dr->mem = linux_dma_alloc_coherent(dev, size, dma_handle, flag);
	dr->handle = dma_handle;
	if (dr->mem == NULL) {
		lkpi_devres_free(dr);
		return (NULL);
	}

	lkpi_devres_add(dev, dr);
	return (dr->mem);
}

void
linuxkpi_dma_sync(struct device *dev, dma_addr_t dma_addr, size_t size,
    bus_dmasync_op_t op)
{
	struct linux_dma_priv *priv;
	struct linux_dma_obj *obj;

	priv = dev->dma_priv;

	if (pctrie_is_empty(&priv->ptree))
		return;

	DMA_PRIV_LOCK(priv);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&priv->ptree, dma_addr);
	if (obj == NULL) {
		DMA_PRIV_UNLOCK(priv);
		return;
	}

	bus_dmamap_sync(obj->dmat, obj->dmamap, op);
	DMA_PRIV_UNLOCK(priv);
}

int
linux_dma_map_sg_attrs(struct device *dev, struct scatterlist *sgl, int nents,
    enum dma_data_direction direction, unsigned long attrs)
{
	struct linux_dma_priv *priv;
	struct scatterlist *sg;
	int i, nseg;
	bus_dma_segment_t seg;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	/* create common DMA map in the first S/G entry */
	if (bus_dmamap_create(priv->dmat, 0, &sgl->dma_map) != 0) {
		DMA_PRIV_UNLOCK(priv);
		return (0);
	}

	/* load all S/G list entries */
	for_each_sg(sgl, sg, nents, i) {
		nseg = -1;
		if (_bus_dmamap_load_phys(priv->dmat, sgl->dma_map,
		    sg_phys(sg), sg->length, BUS_DMA_NOWAIT,
		    &seg, &nseg) != 0) {
			bus_dmamap_unload(priv->dmat, sgl->dma_map);
			bus_dmamap_destroy(priv->dmat, sgl->dma_map);
			DMA_PRIV_UNLOCK(priv);
			return (0);
		}
		KASSERT(nseg == 0,
		    ("More than one segment (nseg=%d)", nseg + 1));
		sg_dma_address(sg) = seg.ds_addr;
	}

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) != 0)
		goto skip_sync;

	switch (direction) {
	case DMA_BIDIRECTIONAL:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE);
		break;
	case DMA_TO_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD);
		break;
	case DMA_FROM_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREWRITE);
		break;
	default:
		break;
	}
skip_sync:

	DMA_PRIV_UNLOCK(priv);

	return (nents);
}

void
linux_dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sgl,
    int nents __unused, enum dma_data_direction direction,
    unsigned long attrs)
{
	struct linux_dma_priv *priv;

	priv = dev->dma_priv;

	DMA_PRIV_LOCK(priv);

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) != 0)
		goto skip_sync;

	switch (direction) {
	case DMA_BIDIRECTIONAL:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD);
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_PREREAD);
		break;
	case DMA_TO_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTWRITE);
		break;
	case DMA_FROM_DEVICE:
		bus_dmamap_sync(priv->dmat, sgl->dma_map, BUS_DMASYNC_POSTREAD);
		break;
	default:
		break;
	}
skip_sync:

	bus_dmamap_unload(priv->dmat, sgl->dma_map);
	bus_dmamap_destroy(priv->dmat, sgl->dma_map);
	DMA_PRIV_UNLOCK(priv);
}

struct dma_pool {
	struct device  *pool_device;
	uma_zone_t	pool_zone;
	struct mtx	pool_lock;
	bus_dma_tag_t	pool_dmat;
	size_t		pool_entry_size;
	struct pctrie	pool_ptree;
};

#define	DMA_POOL_LOCK(pool)	mtx_lock(&(pool)->pool_lock)
#define	DMA_POOL_UNLOCK(pool)	mtx_unlock(&(pool)->pool_lock)

static inline int
dma_pool_obj_ctor(void *mem, int size, void *arg, int flags)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;
	int error, nseg;
	bus_dma_segment_t seg;

	nseg = -1;
	DMA_POOL_LOCK(pool);
	error = _bus_dmamap_load_phys(pool->pool_dmat, obj->dmamap,
	    vtophys(obj->vaddr), pool->pool_entry_size, BUS_DMA_NOWAIT,
	    &seg, &nseg);
	DMA_POOL_UNLOCK(pool);
	if (error != 0) {
		return (error);
	}
	KASSERT(++nseg == 1, ("More than one segment (nseg=%d)", nseg));
	obj->dma_addr = seg.ds_addr;

	return (0);
}

static void
dma_pool_obj_dtor(void *mem, int size, void *arg)
{
	struct linux_dma_obj *obj = mem;
	struct dma_pool *pool = arg;

	DMA_POOL_LOCK(pool);
	bus_dmamap_unload(pool->pool_dmat, obj->dmamap);
	DMA_POOL_UNLOCK(pool);
}

static int
dma_pool_obj_import(void *arg, void **store, int count, int domain __unused,
    int flags)
{
	struct dma_pool *pool = arg;
	struct linux_dma_obj *obj;
	int error, i;

	for (i = 0; i < count; i++) {
		obj = uma_zalloc(linux_dma_obj_zone, flags);
		if (obj == NULL)
			break;

		error = bus_dmamem_alloc(pool->pool_dmat, &obj->vaddr,
		    BUS_DMA_NOWAIT, &obj->dmamap);
		if (error != 0) {
			uma_zfree(linux_dma_obj_zone, obj);
			break;
		}

		store[i] = obj;
	}

	return (i);
}

static void
dma_pool_obj_release(void *arg, void **store, int count)
{
	struct dma_pool *pool = arg;
	struct linux_dma_obj *obj;
	int i;

	for (i = 0; i < count; i++) {
		obj = store[i];
		bus_dmamem_free(pool->pool_dmat, obj->vaddr, obj->dmamap);
		uma_zfree(linux_dma_obj_zone, obj);
	}
}

struct dma_pool *
linux_dma_pool_create(char *name, struct device *dev, size_t size,
    size_t align, size_t boundary)
{
	struct linux_dma_priv *priv;
	struct dma_pool *pool;

	priv = dev->dma_priv;

	pool = kzalloc(sizeof(*pool), GFP_KERNEL);
	pool->pool_device = dev;
	pool->pool_entry_size = size;

	if (bus_dma_tag_create(bus_get_dma_tag(dev->bsddev),
	    align, boundary,		/* alignment, boundary */
	    priv->dma_mask,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filtfunc, filtfuncarg */
	    size,			/* maxsize */
	    1,				/* nsegments */
	    size,			/* maxsegsz */
	    0,				/* flags */
	    NULL, NULL,			/* lockfunc, lockfuncarg */
	    &pool->pool_dmat)) {
		kfree(pool);
		return (NULL);
	}

	pool->pool_zone = uma_zcache_create(name, -1, dma_pool_obj_ctor,
	    dma_pool_obj_dtor, NULL, NULL, dma_pool_obj_import,
	    dma_pool_obj_release, pool, 0);

	mtx_init(&pool->pool_lock, "lkpi-dma-pool", NULL, MTX_DEF);
	pctrie_init(&pool->pool_ptree);

	return (pool);
}

void
linux_dma_pool_destroy(struct dma_pool *pool)
{

	uma_zdestroy(pool->pool_zone);
	bus_dma_tag_destroy(pool->pool_dmat);
	mtx_destroy(&pool->pool_lock);
	kfree(pool);
}

void
lkpi_dmam_pool_destroy(struct device *dev, void *p)
{
	struct dma_pool *pool;

	pool = *(struct dma_pool **)p;
	LINUX_DMA_PCTRIE_RECLAIM(&pool->pool_ptree);
	linux_dma_pool_destroy(pool);
}

void *
linux_dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
    dma_addr_t *handle)
{
	struct linux_dma_obj *obj;

	obj = uma_zalloc_arg(pool->pool_zone, pool, mem_flags & GFP_NATIVE_MASK);
	if (obj == NULL)
		return (NULL);

	DMA_POOL_LOCK(pool);
	if (LINUX_DMA_PCTRIE_INSERT(&pool->pool_ptree, obj) != 0) {
		DMA_POOL_UNLOCK(pool);
		uma_zfree_arg(pool->pool_zone, obj, pool);
		return (NULL);
	}
	DMA_POOL_UNLOCK(pool);

	*handle = obj->dma_addr;
	return (obj->vaddr);
}

void
linux_dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma_addr)
{
	struct linux_dma_obj *obj;

	DMA_POOL_LOCK(pool);
	obj = LINUX_DMA_PCTRIE_LOOKUP(&pool->pool_ptree, dma_addr);
	if (obj == NULL) {
		DMA_POOL_UNLOCK(pool);
		return;
	}
	LINUX_DMA_PCTRIE_REMOVE(&pool->pool_ptree, dma_addr);
	DMA_POOL_UNLOCK(pool);

	uma_zfree_arg(pool->pool_zone, obj, pool);
}

static int
linux_backlight_get_status(device_t dev, struct backlight_props *props)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	props->brightness = pdev->dev.bd->props.brightness;
	props->brightness = props->brightness * 100 / pdev->dev.bd->props.max_brightness;
	props->nlevels = 0;

	return (0);
}

static int
linux_backlight_get_info(device_t dev, struct backlight_info *info)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	info->type = BACKLIGHT_TYPE_PANEL;
	strlcpy(info->name, pdev->dev.bd->name, BACKLIGHTMAXNAMELENGTH);
	return (0);
}

static int
linux_backlight_update_status(device_t dev, struct backlight_props *props)
{
	struct pci_dev *pdev;

	linux_set_current(curthread);
	pdev = device_get_softc(dev);

	pdev->dev.bd->props.brightness = pdev->dev.bd->props.max_brightness *
	    props->brightness / 100;
	pdev->dev.bd->props.power = props->brightness == 0 ?
	    4/* FB_BLANK_POWERDOWN */ : 0/* FB_BLANK_UNBLANK */;
	return (pdev->dev.bd->ops->update_status(pdev->dev.bd));
}

struct backlight_device *
linux_backlight_device_register(const char *name, struct device *dev,
    void *data, const struct backlight_ops *ops, struct backlight_properties *props)
{

	dev->bd = malloc(sizeof(*dev->bd), M_DEVBUF, M_WAITOK | M_ZERO);
	dev->bd->ops = ops;
	dev->bd->props.type = props->type;
	dev->bd->props.max_brightness = props->max_brightness;
	dev->bd->props.brightness = props->brightness;
	dev->bd->props.power = props->power;
	dev->bd->data = data;
	dev->bd->dev = dev;
	dev->bd->name = strdup(name, M_DEVBUF);

	dev->backlight_dev = backlight_register(name, dev->bsddev);

	return (dev->bd);
}

void
linux_backlight_device_unregister(struct backlight_device *bd)
{

	backlight_destroy(bd->dev->backlight_dev);
	free(bd->name, M_DEVBUF);
	free(bd, M_DEVBUF);
}